2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
44 #define LE_FLOWCTL_MAX_CREDITS 65535
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
/* Map an HCI address type to the BDADDR_* socket address type for this
 * link: on LE links, ADDR_LE_DEV_PUBLIC -> BDADDR_LE_PUBLIC, anything
 * else -> BDADDR_LE_RANDOM.
 * NOTE(review): the BR/EDR (non-LE) return path is not visible in this
 * extraction (original numbering gaps) -- verify against upstream. */
67 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
69 if (hcon->type == LE_LINK) {
70 if (type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
73 return BDADDR_LE_RANDOM;
79 /* ---- L2CAP channels ---- */
/* Unlocked lookup of a channel on @conn by destination CID.  Caller
 * must hold conn->chan_lock (see the locked wrappers below).  The
 * match test/return lines are not visible in this extraction. */
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
86 list_for_each_entry(c, &conn->chan_l, list) {
/* Unlocked lookup of a channel on @conn by source CID.  Caller must
 * hold conn->chan_lock.  Match test/return not visible here. */
93 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
98 list_for_each_entry(c, &conn->chan_l, list) {
105 /* Find channel with given SCID.
106 * Returns locked channel. */
/* Serializes the lookup with conn->chan_lock; the per-channel lock
 * acquisition is in the numbering gap (113 -> 116) of this extraction. */
107 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
110 struct l2cap_chan *c;
112 mutex_lock(&conn->chan_lock);
113 c = __l2cap_get_chan_by_scid(conn, cid);
116 mutex_unlock(&conn->chan_lock);
121 /* Find channel with given DCID.
122 * Returns locked channel.
/* Same pattern as l2cap_get_chan_by_scid(), keyed by destination CID. */
124 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
127 struct l2cap_chan *c;
129 mutex_lock(&conn->chan_lock);
130 c = __l2cap_get_chan_by_dcid(conn, cid);
133 mutex_unlock(&conn->chan_lock);
/* Unlocked lookup by signalling command identifier.  Caller must hold
 * conn->chan_lock. */
138 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
141 struct l2cap_chan *c;
143 list_for_each_entry(c, &conn->chan_l, list) {
144 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
150 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
153 struct l2cap_chan *c;
155 mutex_lock(&conn->chan_lock);
156 c = __l2cap_get_chan_by_ident(conn, ident);
159 mutex_unlock(&conn->chan_lock);
/* Find a globally registered channel bound to @psm on source address
 * @src.  Caller must hold chan_list_lock. */
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
166 struct l2cap_chan *c;
168 list_for_each_entry(c, &chan_list, global_l) {
169 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind @chan to a PSM: the explicitly requested @psm if it is free for
 * @src, otherwise (psm == 0) the first unused odd dynamic PSM in
 * 0x1001..0x10ff.  Serialized by chan_list_lock.  Error returns for
 * the in-use case are in a numbering gap of this extraction. */
175 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
179 write_lock(&chan_list_lock);
181 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
194 for (p = 0x1001; p < 0x1100; p += 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
196 chan->psm = cpu_to_le16(p);
197 chan->sport = cpu_to_le16(p);
204 write_unlock(&chan_list_lock);
207 EXPORT_SYMBOL_GPL(l2cap_add_psm);
/* Register @chan as a fixed channel, overriding the connection-
 * oriented defaults (MTU, channel type) under chan_list_lock. */
209 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
211 write_lock(&chan_list_lock);
213 /* Override the defaults (which are for conn-oriented) */
214 chan->omtu = L2CAP_DEFAULT_MTU;
215 chan->chan_type = L2CAP_CHAN_FIXED;
219 write_unlock(&chan_list_lock);
/* Allocate the first unused dynamic source CID on @conn.  The end of
 * the dynamic range differs between LE and BR/EDR links. */
224 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
228 if (conn->hcon->type == LE_LINK)
229 dyn_end = L2CAP_CID_LE_DYN_END;
231 dyn_end = L2CAP_CID_DYN_END;
233 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
234 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move @chan to @state and notify the owner via the state_change
 * callback with err == 0. */
241 static void l2cap_state_change(struct l2cap_chan *chan, int state)
243 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
244 state_to_string(state));
247 chan->ops->state_change(chan, state, 0);
/* Change state and report @err to the owner in a single callback.
 * NOTE(review): the visible call passes chan->state -- the state
 * update itself sits in a numbering gap; verify upstream. */
250 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
254 chan->ops->state_change(chan, chan->state, err);
/* Report @err to the owner without changing the channel state. */
257 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
259 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending or no retransmission timeout is configured. */
262 static void __set_retrans_timer(struct l2cap_chan *chan)
264 if (!delayed_work_pending(&chan->monitor_timer) &&
265 chan->retrans_timeout) {
266 l2cap_set_timer(chan, &chan->retrans_timer,
267 msecs_to_jiffies(chan->retrans_timeout));
/* Switch from the retransmission timer to the monitor timer (the two
 * are mutually exclusive in ERTM). */
271 static void __set_monitor_timer(struct l2cap_chan *chan)
273 __clear_retrans_timer(chan)
274 if (chan->monitor_timeout) {
275 l2cap_set_timer(chan, &chan->monitor_timer,
276 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of @head for the skb whose ERTM control txseq matches
 * the requested sequence number. */
280 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
285 skb_queue_walk(head, skb) {
286 if (bt_cb(skb)->control.txseq == seq)
293 /* ---- L2CAP sequence number lists ---- */
295 /* For ERTM, ordered lists of sequence numbers must be tracked for
296 * SREJ requests that are received and for frames that are to be
297 * retransmitted. These seq_list functions implement a singly-linked
298 * list in an array, where membership in the list can also be checked
299 * in constant time. Items can also be added to the tail of the list
300 * and removed from the head in constant time, without further memory
/* Allocate and clear the backing array for an ERTM sequence list.
 * Returns 0 on success; the allocation-failure (-ENOMEM) return sits
 * in a numbering gap (314 -> 318) of this extraction -- verify. */
304 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
306 size_t alloc_size, i;
308 /* Allocated size is a power of 2 to map sequence numbers
309 * (which may be up to 14 bits) in to a smaller array that is
310 * sized for the negotiated ERTM transmit windows.
312 alloc_size = roundup_pow_of_two(size);
314 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
318 seq_list->mask = alloc_size - 1;
319 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
320 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
321 for (i = 0; i < alloc_size; i++)
322 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array allocated by l2cap_seq_list_init(). */
327 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
329 kfree(seq_list->list);
/* O(1) membership test: a slot holds L2CAP_SEQ_LIST_CLEAR iff the
 * sequence number is not in the list. */
332 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
335 /* Constant-time check for list membership */
336 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove and return the head of the list in O(1); when the last entry
 * (marked L2CAP_SEQ_LIST_TAIL) is popped, head and tail reset to
 * CLEAR, i.e. the empty state. */
339 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
341 u16 seq = seq_list->head;
342 u16 mask = seq_list->mask;
344 seq_list->head = seq_list->list[seq & mask];
345 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
347 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
348 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
349 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Empty the list.  O(n) over the backing array, but skipped entirely
 * when the list is already empty (head == CLEAR). */
355 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
359 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
362 for (i = 0; i <= seq_list->mask; i++)
363 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
365 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
366 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq at the tail in O(1).  A sequence number already present
 * (slot not CLEAR) is silently ignored, so the list never holds
 * duplicates.  The new tail slot is marked L2CAP_SEQ_LIST_TAIL. */
369 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
371 u16 mask = seq_list->mask;
373 /* All appends happen in constant time */
375 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
378 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
379 seq_list->head = seq;
381 seq_list->list[seq_list->tail & mask] = seq;
383 seq_list->tail = seq;
384 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for chan_timer: close the channel with a
 * state-dependent reason under conn->chan_lock + channel lock, notify
 * the owner, then drop the reference the timer held on the channel. */
387 static void l2cap_chan_timeout(struct work_struct *work)
389 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
391 struct l2cap_conn *conn = chan->conn;
394 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
396 mutex_lock(&conn->chan_lock);
397 l2cap_chan_lock(chan);
399 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
400 reason = ECONNREFUSED;
401 else if (chan->state == BT_CONNECT &&
402 chan->sec_level != BT_SECURITY_SDP)
403 reason = ECONNREFUSED;
/* NOTE(review): the fallback reason (else branch) is in a numbering
 * gap of this extraction. */
407 l2cap_chan_close(chan, reason);
409 l2cap_chan_unlock(chan);
411 chan->ops->close(chan);
412 mutex_unlock(&conn->chan_lock);
414 l2cap_chan_put(chan);
/* Allocate a new channel, add it to the global channel list and return
 * it in BT_OPEN state with a single kref held by the caller.
 * CONF_NOT_COMPLETE stays set until l2cap_chan_ready(). */
417 struct l2cap_chan *l2cap_chan_create(void)
419 struct l2cap_chan *chan;
421 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
425 mutex_init(&chan->lock);
427 /* Set default lock nesting level */
428 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
430 write_lock(&chan_list_lock);
431 list_add(&chan->global_l, &chan_list);
432 write_unlock(&chan_list_lock);
434 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
436 chan->state = BT_OPEN;
438 kref_init(&chan->kref);
440 /* This flag is cleared in l2cap_chan_ready() */
441 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
443 BT_DBG("chan %p", chan);
447 EXPORT_SYMBOL_GPL(l2cap_chan_create);
/* kref release callback: unlink the channel from the global list.
 * (The final kfree of @chan is in a numbering gap of this extraction.) */
449 static void l2cap_chan_destroy(struct kref *kref)
451 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
453 BT_DBG("chan %p", chan);
455 write_lock(&chan_list_lock);
456 list_del(&chan->global_l);
457 write_unlock(&chan_list_lock);
/* Take a reference on channel @c (kref_get is in a numbering gap). */
462 void l2cap_chan_hold(struct l2cap_chan *c)
464 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a reference on @c; l2cap_chan_destroy() runs on the last put. */
469 void l2cap_chan_put(struct l2cap_chan *c)
471 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
473 kref_put(&c->kref, l2cap_chan_destroy);
475 EXPORT_SYMBOL_GPL(l2cap_chan_put);
/* Reset ERTM parameters, security level and timeouts to their spec
 * defaults and clear all CONF_* state bits.  Intended for connection-
 * oriented channels (cf. l2cap_add_scid() which overrides these). */
477 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
479 chan->fcs = L2CAP_FCS_CRC16;
480 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
481 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
482 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
483 chan->remote_max_tx = chan->max_tx;
484 chan->remote_tx_win = chan->tx_win;
485 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->sec_level = BT_SECURITY_LOW;
487 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
488 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
489 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
490 chan->conf_state = 0;
492 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
494 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
/* Initialize LE credit-based flow control: no TX credits until the
 * peer grants some, RX credits from the module default, and MPS capped
 * by the local input MTU. */
496 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
499 chan->sdu_last_frag = NULL;
501 chan->tx_credits = 0;
502 chan->rx_credits = le_max_credits;
503 chan->mps = min_t(u16, chan->imtu, le_default_mps);
505 skb_queue_head_init(&chan->tx_q);
/* Attach @chan to @conn: assign CIDs and output MTU according to the
 * channel type, set default QoS (best-effort) parameters, take a
 * channel reference plus -- for non-fixed channels or fixed channels
 * with FLAG_HOLD_HCI_CONN -- an hci_conn reference, and link the
 * channel into conn->chan_l.  Caller must hold conn->chan_lock. */
508 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
510 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
511 __le16_to_cpu(chan->psm), chan->dcid);
513 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
517 switch (chan->chan_type) {
518 case L2CAP_CHAN_CONN_ORIENTED:
519 /* Alloc CID for connection-oriented socket */
520 chan->scid = l2cap_alloc_cid(conn);
521 if (conn->hcon->type == ACL_LINK)
522 chan->omtu = L2CAP_DEFAULT_MTU;
525 case L2CAP_CHAN_CONN_LESS:
526 /* Connectionless socket */
527 chan->scid = L2CAP_CID_CONN_LESS;
528 chan->dcid = L2CAP_CID_CONN_LESS;
529 chan->omtu = L2CAP_DEFAULT_MTU;
532 case L2CAP_CHAN_FIXED:
533 /* Caller will set CID and CID specific MTU values */
537 /* Raw socket can send/recv signalling messages only */
538 chan->scid = L2CAP_CID_SIGNALING;
539 chan->dcid = L2CAP_CID_SIGNALING;
540 chan->omtu = L2CAP_DEFAULT_MTU;
543 chan->local_id = L2CAP_BESTEFFORT_ID;
544 chan->local_stype = L2CAP_SERV_BESTEFFORT;
545 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
546 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
547 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
548 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
550 l2cap_chan_hold(chan);
552 /* Only keep a reference for fixed channels if they requested it */
553 if (chan->chan_type != L2CAP_CHAN_FIXED ||
554 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
555 hci_conn_hold(conn->hcon);
557 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper: attach @chan to @conn under conn->chan_lock. */
560 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
562 mutex_lock(&conn->chan_lock);
563 __l2cap_chan_add(conn, chan);
564 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection: stop the channel timer, notify the
 * owner via teardown(@err), unlink from conn->chan_l dropping the
 * references taken in __l2cap_chan_add(), disconnect any AMP logical
 * link, and purge mode-specific queues/timers.  Several branch/return
 * lines fall in numbering gaps of this extraction -- verify upstream. */
567 void l2cap_chan_del(struct l2cap_chan *chan, int err)
569 struct l2cap_conn *conn = chan->conn;
571 __clear_chan_timer(chan);
573 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
575 chan->ops->teardown(chan, err);
578 struct amp_mgr *mgr = conn->hcon->amp_mgr;
579 /* Delete from channel list */
580 list_del(&chan->list);
582 l2cap_chan_put(chan);
586 /* Reference was only held for non-fixed channels or
587 * fixed channels that explicitly requested it using the
588 * FLAG_HOLD_HCI_CONN flag.
590 if (chan->chan_type != L2CAP_CHAN_FIXED ||
591 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
592 hci_conn_drop(conn->hcon);
594 if (mgr && mgr->bredr_chan == chan)
595 mgr->bredr_chan = NULL;
598 if (chan->hs_hchan) {
599 struct hci_chan *hs_hchan = chan->hs_hchan;
601 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
602 amp_disconnect_logical_link(hs_hchan);
/* Mode-specific cleanup below is skipped while configuration never
 * completed (queues/timers were never armed). */
605 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
609 case L2CAP_MODE_BASIC:
612 case L2CAP_MODE_LE_FLOWCTL:
613 skb_queue_purge(&chan->tx_q);
616 case L2CAP_MODE_ERTM:
617 __clear_retrans_timer(chan);
618 __clear_monitor_timer(chan);
619 __clear_ack_timer(chan);
621 skb_queue_purge(&chan->srej_q);
623 l2cap_seq_list_free(&chan->srej_list);
624 l2cap_seq_list_free(&chan->retrans_list);
628 case L2CAP_MODE_STREAMING:
629 skb_queue_purge(&chan->tx_q);
635 EXPORT_SYMBOL_GPL(l2cap_chan_del);
/* Work handler: after an LE identity-address resolution, propagate the
 * updated destination address/type from the hci_conn to every channel
 * on the connection. */
637 static void l2cap_conn_update_id_addr(struct work_struct *work)
639 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
640 id_addr_update_work);
641 struct hci_conn *hcon = conn->hcon;
642 struct l2cap_chan *chan;
644 mutex_lock(&conn->chan_lock);
646 list_for_each_entry(chan, &conn->chan_l, list) {
647 l2cap_chan_lock(chan);
648 bacpy(&chan->dst, &hcon->dst);
649 chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
650 l2cap_chan_unlock(chan);
653 mutex_unlock(&conn->chan_lock);
/* Reject an incoming LE credit-based connection: authorization failure
 * when the channel used deferred setup, bad-PSM otherwise.  Moves the
 * channel to BT_DISCONN and answers the pending LE_CONN_REQ. */
656 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
658 struct l2cap_conn *conn = chan->conn;
659 struct l2cap_le_conn_rsp rsp;
662 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
663 result = L2CAP_CR_AUTHORIZATION;
665 result = L2CAP_CR_BAD_PSM;
667 l2cap_state_change(chan, BT_DISCONN);
669 rsp.dcid = cpu_to_le16(chan->scid);
670 rsp.mtu = cpu_to_le16(chan->imtu);
671 rsp.mps = cpu_to_le16(chan->mps);
672 rsp.credits = cpu_to_le16(chan->rx_credits);
673 rsp.result = cpu_to_le16(result);
675 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* BR/EDR counterpart of the LE reject above: answer the pending
 * CONN_REQ with security-block (deferred setup) or bad-PSM and move
 * the channel to BT_DISCONN. */
679 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
681 struct l2cap_conn *conn = chan->conn;
682 struct l2cap_conn_rsp rsp;
685 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
686 result = L2CAP_CR_SEC_BLOCK;
688 result = L2CAP_CR_BAD_PSM;
690 l2cap_state_change(chan, BT_DISCONN);
692 rsp.scid = cpu_to_le16(chan->dcid);
693 rsp.dcid = cpu_to_le16(chan->scid);
694 rsp.result = cpu_to_le16(result);
695 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
697 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close @chan according to its current state: established conn-
 * oriented channels get a DISCONN_REQ with a guard timer, half-open
 * incoming channels get the appropriate reject response, other states
 * are torn down directly.  (The switch case labels sit in numbering
 * gaps of this extraction -- verify branch boundaries upstream.) */
700 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
702 struct l2cap_conn *conn = chan->conn;
704 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
706 switch (chan->state) {
708 chan->ops->teardown(chan, 0);
713 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
714 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
715 l2cap_send_disconn_req(chan, reason);
717 l2cap_chan_del(chan, reason);
721 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
722 if (conn->hcon->type == ACL_LINK)
723 l2cap_chan_connect_reject(chan);
724 else if (conn->hcon->type == LE_LINK)
725 l2cap_chan_le_connect_reject(chan);
728 l2cap_chan_del(chan, reason);
733 l2cap_chan_del(chan, reason);
737 chan->ops->teardown(chan, 0);
741 EXPORT_SYMBOL(l2cap_chan_close);
/* Translate the channel type + requested security level into an HCI
 * authentication requirement.  SDP (and 3DSP connectionless) channels
 * are demoted from LOW to the dedicated SDP security level and never
 * request bonding. */
743 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
745 switch (chan->chan_type) {
747 switch (chan->sec_level) {
748 case BT_SECURITY_HIGH:
749 case BT_SECURITY_FIPS:
750 return HCI_AT_DEDICATED_BONDING_MITM;
751 case BT_SECURITY_MEDIUM:
752 return HCI_AT_DEDICATED_BONDING;
754 return HCI_AT_NO_BONDING;
757 case L2CAP_CHAN_CONN_LESS:
758 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
759 if (chan->sec_level == BT_SECURITY_LOW)
760 chan->sec_level = BT_SECURITY_SDP;
762 if (chan->sec_level == BT_SECURITY_HIGH ||
763 chan->sec_level == BT_SECURITY_FIPS)
764 return HCI_AT_NO_BONDING_MITM;
766 return HCI_AT_NO_BONDING;
768 case L2CAP_CHAN_CONN_ORIENTED:
769 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
770 if (chan->sec_level == BT_SECURITY_LOW)
771 chan->sec_level = BT_SECURITY_SDP;
773 if (chan->sec_level == BT_SECURITY_HIGH ||
774 chan->sec_level == BT_SECURITY_FIPS)
775 return HCI_AT_NO_BONDING_MITM;
777 return HCI_AT_NO_BONDING;
781 switch (chan->sec_level) {
782 case BT_SECURITY_HIGH:
783 case BT_SECURITY_FIPS:
784 return HCI_AT_GENERAL_BONDING_MITM;
785 case BT_SECURITY_MEDIUM:
786 return HCI_AT_GENERAL_BONDING;
788 return HCI_AT_NO_BONDING;
794 /* Service level security */
/* Delegate to SMP on LE links, otherwise to HCI link-level security
 * with the authentication type derived from the channel. */
795 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
797 struct l2cap_conn *conn = chan->conn;
800 if (conn->hcon->type == LE_LINK)
801 return smp_conn_security(conn->hcon, chan->sec_level);
803 auth_type = l2cap_get_auth_type(chan);
805 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
/* Allocate the next signalling command identifier for @conn,
 * serialized by conn->ident_lock and wrapping within the kernel's
 * 1..128 range (129+ is reserved / reserved for userspace tools). */
809 static u8 l2cap_get_ident(struct l2cap_conn *conn)
813 /* Get next available identificator.
814 * 1 - 128 are used by kernel.
815 * 129 - 199 are reserved.
816 * 200 - 254 are used by utilities like l2ping, etc.
819 mutex_lock(&conn->ident_lock);
821 if (++conn->tx_ident > 128)
826 mutex_unlock(&conn->ident_lock);
/* Build a signalling command PDU and send it on the connection's HCI
 * channel at maximum priority, non-flushable when the controller
 * supports it. */
831 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
834 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
837 BT_DBG("code 0x%2.2x", code);
842 if (lmp_no_flush_capable(conn->hcon->hdev))
843 flags = ACL_START_NO_FLUSH;
847 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
848 skb->priority = HCI_PRIO_MAX;
850 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any move state other
 * than STABLE or WAIT_PREPARE). */
853 static bool __chan_is_moving(struct l2cap_chan *chan)
855 return chan->move_state != L2CAP_MOVE_STABLE &&
856 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit @skb for @chan: route it over the high-speed (AMP) link
 * when one is attached and no move is in progress, otherwise over the
 * BR/EDR ACL link, honoring the channel's flushable/force-active
 * flags. */
859 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
861 struct hci_conn *hcon = chan->conn->hcon;
864 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
867 if (chan->hs_hcon && !__chan_is_moving(chan)) {
869 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
876 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
877 lmp_no_flush_capable(hcon->hdev))
878 flags = ACL_START_NO_FLUSH;
882 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
883 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into @control: common
 * reqseq/final bits, then either S-frame (poll/super) or I-frame
 * (sar/txseq) fields depending on the frame-type bit.  The sframe
 * flag assignments sit in numbering gaps of this extraction. */
886 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
888 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
889 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
891 if (enh & L2CAP_CTRL_FRAME_TYPE) {
894 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
895 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
902 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
903 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field; same structure as the
 * enhanced variant but with the wider L2CAP_EXT_CTRL_* layout. */
910 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
912 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
913 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
915 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
918 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
919 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
926 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
927 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field from the front of @skb, choosing
 * the extended or enhanced layout based on FLAG_EXT_CTRL. */
934 static inline void __unpack_control(struct l2cap_chan *chan,
937 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
938 __unpack_extended_control(get_unaligned_le32(skb->data),
939 &bt_cb(skb)->control);
940 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
942 __unpack_enhanced_control(get_unaligned_le16(skb->data),
943 &bt_cb(skb)->control);
944 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Inverse of __unpack_extended_control(): encode @control into a
 * 32-bit extended control field (S-frame or I-frame layout). */
948 static u32 __pack_extended_control(struct l2cap_ctrl *control)
952 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
953 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
955 if (control->sframe) {
956 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
957 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
958 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
960 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
961 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Inverse of __unpack_enhanced_control(): encode @control into a
 * 16-bit enhanced control field (S-frame or I-frame layout). */
967 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
971 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
972 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
974 if (control->sframe) {
975 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
976 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
977 packed |= L2CAP_CTRL_FRAME_TYPE;
979 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
980 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the encoded control field into @skb directly after the basic
 * L2CAP header, using the layout selected by FLAG_EXT_CTRL. */
986 static inline void __pack_control(struct l2cap_chan *chan,
987 struct l2cap_ctrl *control,
990 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
991 put_unaligned_le32(__pack_extended_control(control),
992 skb->data + L2CAP_HDR_SIZE);
994 put_unaligned_le16(__pack_enhanced_control(control),
995 skb->data + L2CAP_HDR_SIZE);
/* Size of the ERTM header for this channel: extended (32-bit control)
 * or enhanced (16-bit control) layout. */
999 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1001 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1002 return L2CAP_EXT_HDR_SIZE;
1004 return L2CAP_ENH_HDR_SIZE;
/* Build a supervisory (S-frame) PDU for @chan carrying the already
 * packed @control field, appending a CRC16 FCS when negotiated.
 * Returns the skb or ERR_PTR(-ENOMEM). */
1007 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1010 struct sk_buff *skb;
1011 struct l2cap_hdr *lh;
1012 int hlen = __ertm_hdr_size(chan);
1014 if (chan->fcs == L2CAP_FCS_CRC16)
1015 hlen += L2CAP_FCS_SIZE;
1017 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1020 return ERR_PTR(-ENOMEM);
1022 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1023 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1024 lh->cid = cpu_to_le16(chan->dcid);
1026 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1027 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1029 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1031 if (chan->fcs == L2CAP_FCS_CRC16) {
1032 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1033 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1036 skb->priority = HCI_PRIO_MAX;
/* Send an S-frame described by @control: tracks RNR state, records the
 * last acked sequence number (and stops the ack timer) for non-SREJ
 * frames, then builds and transmits the PDU.  Skipped while an AMP
 * channel move is in progress. */
1040 static void l2cap_send_sframe(struct l2cap_chan *chan,
1041 struct l2cap_ctrl *control)
1043 struct sk_buff *skb;
1046 BT_DBG("chan %p, control %p", chan, control);
1048 if (!control->sframe)
1051 if (__chan_is_moving(chan))
1054 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1058 if (control->super == L2CAP_SUPER_RR)
1059 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1060 else if (control->super == L2CAP_SUPER_RNR)
1061 set_bit(CONN_RNR_SENT, &chan->conn_state);
1063 if (control->super != L2CAP_SUPER_SREJ) {
1064 chan->last_acked_seq = control->reqseq;
1065 __clear_ack_timer(chan);
1068 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1069 control->final, control->poll, control->super);
1071 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1072 control_field = __pack_extended_control(control);
1074 control_field = __pack_enhanced_control(control);
1076 skb = l2cap_create_sframe_pdu(chan, control_field);
1078 l2cap_do_send(chan, skb);
/* Send an RR (ready) or RNR (not ready) S-frame acknowledging up to
 * buffer_seq; RNR is chosen while the local side is busy. */
1081 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1083 struct l2cap_ctrl control;
1085 BT_DBG("chan %p, poll %d", chan, poll);
1087 memset(&control, 0, sizeof(control));
1089 control.poll = poll;
1091 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1092 control.super = L2CAP_SUPER_RNR;
1094 control.super = L2CAP_SUPER_RR;
1096 control.reqseq = chan->buffer_seq;
1097 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding; non-connection-oriented
 * channels never have one pending. */
1100 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1102 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1105 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may use an AMP controller: high-speed
 * must be enabled, the remote must advertise the A2MP fixed channel,
 * a non-BR/EDR AMP controller must be up, and the channel policy must
 * prefer AMP. */
1108 static bool __amp_capable(struct l2cap_chan *chan)
1110 struct l2cap_conn *conn = chan->conn;
1111 struct hci_dev *hdev;
1112 bool amp_available = false;
1114 if (!conn->hs_enabled)
1117 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1120 read_lock(&hci_dev_list_lock);
1121 list_for_each_entry(hdev, &hci_dev_list, list) {
1122 if (hdev->amp_type != AMP_TYPE_BREDR &&
1123 test_bit(HCI_UP, &hdev->flags)) {
1124 amp_available = true;
1128 read_unlock(&hci_dev_list_lock);
1130 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1131 return amp_available;
/* Validate the channel's extended flow specification; the actual
 * checks are in a numbering gap of this extraction. */
1136 static bool l2cap_check_efs(struct l2cap_chan *chan)
1138 /* Check EFS parameters */
/* Send an L2CAP connection request for @chan, recording the command
 * ident and marking the connect as pending. */
1142 void l2cap_send_conn_req(struct l2cap_chan *chan)
1144 struct l2cap_conn *conn = chan->conn;
1145 struct l2cap_conn_req req;
1147 req.scid = cpu_to_le16(chan->scid);
1148 req.psm = chan->psm;
1150 chan->ident = l2cap_get_ident(conn);
1152 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1154 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send a CREATE_CHAN_REQ targeting AMP controller @amp_id (the
 * AMP-aware equivalent of l2cap_send_conn_req()). */
1157 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1159 struct l2cap_create_chan_req req;
1160 req.scid = cpu_to_le16(chan->scid);
1161 req.psm = chan->psm;
1162 req.amp_id = amp_id;
1164 chan->ident = l2cap_get_ident(chan->conn);
1166 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all timers, reset
 * retry counters on queued frames, clear SREJ/retransmit state and
 * park TX/RX in the move states with the remote marked busy. */
1170 static void l2cap_move_setup(struct l2cap_chan *chan)
1172 struct sk_buff *skb;
1174 BT_DBG("chan %p", chan);
1176 if (chan->mode != L2CAP_MODE_ERTM)
1179 __clear_retrans_timer(chan);
1180 __clear_monitor_timer(chan);
1181 __clear_ack_timer(chan);
1183 chan->retry_count = 0;
1184 skb_queue_walk(&chan->tx_q, skb) {
1185 if (bt_cb(skb)->control.retries)
1186 bt_cb(skb)->control.retries = 1;
1191 chan->expected_tx_seq = chan->buffer_seq;
1193 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1194 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1195 l2cap_seq_list_clear(&chan->retrans_list);
1196 l2cap_seq_list_clear(&chan->srej_list);
1197 skb_queue_purge(&chan->srej_q);
1199 chan->tx_state = L2CAP_TX_STATE_XMIT;
1200 chan->rx_state = L2CAP_RX_STATE_MOVE;
1202 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return to the stable move state and, for ERTM
 * channels, resynchronize via an explicit poll (initiator waits for F,
 * responder waits for P). */
1205 static void l2cap_move_done(struct l2cap_chan *chan)
1207 u8 move_role = chan->move_role;
1208 BT_DBG("chan %p", chan);
1210 chan->move_state = L2CAP_MOVE_STABLE;
1211 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1213 if (chan->mode != L2CAP_MODE_ERTM)
1216 switch (move_role) {
1217 case L2CAP_MOVE_ROLE_INITIATOR:
1218 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1219 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1221 case L2CAP_MOVE_ROLE_RESPONDER:
1222 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Promote @chan to BT_CONNECTED: clear all configuration state, stop
 * the channel timer, suspend LE channels that have no TX credits yet,
 * and notify the owner via ops->ready(). */
1227 static void l2cap_chan_ready(struct l2cap_chan *chan)
1229 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1230 chan->conf_state = 0;
1231 __clear_chan_timer(chan);
1233 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1234 chan->ops->suspend(chan);
1236 chan->state = BT_CONNECTED;
1238 chan->ops->ready(chan);
/* Send an LE credit-based connection request once per channel
 * (guarded by FLAG_LE_CONN_REQ_SENT). */
1241 static void l2cap_le_connect(struct l2cap_chan *chan)
1243 struct l2cap_conn *conn = chan->conn;
1244 struct l2cap_le_conn_req req;
1246 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1249 req.psm = chan->psm;
1250 req.scid = cpu_to_le16(chan->scid);
1251 req.mtu = cpu_to_le16(chan->imtu);
1252 req.mps = cpu_to_le16(chan->mps);
1253 req.credits = cpu_to_le16(chan->rx_credits);
1255 chan->ident = l2cap_get_ident(conn);
1257 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Start an LE channel once SMP security is satisfied; channels still
 * in BT_CONNECT issue the LE connection request, others become ready
 * (branch boundaries sit in numbering gaps of this extraction). */
1261 static void l2cap_le_start(struct l2cap_chan *chan)
1263 struct l2cap_conn *conn = chan->conn;
1265 if (!smp_conn_security(conn->hcon, chan->sec_level))
1269 l2cap_chan_ready(chan);
1273 if (chan->state == BT_CONNECT)
1274 l2cap_le_connect(chan);
/* Dispatch the connect: AMP discovery when the channel may use a
 * high-speed controller, LE start on LE links, plain CONN_REQ
 * otherwise. */
1277 static void l2cap_start_connection(struct l2cap_chan *chan)
1279 if (__amp_capable(chan)) {
1280 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1281 a2mp_discover_amp(chan);
1282 } else if (chan->conn->hcon->type == LE_LINK) {
1283 l2cap_le_start(chan);
1285 l2cap_send_conn_req(chan);
/* Send a one-shot feature-mask information request on @conn and arm
 * the info timer; no-op if a request was already sent. */
1289 static void l2cap_request_info(struct l2cap_conn *conn)
1291 struct l2cap_info_req req;
1293 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1296 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1298 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1299 conn->info_ident = l2cap_get_ident(conn);
1301 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1303 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Kick off connection establishment for @chan: LE links go straight
 * to the LE path; BR/EDR first triggers the feature-mask exchange and
 * only proceeds once it completed and security checks pass. */
1307 static void l2cap_do_start(struct l2cap_chan *chan)
1309 struct l2cap_conn *conn = chan->conn;
1311 if (conn->hcon->type == LE_LINK) {
1312 l2cap_le_start(chan);
1316 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1317 l2cap_request_info(conn);
1321 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1324 if (l2cap_chan_check_security(chan, true) &&
1325 __l2cap_no_conn_pending(chan))
1326 l2cap_start_connection(chan);
/* Check whether @mode is supported by both the local feature mask and
 * the remote @feat_mask.  NOTE(review): the guard around enabling
 * ERTM/streaming locally and the default case are in numbering gaps
 * of this extraction. */
1329 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1331 u32 local_feat_mask = l2cap_feat_mask;
1333 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1336 case L2CAP_MODE_ERTM:
1337 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1338 case L2CAP_MODE_STREAMING:
1339 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a DISCONN_REQ for @chan (A2MP channels skip the wire request),
 * stopping ERTM timers first, then report BT_DISCONN with @err to the
 * owner. */
1345 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1347 struct l2cap_conn *conn = chan->conn;
1348 struct l2cap_disconn_req req;
1353 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1354 __clear_retrans_timer(chan);
1355 __clear_monitor_timer(chan);
1356 __clear_ack_timer(chan);
1359 if (chan->scid == L2CAP_CID_A2MP) {
1360 l2cap_state_change(chan, BT_DISCONN);
1364 req.dcid = cpu_to_le16(chan->dcid);
1365 req.scid = cpu_to_le16(chan->scid);
1366 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1369 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1372 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn after the feature exchange completes:
 * non-conn-oriented channels become ready immediately; BT_CONNECT
 * channels either start connecting or are closed when their mode is
 * unsupported; BT_CONNECT2 (incoming) channels are answered with
 * success/pending/authorization-pending and, on success, the first
 * configuration request is sent. */
1373 static void l2cap_conn_start(struct l2cap_conn *conn)
1375 struct l2cap_chan *chan, *tmp;
1377 BT_DBG("conn %p", conn);
1379 mutex_lock(&conn->chan_lock);
1381 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1382 l2cap_chan_lock(chan);
1384 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1385 l2cap_chan_ready(chan);
1386 l2cap_chan_unlock(chan);
1390 if (chan->state == BT_CONNECT) {
1391 if (!l2cap_chan_check_security(chan, true) ||
1392 !__l2cap_no_conn_pending(chan)) {
1393 l2cap_chan_unlock(chan);
1397 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1398 && test_bit(CONF_STATE2_DEVICE,
1399 &chan->conf_state)) {
1400 l2cap_chan_close(chan, ECONNRESET);
1401 l2cap_chan_unlock(chan);
1405 l2cap_start_connection(chan);
1407 } else if (chan->state == BT_CONNECT2) {
1408 struct l2cap_conn_rsp rsp;
1410 rsp.scid = cpu_to_le16(chan->dcid);
1411 rsp.dcid = cpu_to_le16(chan->scid);
1413 if (l2cap_chan_check_security(chan, false)) {
1414 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1415 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1416 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1417 chan->ops->defer(chan);
1420 l2cap_state_change(chan, BT_CONFIG);
1421 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1422 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1425 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1426 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1429 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1432 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1433 rsp.result != L2CAP_CR_SUCCESS) {
1434 l2cap_chan_unlock(chan);
1438 set_bit(CONF_REQ_SENT, &chan->conf_state);
1439 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1440 l2cap_build_conf_req(chan, buf), buf);
1441 chan->num_conf_req++;
1444 l2cap_chan_unlock(chan);
1447 mutex_unlock(&conn->chan_lock);
/* LE link became ready: kick off SMP security for pending pairing and,
 * for slave-role links whose current connection interval falls outside
 * the configured [min, max] range, send a Connection Parameter Update
 * Request to the master.
 */
1450 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1452 struct hci_conn *hcon = conn->hcon;
1453 struct hci_dev *hdev = hcon->hdev;
1455 BT_DBG("%s conn %p", hdev->name, conn);
1457 /* For outgoing pairing which doesn't necessarily have an
1458 * associated socket (e.g. mgmt_pair_device).
1461 smp_conn_security(hcon, hcon->pending_sec_level);
1463 /* For LE slave connections, make sure the connection interval
1464 * is in the range of the minimum and maximum interval that has
1465 * been configured for this connection. If not, then trigger
1466 * the connection update procedure.
1468 if (hcon->role == HCI_ROLE_SLAVE &&
1469 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1470 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1471 struct l2cap_conn_param_update_req req;
1473 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1474 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1475 req.latency = cpu_to_le16(hcon->le_conn_latency);
1476 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1478 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1479 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* Link-level connection is up: request the feature mask on ACL links,
 * then walk all channels and start each according to link type and
 * channel state. Finally schedule deferred RX processing.
 */
1483 static void l2cap_conn_ready(struct l2cap_conn *conn)
1485 struct l2cap_chan *chan;
1486 struct hci_conn *hcon = conn->hcon;
1488 BT_DBG("conn %p", conn);
1490 if (hcon->type == ACL_LINK)
1491 l2cap_request_info(conn);
1493 mutex_lock(&conn->chan_lock);
1495 list_for_each_entry(chan, &conn->chan_l, list) {
1497 l2cap_chan_lock(chan);
/* A2MP channels are managed separately; skip them here */
1499 if (chan->scid == L2CAP_CID_A2MP) {
1500 l2cap_chan_unlock(chan);
1504 if (hcon->type == LE_LINK) {
1505 l2cap_le_start(chan);
1506 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
/* Fixed/connectionless channels are ready as soon as the feature
 * exchange has completed.
 */
1507 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1508 l2cap_chan_ready(chan);
1509 } else if (chan->state == BT_CONNECT) {
1510 l2cap_do_start(chan);
1513 l2cap_chan_unlock(chan);
1516 mutex_unlock(&conn->chan_lock);
1518 if (hcon->type == LE_LINK)
1519 l2cap_le_conn_ready(conn);
/* Process any RX frames that were queued before the conn was ready */
1521 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1524 /* Notify sockets that we cannot guaranty reliability anymore */
1525 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1527 struct l2cap_chan *chan;
1529 BT_DBG("conn %p", conn);
1531 mutex_lock(&conn->chan_lock);
1533 list_for_each_entry(chan, &conn->chan_l, list) {
1534 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1535 l2cap_chan_set_err(chan, err);
1538 mutex_unlock(&conn->chan_lock);
1541 static void l2cap_info_timeout(struct work_struct *work)
1543 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1546 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1547 conn->info_ident = 0;
1549 l2cap_conn_start(conn);
1554 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1555 * callback is called during registration. The ->remove callback is called
1556 * during unregistration.
1557 * An l2cap_user object can either be explicitly unregistered or when the
1558 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1559 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1560 * External modules must own a reference to the l2cap_conn object if they intend
1561 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1562 * any time if they don't.
/* Register an external l2cap_user on the connection. Fails if the user
 * is already registered (list pointers non-NULL), if the conn was already
 * deleted (hchan == NULL), or if the user's probe() callback rejects it.
 * Returns 0 on success, negative errno otherwise.
 */
1565 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1567 struct hci_dev *hdev = conn->hcon->hdev;
1570 /* We need to check whether l2cap_conn is registered. If it is not, we
1571 * must not register the l2cap_user. l2cap_conn_del() unregisters
1572 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1573 * relies on the parent hci_conn object to be locked. This itself relies
1574 * on the hci_dev object to be locked. So we must lock the hci device
/* Already on a list -> already registered */
1579 if (user->list.next || user->list.prev) {
1584 /* conn->hchan is NULL after l2cap_conn_del() was called */
1590 ret = user->probe(conn, user);
1594 list_add(&user->list, &conn->users);
1598 hci_dev_unlock(hdev);
/* Unregister a previously registered l2cap_user: unlink it, clear its
 * list pointers (used as the "registered" marker by l2cap_register_user)
 * and invoke its remove() callback. No-op if it was never registered.
 * Locking mirrors l2cap_register_user (hci_dev lock).
 */
1603 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1605 struct hci_dev *hdev = conn->hcon->hdev;
1609 if (!user->list.next || !user->list.prev)
1612 list_del(&user->list);
1613 user->list.next = NULL;
1614 user->list.prev = NULL;
1615 user->remove(conn, user);
1618 hci_dev_unlock(hdev);
1622 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1624 struct l2cap_user *user;
1626 while (!list_empty(&conn->users)) {
1627 user = list_first_entry(&conn->users, struct l2cap_user, list);
1628 list_del(&user->list);
1629 user->list.next = NULL;
1630 user->list.prev = NULL;
1631 user->remove(conn, user);
/* Tear down the L2CAP connection attached to 'hcon': cancel pending work,
 * unregister users, delete and close every channel with error 'err',
 * release the HCI channel and drop the conn reference. Called with the
 * parent hci_conn/hci_dev locking in place (see l2cap_register_user).
 */
1635 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1637 struct l2cap_conn *conn = hcon->l2cap_data;
1638 struct l2cap_chan *chan, *l;
1643 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled frame and queued RX */
1645 kfree_skb(conn->rx_skb);
1647 skb_queue_purge(&conn->pending_rx);
1649 /* We can not call flush_work(&conn->pending_rx_work) here since we
1650 * might block if we are running on a worker from the same workqueue
1651 * pending_rx_work is waiting on.
1653 if (work_pending(&conn->pending_rx_work))
1654 cancel_work_sync(&conn->pending_rx_work);
1656 if (work_pending(&conn->id_addr_update_work))
1657 cancel_work_sync(&conn->id_addr_update_work);
1659 l2cap_unregister_all_users(conn);
1661 /* Force the connection to be immediately dropped */
1662 hcon->disc_timeout = 0;
1664 mutex_lock(&conn->chan_lock);
/* Hold each chan across close so it survives l2cap_chan_del() */
1667 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1668 l2cap_chan_hold(chan);
1669 l2cap_chan_lock(chan);
1671 l2cap_chan_del(chan, err);
1673 l2cap_chan_unlock(chan);
1675 chan->ops->close(chan);
1676 l2cap_chan_put(chan);
1679 mutex_unlock(&conn->chan_lock);
1681 hci_chan_del(conn->hchan);
/* Stop the feature-mask timeout if an info request is still in flight */
1683 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1684 cancel_delayed_work_sync(&conn->info_timer);
1686 hcon->l2cap_data = NULL;
1688 l2cap_conn_put(conn);
1691 static void l2cap_conn_free(struct kref *ref)
1693 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1695 hci_conn_put(conn->hcon);
1699 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1701 kref_get(&conn->ref);
1704 EXPORT_SYMBOL(l2cap_conn_get);
1706 void l2cap_conn_put(struct l2cap_conn *conn)
1708 kref_put(&conn->ref, l2cap_conn_free);
1710 EXPORT_SYMBOL(l2cap_conn_put);
1712 /* ---- Socket interface ---- */
1714 /* Find socket with psm and source / destination bdaddr.
1715 * Returns closest match.
/* An exact src+dst match returns immediately; otherwise the best
 * wildcard (BDADDR_ANY) candidate is remembered in c1 and returned with
 * a reference held. Channels whose address type does not fit the link
 * type (BR/EDR vs LE) are skipped.
 */
1717 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1722 struct l2cap_chan *c, *c1 = NULL;
1724 read_lock(&chan_list_lock);
1726 list_for_each_entry(c, &chan_list, global_l) {
1727 if (state && c->state != state)
1730 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1733 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1736 if (c->psm == psm) {
1737 int src_match, dst_match;
1738 int src_any, dst_any;
/* Exact match beats any wildcard candidate */
1741 src_match = !bacmp(&c->src, src);
1742 dst_match = !bacmp(&c->dst, dst);
1743 if (src_match && dst_match) {
1745 read_unlock(&chan_list_lock);
/* Remember the closest wildcard match as fallback */
1750 src_any = !bacmp(&c->src, BDADDR_ANY);
1751 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1752 if ((src_match && dst_any) || (src_any && dst_match) ||
1753 (src_any && dst_any))
1759 l2cap_chan_hold(c1);
1761 read_unlock(&chan_list_lock);
1766 static void l2cap_monitor_timeout(struct work_struct *work)
1768 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1769 monitor_timer.work);
1771 BT_DBG("chan %p", chan);
1773 l2cap_chan_lock(chan);
1776 l2cap_chan_unlock(chan);
1777 l2cap_chan_put(chan);
1781 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1783 l2cap_chan_unlock(chan);
1784 l2cap_chan_put(chan);
1787 static void l2cap_retrans_timeout(struct work_struct *work)
1789 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1790 retrans_timer.work);
1792 BT_DBG("chan %p", chan);
1794 l2cap_chan_lock(chan);
1797 l2cap_chan_unlock(chan);
1798 l2cap_chan_put(chan);
1802 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1803 l2cap_chan_unlock(chan);
1804 l2cap_chan_put(chan);
/* Transmit all queued PDUs in streaming mode: append 'skbs' to tx_q and
 * drain it, stamping each frame with the next TX sequence number and an
 * FCS trailer when CRC16 is negotiated. No retransmission bookkeeping —
 * streaming mode frames are fire-and-forget.
 */
1807 static void l2cap_streaming_send(struct l2cap_chan *chan,
1808 struct sk_buff_head *skbs)
1810 struct sk_buff *skb;
1811 struct l2cap_ctrl *control;
1813 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't send while an AMP channel move is in progress */
1815 if (__chan_is_moving(chan))
1818 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1820 while (!skb_queue_empty(&chan->tx_q)) {
1822 skb = skb_dequeue(&chan->tx_q);
1824 bt_cb(skb)->control.retries = 1;
1825 control = &bt_cb(skb)->control;
1827 control->reqseq = 0;
1828 control->txseq = chan->next_tx_seq;
1830 __pack_control(chan, control, skb);
1832 if (chan->fcs == L2CAP_FCS_CRC16) {
1833 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1834 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1837 l2cap_do_send(chan, skb);
1839 BT_DBG("Sent txseq %u", control->txseq);
1841 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1842 chan->frames_sent++;
/* Transmit as many queued I-frames as the ERTM TX window allows. Each
 * frame is stamped with txseq/reqseq, FCS-appended if negotiated, and a
 * clone is handed to the HCI layer while the original stays in tx_q for
 * possible retransmission. Returns the number of frames sent (per the
 * final BT_DBG; the return statement itself was lost in extraction).
 */
1846 static int l2cap_ertm_send(struct l2cap_chan *chan)
1848 struct sk_buff *skb, *tx_skb;
1849 struct l2cap_ctrl *control;
1852 BT_DBG("chan %p", chan);
1854 if (chan->state != BT_CONNECTED)
1857 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1860 if (__chan_is_moving(chan))
/* Stop when the send queue empties, the remote window fills, or the
 * state machine leaves XMIT.
 */
1863 while (chan->tx_send_head &&
1864 chan->unacked_frames < chan->remote_tx_win &&
1865 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1867 skb = chan->tx_send_head;
1869 bt_cb(skb)->control.retries = 1;
1870 control = &bt_cb(skb)->control;
/* Piggyback an F-bit if one is owed to the remote */
1872 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Every I-frame also acknowledges everything up to buffer_seq */
1875 control->reqseq = chan->buffer_seq;
1876 chan->last_acked_seq = chan->buffer_seq;
1877 control->txseq = chan->next_tx_seq;
1879 __pack_control(chan, control, skb);
1881 if (chan->fcs == L2CAP_FCS_CRC16) {
1882 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1883 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1886 /* Clone after data has been modified. Data is assumed to be
1887 read-only (for locking purposes) on cloned sk_buffs.
1889 tx_skb = skb_clone(skb, GFP_KERNEL);
1894 __set_retrans_timer(chan);
1896 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1897 chan->unacked_frames++;
1898 chan->frames_sent++;
/* Advance tx_send_head; NULL once the queue tail has been sent */
1901 if (skb_queue_is_last(&chan->tx_q, skb))
1902 chan->tx_send_head = NULL;
1904 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1906 l2cap_do_send(chan, tx_skb);
1907 BT_DBG("Sent txseq %u", control->txseq);
1910 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1911 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list. For each
 * frame: bump its retry count (disconnect if max_tx is exceeded), refresh
 * reqseq/F-bit in a private copy of the control field, rewrite the control
 * word (and FCS) in place in a clone/copy, and hand it to the HCI layer.
 */
1916 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1918 struct l2cap_ctrl control;
1919 struct sk_buff *skb;
1920 struct sk_buff *tx_skb;
1923 BT_DBG("chan %p", chan);
1925 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1928 if (__chan_is_moving(chan))
1931 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1932 seq = l2cap_seq_list_pop(&chan->retrans_list);
1934 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
/* Frame already acked and freed — nothing to retransmit */
1936 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1941 bt_cb(skb)->control.retries++;
1942 control = bt_cb(skb)->control;
/* Give up and disconnect once the retry limit is exceeded */
1944 if (chan->max_tx != 0 &&
1945 bt_cb(skb)->control.retries > chan->max_tx) {
1946 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1947 l2cap_send_disconn_req(chan, ECONNRESET);
1948 l2cap_seq_list_clear(&chan->retrans_list);
1952 control.reqseq = chan->buffer_seq;
1953 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1958 if (skb_cloned(skb)) {
1959 /* Cloned sk_buffs are read-only, so we need a
1962 tx_skb = skb_copy(skb, GFP_KERNEL);
1964 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failed: drop the whole retransmit batch */
1968 l2cap_seq_list_clear(&chan->retrans_list);
1972 /* Update skb contents */
1973 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1974 put_unaligned_le32(__pack_extended_control(&control),
1975 tx_skb->data + L2CAP_HDR_SIZE);
1977 put_unaligned_le16(__pack_enhanced_control(&control),
1978 tx_skb->data + L2CAP_HDR_SIZE);
/* Control word changed, so the FCS must be recomputed */
1982 if (chan->fcs == L2CAP_FCS_CRC16) {
1983 u16 fcs = crc16(0, (u8 *) tx_skb->data,
1984 tx_skb->len - L2CAP_FCS_SIZE);
1985 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
1989 l2cap_do_send(chan, tx_skb);
1991 BT_DBG("Resent txseq %d", control.txseq);
1993 chan->last_acked_seq = chan->buffer_seq;
1997 static void l2cap_retransmit(struct l2cap_chan *chan,
1998 struct l2cap_ctrl *control)
2000 BT_DBG("chan %p, control %p", chan, control);
2002 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2003 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame starting at control->reqseq: rebuild
 * retrans_list from the TX queue (first locating the starting frame, then
 * appending everything up to tx_send_head) and run the resend engine.
 * Skipped entirely while the remote reports busy.
 */
2006 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2007 struct l2cap_ctrl *control)
2009 struct sk_buff *skb;
2011 BT_DBG("chan %p, control %p", chan, control);
/* An F-bit is owed when this was triggered by a poll */
2014 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2016 l2cap_seq_list_clear(&chan->retrans_list);
2018 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2021 if (chan->unacked_frames) {
/* Find the first frame to retransmit (reqseq), stopping early at
 * the unsent portion of the queue.
 */
2022 skb_queue_walk(&chan->tx_q, skb) {
2023 if (bt_cb(skb)->control.txseq == control->reqseq ||
2024 skb == chan->tx_send_head)
/* Queue everything from there up to (not including) tx_send_head */
2028 skb_queue_walk_from(&chan->tx_q, skb) {
2029 if (skb == chan->tx_send_head)
2032 l2cap_seq_list_append(&chan->retrans_list,
2033 bt_cb(skb)->control.txseq);
2036 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames. Sends RNR when we are locally busy,
 * otherwise tries to piggyback the ack on outgoing I-frames; if none were
 * sent, sends an explicit RR once the unacked window reaches 3/4 of
 * ack_win, else (re)arms the delayed ack timer.
 */
2040 static void l2cap_send_ack(struct l2cap_chan *chan)
2042 struct l2cap_ctrl control;
2043 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2044 chan->last_acked_seq);
2047 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2048 chan, chan->last_acked_seq, chan->buffer_seq);
2050 memset(&control, 0, sizeof(control));
/* Locally busy: tell the remote to stop sending (RNR) */
2053 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2054 chan->rx_state == L2CAP_RX_STATE_RECV) {
2055 __clear_ack_timer(chan);
2056 control.super = L2CAP_SUPER_RNR;
2057 control.reqseq = chan->buffer_seq;
2058 l2cap_send_sframe(chan, &control);
2060 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2061 l2cap_ertm_send(chan);
2062 /* If any i-frames were sent, they included an ack */
2063 if (chan->buffer_seq == chan->last_acked_seq)
2067 /* Ack now if the window is 3/4ths full.
2068 * Calculate without mul or div
/* threshold = ack_win * 3 (later shifted down — the >>= 2 line was
 * dropped in extraction, per the numbering gap after 2071).
 */
2070 threshold = chan->ack_win;
2071 threshold += threshold << 1;
2074 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2077 if (frames_to_ack >= threshold) {
2078 __clear_ack_timer(chan);
2079 control.super = L2CAP_SUPER_RR;
2080 control.reqseq = chan->buffer_seq;
2081 l2cap_send_sframe(chan, &control);
2086 __set_ack_timer(chan);
/* Copy 'len' bytes of user data from msg->msg_iov into 'skb': the first
 * 'count' bytes go into the skb head (already sized by the caller), the
 * remainder into a chain of continuation fragments on frag_list, each at
 * most conn->mtu bytes. Returns 0 on success or a negative errno.
 */
2090 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2091 struct msghdr *msg, int len,
2092 int count, struct sk_buff *skb)
2094 struct l2cap_conn *conn = chan->conn;
2095 struct sk_buff **frag;
2098 if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
2099 msg->msg_iov, count))
2105 /* Continuation fragments (no L2CAP header) */
2106 frag = &skb_shinfo(skb)->frag_list;
2108 struct sk_buff *tmp;
2110 count = min_t(unsigned int, conn->mtu, len);
/* Fragments carry no L2CAP header, hence headroom 0 */
2112 tmp = chan->ops->alloc_skb(chan, 0, count,
2113 msg->msg_flags & MSG_DONTWAIT);
2115 return PTR_ERR(tmp);
2119 if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
2120 msg->msg_iov, count))
/* Account the fragment's bytes on the parent skb */
2126 skb->len += (*frag)->len;
2127 skb->data_len += (*frag)->len;
2129 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the PSM,
 * followed by 'len' bytes copied from the user iovec (fragmented to
 * conn->mtu as needed). Returns the skb or an ERR_PTR.
 */
2135 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2136 struct msghdr *msg, size_t len)
2138 struct l2cap_conn *conn = chan->conn;
2139 struct sk_buff *skb;
2140 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2141 struct l2cap_hdr *lh;
2143 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2144 __le16_to_cpu(chan->psm), len);
2146 count = min_t(unsigned int, (conn->mtu - hlen), len);
2148 skb = chan->ops->alloc_skb(chan, hlen, count,
2149 msg->msg_flags & MSG_DONTWAIT);
2153 /* Create L2CAP header */
2154 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2155 lh->cid = cpu_to_le16(chan->dcid);
/* Header 'len' counts the PSM field plus the payload */
2156 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2157 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2159 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2160 if (unlikely(err < 0)) {
2162 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by 'len'
 * bytes copied from the user iovec. Returns the skb or an ERR_PTR.
 */
2167 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2168 struct msghdr *msg, size_t len)
2170 struct l2cap_conn *conn = chan->conn;
2171 struct sk_buff *skb;
2173 struct l2cap_hdr *lh;
2175 BT_DBG("chan %p len %zu", chan, len);
2177 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2179 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2180 msg->msg_flags & MSG_DONTWAIT);
2184 /* Create L2CAP header */
2185 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2186 lh->cid = cpu_to_le16(chan->dcid);
2187 lh->len = cpu_to_le16(len);
2189 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2190 if (unlikely(err < 0)) {
2192 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control field
 * (filled in at transmit time), optional SDU-length field for the first
 * segment of a segmented SDU, payload, with room reserved for an FCS.
 * Returns the skb or an ERR_PTR.
 */
2197 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2198 struct msghdr *msg, size_t len,
2201 struct l2cap_conn *conn = chan->conn;
2202 struct sk_buff *skb;
2203 int err, count, hlen;
2204 struct l2cap_hdr *lh;
2206 BT_DBG("chan %p len %zu", chan, len);
2209 return ERR_PTR(-ENOTCONN);
2211 hlen = __ertm_hdr_size(chan);
/* First segment of a segmented SDU carries the total SDU length */
2214 hlen += L2CAP_SDULEN_SIZE;
2216 if (chan->fcs == L2CAP_FCS_CRC16)
2217 hlen += L2CAP_FCS_SIZE;
2219 count = min_t(unsigned int, (conn->mtu - hlen), len);
2221 skb = chan->ops->alloc_skb(chan, hlen, count,
2222 msg->msg_flags & MSG_DONTWAIT);
2226 /* Create L2CAP header */
2227 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2228 lh->cid = cpu_to_le16(chan->dcid);
2229 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2231 /* Control header is populated later */
2232 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2233 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2235 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2238 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2240 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2241 if (unlikely(err < 0)) {
2243 return ERR_PTR(err);
/* Record FCS mode with the frame; retries is set when first sent */
2246 bt_cb(skb)->control.fcs = chan->fcs;
2247 bt_cb(skb)->control.retries = 0;
/* Segment an SDU of 'len' bytes into ERTM/streaming PDUs appended to
 * seg_queue. PDU size is derived from the HCI MTU (capped for BR/EDR and
 * by the remote MPS) so each PDU fits one HCI fragment. SAR is marked
 * UNSEGMENTED for a single PDU, else START/CONTINUE/END.
 */
2251 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2252 struct sk_buff_head *seg_queue,
2253 struct msghdr *msg, size_t len)
2255 struct sk_buff *skb;
2260 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2262 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2263 * so fragmented skbs are not used. The HCI layer's handling
2264 * of fragmented skbs is not compatible with ERTM's queueing.
2267 /* PDU size is derived from the HCI MTU */
2268 pdu_len = chan->conn->mtu;
2270 /* Constrain PDU size for BR/EDR connections */
2272 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2274 /* Adjust for largest possible L2CAP overhead. */
2276 pdu_len -= L2CAP_FCS_SIZE;
2278 pdu_len -= __ertm_hdr_size(chan);
2280 /* Remote device may have requested smaller PDUs */
2281 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2283 if (len <= pdu_len) {
2284 sar = L2CAP_SAR_UNSEGMENTED;
2288 sar = L2CAP_SAR_START;
/* Build one PDU per iteration (loop header lost in extraction) */
2293 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2296 __skb_queue_purge(seg_queue);
2297 return PTR_ERR(skb);
2300 bt_cb(skb)->control.sar = sar;
2301 __skb_queue_tail(seg_queue, skb);
/* Decide the SAR marking for the next segment */
2307 if (len <= pdu_len) {
2308 sar = L2CAP_SAR_END;
2311 sar = L2CAP_SAR_CONTINUE;
/* Build an LE credit-based flow control PDU: L2CAP header, optional
 * SDU-length field on the first segment, then payload from the user
 * iovec. Returns the skb or an ERR_PTR.
 */
2318 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2320 size_t len, u16 sdulen)
2322 struct l2cap_conn *conn = chan->conn;
2323 struct sk_buff *skb;
2324 int err, count, hlen;
2325 struct l2cap_hdr *lh;
2327 BT_DBG("chan %p len %zu", chan, len);
2330 return ERR_PTR(-ENOTCONN);
2332 hlen = L2CAP_HDR_SIZE;
/* First segment also carries the total SDU length */
2335 hlen += L2CAP_SDULEN_SIZE;
2337 count = min_t(unsigned int, (conn->mtu - hlen), len);
2339 skb = chan->ops->alloc_skb(chan, hlen, count,
2340 msg->msg_flags & MSG_DONTWAIT);
2344 /* Create L2CAP header */
2345 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2346 lh->cid = cpu_to_le16(chan->dcid);
2347 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2350 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2352 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2353 if (unlikely(err < 0)) {
2355 return ERR_PTR(err);
/* Segment an SDU into LE flow-control PDUs appended to seg_queue. The
 * first PDU reserves L2CAP_SDULEN_SIZE for the SDU length; subsequent
 * PDUs reclaim that space (pdu_len is restored after the first segment).
 */
2361 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2362 struct sk_buff_head *seg_queue,
2363 struct msghdr *msg, size_t len)
2365 struct sk_buff *skb;
2369 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
/* First PDU: payload budget loses the SDU-length field */
2372 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2378 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2380 __skb_queue_purge(seg_queue);
2381 return PTR_ERR(skb);
2384 __skb_queue_tail(seg_queue, skb);
/* After the first segment, no SDU-length field is carried */
2390 pdu_len += L2CAP_SDULEN_SIZE;
/* Entry point for sending user data on a channel. Dispatches on channel
 * type and mode: connectionless G-frames, LE credit-based flow control,
 * basic B-frames, or ERTM/streaming segmentation plus the TX state
 * machine. Returns 'len' on success or a negative errno. The channel
 * lock may be dropped while skbs are allocated, so the state is
 * re-checked after each allocation.
 */
2397 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2399 struct sk_buff *skb;
2401 struct sk_buff_head seg_queue;
2406 /* Connectionless channel */
2407 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2408 skb = l2cap_create_connless_pdu(chan, msg, len);
2410 return PTR_ERR(skb);
2412 /* Channel lock is released before requesting new skb and then
2413 * reacquired thus we need to recheck channel state.
2415 if (chan->state != BT_CONNECTED) {
2420 l2cap_do_send(chan, skb);
2424 switch (chan->mode) {
2425 case L2CAP_MODE_LE_FLOWCTL:
2426 /* Check outgoing MTU */
2427 if (len > chan->omtu)
/* No credits: cannot send anything right now */
2430 if (!chan->tx_credits)
2433 __skb_queue_head_init(&seg_queue);
2435 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2437 if (chan->state != BT_CONNECTED) {
2438 __skb_queue_purge(&seg_queue);
2445 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
/* Send as many queued PDUs as the remote's credits allow */
2447 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2448 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
/* Out of credits: ask the owner to stop producing data */
2452 if (!chan->tx_credits)
2453 chan->ops->suspend(chan);
2459 case L2CAP_MODE_BASIC:
2460 /* Check outgoing MTU */
2461 if (len > chan->omtu)
2464 /* Create a basic PDU */
2465 skb = l2cap_create_basic_pdu(chan, msg, len);
2467 return PTR_ERR(skb);
2469 /* Channel lock is released before requesting new skb and then
2470 * reacquired thus we need to recheck channel state.
2472 if (chan->state != BT_CONNECTED) {
2477 l2cap_do_send(chan, skb);
2481 case L2CAP_MODE_ERTM:
2482 case L2CAP_MODE_STREAMING:
2483 /* Check outgoing MTU */
2484 if (len > chan->omtu) {
2489 __skb_queue_head_init(&seg_queue);
2491 /* Do segmentation before calling in to the state machine,
2492 * since it's possible to block while waiting for memory
2495 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2497 /* The channel could have been closed while segmenting,
2498 * check that it is still connected.
2500 if (chan->state != BT_CONNECTED) {
2501 __skb_queue_purge(&seg_queue);
2508 if (chan->mode == L2CAP_MODE_ERTM)
2509 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST)
2511 l2cap_streaming_send(chan, &seg_queue);
2515 /* If the skbs were not queued for sending, they'll still be in
2516 * seg_queue and need to be purged.
2518 __skb_queue_purge(&seg_queue);
/* NOTE(review): this BT_DBG reports chan->mode, so "bad state" really
 * means "unhandled mode" — confirm wording against the original file.
 */
2522 BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every sequence number between expected_tx_seq
 * and 'txseq' that has not already been received out of order (i.e. is
 * not sitting in srej_q), recording each on srej_list. Then advance
 * expected_tx_seq past txseq.
 */
2530 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2532 struct l2cap_ctrl control;
2535 BT_DBG("chan %p, txseq %u", chan, txseq);
2537 memset(&control, 0, sizeof(control));
2539 control.super = L2CAP_SUPER_SREJ;
2541 for (seq = chan->expected_tx_seq; seq != txseq;
2542 seq = __next_seq(chan, seq)) {
2543 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2544 control.reqseq = seq;
2545 l2cap_send_sframe(chan, &control);
2546 l2cap_seq_list_append(&chan->srej_list, seq);
2550 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested missing frame (the
 * tail of srej_list). No-op when the list is empty.
 */
2553 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2555 struct l2cap_ctrl control;
2557 BT_DBG("chan %p", chan);
2559 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2562 memset(&control, 0, sizeof(control));
2564 control.super = L2CAP_SUPER_SREJ;
2565 control.reqseq = chan->srej_list.tail;
2566 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding missing frame except 'txseq',
 * cycling srej_list exactly once: each popped entry (other than txseq)
 * is re-requested and appended back. The initial head is captured so the
 * loop terminates after one full pass.
 */
2569 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2571 struct l2cap_ctrl control;
2575 BT_DBG("chan %p, txseq %u", chan, txseq);
2577 memset(&control, 0, sizeof(control));
2579 control.super = L2CAP_SUPER_SREJ;
2581 /* Capture initial list head to allow only one pass through the list. */
2582 initial_head = chan->srej_list.head;
2585 seq = l2cap_seq_list_pop(&chan->srej_list);
2586 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2589 control.reqseq = seq;
2590 l2cap_send_sframe(chan, &control);
2591 l2cap_seq_list_append(&chan->srej_list, seq);
2592 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every frame in tx_q whose
 * sequence number lies in [expected_ack_seq, reqseq) and update the
 * unacked count. Stops the retransmission timer once everything is
 * acknowledged.
 */
2595 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2597 struct sk_buff *acked_skb;
2600 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or this ack adds no new information */
2602 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2605 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2606 chan->expected_ack_seq, chan->unacked_frames);
2608 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2609 ackseq = __next_seq(chan, ackseq)) {
2611 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2613 skb_unlink(acked_skb, &chan->tx_q);
2614 kfree_skb(acked_skb);
2615 chan->unacked_frames--;
2619 chan->expected_ack_seq = reqseq;
2621 if (chan->unacked_frames == 0)
2622 __clear_retrans_timer(chan);
2624 BT_DBG("unacked_frames %u", chan->unacked_frames);
2627 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2629 BT_DBG("chan %p", chan);
2631 chan->expected_tx_seq = chan->buffer_seq;
2632 l2cap_seq_list_clear(&chan->srej_list);
2633 skb_queue_purge(&chan->srej_q);
2634 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state. Handles new data requests, local
 * busy transitions, incoming acks, explicit polls and the retransmission
 * timeout. Poll-type events arm the monitor timer and move the machine
 * to WAIT_F (waiting for an F-bit from the remote).
 */
2637 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2638 struct l2cap_ctrl *control,
2639 struct sk_buff_head *skbs, u8 event)
2641 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2645 case L2CAP_EV_DATA_REQUEST:
2646 if (chan->tx_send_head == NULL)
2647 chan->tx_send_head = skb_peek(skbs);
2649 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2650 l2cap_ertm_send(chan);
2652 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2653 BT_DBG("Enter LOCAL_BUSY");
2654 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2656 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2657 /* The SREJ_SENT state must be aborted if we are to
2658 * enter the LOCAL_BUSY state.
2660 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack() sends RNR while CONN_LOCAL_BUSY is set */
2663 l2cap_send_ack(chan);
2666 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2667 BT_DBG("Exit LOCAL_BUSY");
2668 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we had sent RNR, poll the remote (RR with P=1) and wait for F */
2670 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2671 struct l2cap_ctrl local_control;
2673 memset(&local_control, 0, sizeof(local_control));
2674 local_control.sframe = 1;
2675 local_control.super = L2CAP_SUPER_RR;
2676 local_control.poll = 1;
2677 local_control.reqseq = chan->buffer_seq;
2678 l2cap_send_sframe(chan, &local_control);
2680 chan->retry_count = 1;
2681 __set_monitor_timer(chan);
2682 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2685 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2686 l2cap_process_reqseq(chan, control->reqseq);
2688 case L2CAP_EV_EXPLICIT_POLL:
2689 l2cap_send_rr_or_rnr(chan, 1);
2690 chan->retry_count = 1;
2691 __set_monitor_timer(chan);
2692 __clear_ack_timer(chan);
2693 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2695 case L2CAP_EV_RETRANS_TO:
2696 l2cap_send_rr_or_rnr(chan, 1);
2697 chan->retry_count = 1;
2698 __set_monitor_timer(chan);
2699 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2701 case L2CAP_EV_RECV_FBIT:
2702 /* Nothing to process */
/* ERTM TX state machine, WAIT_F state (a poll is outstanding). New data
 * is queued but not sent; a received F-bit clears the monitor timer and
 * returns the machine to XMIT; a monitor timeout re-polls until max_tx is
 * exhausted, then disconnects.
 */
2709 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2710 struct l2cap_ctrl *control,
2711 struct sk_buff_head *skbs, u8 event)
2713 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2717 case L2CAP_EV_DATA_REQUEST:
2718 if (chan->tx_send_head == NULL)
2719 chan->tx_send_head = skb_peek(skbs);
2720 /* Queue data, but don't send. */
2721 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2723 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2724 BT_DBG("Enter LOCAL_BUSY");
2725 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2727 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2728 /* The SREJ_SENT state must be aborted if we are to
2729 * enter the LOCAL_BUSY state.
2731 l2cap_abort_rx_srej_sent(chan);
2734 l2cap_send_ack(chan);
2737 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2738 BT_DBG("Exit LOCAL_BUSY");
2739 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2741 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2742 struct l2cap_ctrl local_control;
2743 memset(&local_control, 0, sizeof(local_control));
2744 local_control.sframe = 1;
2745 local_control.super = L2CAP_SUPER_RR;
2746 local_control.poll = 1;
2747 local_control.reqseq = chan->buffer_seq;
2748 l2cap_send_sframe(chan, &local_control);
2750 chan->retry_count = 1;
2751 __set_monitor_timer(chan);
2752 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2755 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2756 l2cap_process_reqseq(chan, control->reqseq);
2760 case L2CAP_EV_RECV_FBIT:
/* The awaited F-bit arrived: stop monitoring, resume normal TX */
2761 if (control && control->final) {
2762 __clear_monitor_timer(chan);
2763 if (chan->unacked_frames > 0)
2764 __set_retrans_timer(chan);
2765 chan->retry_count = 0;
2766 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* NOTE(review): format string "0x2.2%x" is malformed — almost
 * certainly meant "0x%2.2x"; as written it prints a literal
 * "0x2.2" followed by the hex value.
 */
2767 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2770 case L2CAP_EV_EXPLICIT_POLL:
/* Already polling; ignore */
2773 case L2CAP_EV_MONITOR_TO:
2774 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2775 l2cap_send_rr_or_rnr(chan, 1);
2776 __set_monitor_timer(chan);
2777 chan->retry_count++;
/* Retry budget exhausted: give up on the link */
2779 l2cap_send_disconn_req(chan, ECONNABORTED);
2787 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2788 struct sk_buff_head *skbs, u8 event)
2790 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2791 chan, control, skbs, event, chan->tx_state);
2793 switch (chan->tx_state) {
2794 case L2CAP_TX_STATE_XMIT:
2795 l2cap_tx_state_xmit(chan, control, skbs, event);
2797 case L2CAP_TX_STATE_WAIT_F:
2798 l2cap_tx_state_wait_f(chan, control, skbs, event);
2806 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2807 struct l2cap_ctrl *control)
2809 BT_DBG("chan %p, control %p", chan, control);
2810 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2813 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2814 struct l2cap_ctrl *control)
2816 BT_DBG("chan %p, control %p", chan, control);
2817 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2820 /* Copy frame to all raw sockets on that connection */
/* Clones the skb once per raw channel and delivers it via the channel's
 * recv() op; the originating channel is skipped.
 */
2821 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2823 struct sk_buff *nskb;
2824 struct l2cap_chan *chan;
2826 BT_DBG("conn %p", conn);
2828 mutex_lock(&conn->chan_lock);
2830 list_for_each_entry(chan, &conn->chan_l, list) {
2831 if (chan->chan_type != L2CAP_CHAN_RAW)
2834 /* Don't send frame to the channel it came from */
2835 if (bt_cb(skb)->chan == chan)
2838 nskb = skb_clone(skb, GFP_KERNEL);
/* recv() takes ownership on success; the clone is freed by the
 * channel (or dropped on failure — the kfree line was lost in
 * extraction, per the numbering gap after 2841).
 */
2841 if (chan->ops->recv(chan, nskb))
2845 mutex_unlock(&conn->chan_lock);
2848 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header (signalling CID chosen by
 * link type), command header (code/ident/len) and 'dlen' bytes of
 * payload, fragmented to conn->mtu via frag_list when needed. Returns
 * the skb or NULL on failure.
 */
2849 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2850 u8 ident, u16 dlen, void *data)
2852 struct sk_buff *skb, **frag;
2853 struct l2cap_cmd_hdr *cmd;
2854 struct l2cap_hdr *lh;
2857 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2858 conn, code, ident, dlen);
/* MTU must at least hold the two headers */
2860 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2863 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2864 count = min_t(unsigned int, conn->mtu, len);
2866 skb = bt_skb_alloc(count, GFP_KERNEL);
2870 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2871 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2873 if (conn->hcon->type == LE_LINK)
2874 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2876 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2878 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2881 cmd->len = cpu_to_le16(dlen);
/* First chunk of payload goes into the head skb */
2884 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2885 memcpy(skb_put(skb, count), data, count);
2891 /* Continuation fragments (no L2CAP header) */
2892 frag = &skb_shinfo(skb)->frag_list;
2894 count = min_t(unsigned int, conn->mtu, len);
2896 *frag = bt_skb_alloc(count, GFP_KERNEL);
2900 memcpy(skb_put(*frag, count), data, count);
2905 frag = &(*frag)->next;
/* Parse one configuration option at *ptr, returning its total size so
 * the caller can advance through the option list. Output: *type, *olen,
 * and *val (widened to unsigned long; options larger than 4 bytes are
 * returned as a pointer to the raw value).
 */
2915 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2918 struct l2cap_conf_opt *opt = *ptr;
2921 len = L2CAP_CONF_OPT_SIZE + opt->len;
2929 *val = *((u8 *) opt->val);
2933 *val = get_unaligned_le16(opt->val);
2937 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer to the value bytes */
2941 *val = (unsigned long) opt->val;
2945 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it. For 1/2/4-byte options, val holds the value itself; for
 * larger options, val is a pointer to the data to copy.
 */
2949 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2951 struct l2cap_conf_opt *opt = *ptr;
2953 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2960 *((u8 *) opt->val) = val;
2964 put_unaligned_le16(val, opt->val);
2968 put_unaligned_le32(val, opt->val);
/* Larger options: val is a pointer to the source buffer */
2972 memcpy(opt->val, (void *) val, len);
2976 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option describing this
 * channel's local service parameters; field values depend on the
 * channel mode (ERTM uses the negotiated local parameters, streaming
 * uses best-effort defaults).
 */
2979 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2981 struct l2cap_conf_efs efs;
2983 switch (chan->mode) {
2984 case L2CAP_MODE_ERTM:
2985 efs.id = chan->local_id;
2986 efs.stype = chan->local_stype;
2987 efs.msdu = cpu_to_le16(chan->local_msdu);
2988 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2989 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2990 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2993 case L2CAP_MODE_STREAMING:
2995 efs.stype = L2CAP_SERV_BESTEFFORT;
2996 efs.msdu = cpu_to_le16(chan->local_msdu);
2997 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3006 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3007 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if frames have been
 * received but not yet acknowledged, send an RR/RNR acknowledgement.
 * Drops the channel reference taken when the timer was armed.
 */
3010 static void l2cap_ack_timeout(struct work_struct *work)
3012 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3016 BT_DBG("chan %p", chan);
3018 l2cap_chan_lock(chan);
/* Number of frames received since the last ack we sent */
3020 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3021 chan->last_acked_seq);
3024 l2cap_send_rr_or_rnr(chan, 0);
3026 l2cap_chan_unlock(chan);
3027 l2cap_chan_put(chan);
/* Initialize per-channel state for ERTM/streaming operation: reset all
 * sequence counters, initialize queues and timers, and (for ERTM only)
 * allocate the SREJ and retransmission sequence lists.
 * Returns 0 on success or a negative errno from the list allocations.
 */
3030 int l2cap_ertm_init(struct l2cap_chan *chan)
3034 chan->next_tx_seq = 0;
3035 chan->expected_tx_seq = 0;
3036 chan->expected_ack_seq = 0;
3037 chan->unacked_frames = 0;
3038 chan->buffer_seq = 0;
3039 chan->frames_sent = 0;
3040 chan->last_acked_seq = 0;
3042 chan->sdu_last_frag = NULL;
3045 skb_queue_head_init(&chan->tx_q);
/* Default AMP/move state: channel lives on the BR/EDR controller */
3047 chan->local_amp_id = AMP_ID_BREDR;
3048 chan->move_id = AMP_ID_BREDR;
3049 chan->move_state = L2CAP_MOVE_STABLE;
3050 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the ERTM-only state below */
3052 if (chan->mode != L2CAP_MODE_ERTM)
3055 chan->rx_state = L2CAP_RX_STATE_RECV;
3056 chan->tx_state = L2CAP_TX_STATE_XMIT;
3058 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3059 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3060 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3062 skb_queue_head_init(&chan->srej_q);
3064 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3068 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Undo the srej_list allocation if retrans_list setup failed */
3070 l2cap_seq_list_free(&chan->srej_list);
/* Pick a channel mode: keep the requested ERTM/streaming mode if the
 * remote's feature mask supports it, otherwise fall back to basic mode.
 */
3075 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3078 case L2CAP_MODE_STREAMING:
3079 case L2CAP_MODE_ERTM:
3080 if (l2cap_mode_supported(mode, remote_feat_mask))
3084 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed (AMP) support
 * enabled and the remote advertising the feature.
 */
3088 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3090 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed (AMP)
 * support enabled and the remote advertising the feature.
 */
3093 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3095 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the RFC option's retransmission and monitor timeouts. For
 * AMP channels, derive them from the controller's best-effort flush
 * timeout (clamped to the 16-bit field); otherwise use the BR/EDR
 * defaults.
 */
3098 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3099 struct l2cap_conf_rfc *rfc)
3101 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3102 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3104 /* Class 1 devices have must have ERTM timeouts
3105 * exceeding the Link Supervision Timeout. The
3106 * default Link Supervision Timeout for AMP
3107 * controllers is 10 seconds.
3109 * Class 1 devices use 0xffffffff for their
3110 * best-effort flush timeout, so the clamping logic
3111 * will result in a timeout that meets the above
3112 * requirement. ERTM timeouts are 16-bit values, so
3113 * the maximum timeout is 65.535 seconds.
3116 /* Convert timeout to milliseconds and round */
3117 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3119 /* This is the recommended formula for class 2 devices
3120 * that start ERTM timers when packets are sent to the
3123 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit on-the-wire field */
3125 if (ertm_to > 0xffff)
3128 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3129 rfc->monitor_timeout = rfc->retrans_timeout;
3131 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3132 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose the TX window: enable the extended control field (EWS) when
 * the requested window exceeds the default and the connection supports
 * it; otherwise clamp the window to the standard maximum.
 */
3136 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3138 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3139 __l2cap_ews_supported(chan->conn)) {
3140 /* use extended control field */
3141 set_bit(FLAG_EXT_CTRL, &chan->flags);
3142 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3144 chan->tx_win = min_t(u16, chan->tx_win,
3145 L2CAP_DEFAULT_TX_WINDOW);
3146 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3148 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request in 'data' for this channel:
 * on the first request, select the channel mode from the remote feature
 * mask; then append MTU, RFC, and (mode-dependent) EFS/EWS/FCS options.
 * Returns the total request length (the final return lines are elided
 * in this excerpt).
 */
3151 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3153 struct l2cap_conf_req *req = data;
3154 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3155 void *ptr = req->data;
3158 BT_DBG("chan %p", chan);
/* Mode is only (re)selected before any config exchange has happened */
3160 if (chan->num_conf_req || chan->num_conf_rsp)
3163 switch (chan->mode) {
3164 case L2CAP_MODE_STREAMING:
3165 case L2CAP_MODE_ERTM:
/* A "state 2" device keeps its configured mode as-is */
3166 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3169 if (__l2cap_efs_supported(chan->conn))
3170 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3174 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise a non-default receive MTU */
3179 if (chan->imtu != L2CAP_DEFAULT_MTU)
3180 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3182 switch (chan->mode) {
3183 case L2CAP_MODE_BASIC:
/* Only send a basic-mode RFC option if the remote knows about RFC */
3187 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3188 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3191 rfc.mode = L2CAP_MODE_BASIC;
3193 rfc.max_transmit = 0;
3194 rfc.retrans_timeout = 0;
3195 rfc.monitor_timeout = 0;
3196 rfc.max_pdu_size = 0;
3198 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3199 (unsigned long) &rfc);
3202 case L2CAP_MODE_ERTM:
3203 rfc.mode = L2CAP_MODE_ERTM;
3204 rfc.max_transmit = chan->max_tx;
3206 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size is bounded by the ACL MTU minus L2CAP overhead */
3208 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3209 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3211 rfc.max_pdu_size = cpu_to_le16(size);
3213 l2cap_txwin_setup(chan);
3215 rfc.txwin_size = min_t(u16, chan->tx_win,
3216 L2CAP_DEFAULT_TX_WINDOW);
3218 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3219 (unsigned long) &rfc);
3221 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3222 l2cap_add_opt_efs(&ptr, chan);
/* Extended window needs its own EWS option alongside the RFC */
3224 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3225 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3228 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3229 if (chan->fcs == L2CAP_FCS_NONE ||
3230 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3231 chan->fcs = L2CAP_FCS_NONE;
3232 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3237 case L2CAP_MODE_STREAMING:
3238 l2cap_txwin_setup(chan);
3239 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming mode has no retransmission, so these stay zero */
3241 rfc.max_transmit = 0;
3242 rfc.retrans_timeout = 0;
3243 rfc.monitor_timeout = 0;
3245 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3246 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3248 rfc.max_pdu_size = cpu_to_le16(size);
3250 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3251 (unsigned long) &rfc);
3253 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3254 l2cap_add_opt_efs(&ptr, chan);
3256 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3257 if (chan->fcs == L2CAP_FCS_NONE ||
3258 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3259 chan->fcs = L2CAP_FCS_NONE;
3260 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3266 req->dcid = cpu_to_le16(chan->dcid);
3267 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configuration Request (chan->conf_req /
 * chan->conf_len) and build our Configuration Response in 'data'.
 * Walks the option list, records MTU/flush/RFC/FCS/EFS/EWS values,
 * negotiates the channel mode, then emits the response options with
 * result SUCCESS / UNACCEPT / PENDING / UNKNOWN as appropriate.
 * Returns the response length, or -ECONNREFUSED when negotiation fails.
 * NOTE(review): several break/brace lines are elided in this excerpt.
 */
3272 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3274 struct l2cap_conf_rsp *rsp = data;
3275 void *ptr = rsp->data;
3276 void *req = chan->conf_req;
3277 int len = chan->conf_len;
3278 int type, hint, olen;
3280 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3281 struct l2cap_conf_efs efs;
3283 u16 mtu = L2CAP_DEFAULT_MTU;
3284 u16 result = L2CAP_CONF_SUCCESS;
3287 BT_DBG("chan %p", chan);
/* First pass: collect every option the peer sent */
3289 while (len >= L2CAP_CONF_OPT_SIZE) {
3290 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; others must be understood */
3292 hint = type & L2CAP_CONF_HINT;
3293 type &= L2CAP_CONF_MASK;
3296 case L2CAP_CONF_MTU:
3300 case L2CAP_CONF_FLUSH_TO:
3301 chan->flush_to = val;
3304 case L2CAP_CONF_QOS:
3307 case L2CAP_CONF_RFC:
3308 if (olen == sizeof(rfc))
3309 memcpy(&rfc, (void *) val, olen);
3312 case L2CAP_CONF_FCS:
3313 if (val == L2CAP_FCS_NONE)
3314 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3317 case L2CAP_CONF_EFS:
3319 if (olen == sizeof(efs))
3320 memcpy(&efs, (void *) val, olen);
3323 case L2CAP_CONF_EWS:
/* EWS is only valid when high-speed support is enabled */
3324 if (!chan->conn->hs_enabled)
3325 return -ECONNREFUSED;
3327 set_bit(FLAG_EXT_CTRL, &chan->flags);
3328 set_bit(CONF_EWS_RECV, &chan->conf_state);
3329 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3330 chan->remote_tx_win = val;
/* Unknown non-hint option: reject and echo the type back */
3337 result = L2CAP_CONF_UNKNOWN;
3338 *((u8 *) ptr++) = type;
/* Mode may only be renegotiated on the very first exchange */
3343 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3346 switch (chan->mode) {
3347 case L2CAP_MODE_STREAMING:
3348 case L2CAP_MODE_ERTM:
3349 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3350 chan->mode = l2cap_select_mode(rfc.mode,
3351 chan->conn->feat_mask);
3356 if (__l2cap_efs_supported(chan->conn))
3357 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3359 return -ECONNREFUSED;
3362 if (chan->mode != rfc.mode)
3363 return -ECONNREFUSED;
/* Mode mismatch after first round: propose our mode once, then give up */
3369 if (chan->mode != rfc.mode) {
3370 result = L2CAP_CONF_UNACCEPT;
3371 rfc.mode = chan->mode;
3373 if (chan->num_conf_rsp == 1)
3374 return -ECONNREFUSED;
3376 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3377 (unsigned long) &rfc);
3380 if (result == L2CAP_CONF_SUCCESS) {
3381 /* Configure output options and let the other side know
3382 * which ones we don't like. */
3384 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3385 result = L2CAP_CONF_UNACCEPT;
3388 set_bit(CONF_MTU_DONE, &chan->conf_state);
3390 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must be compatible with our local one */
3393 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3394 efs.stype != L2CAP_SERV_NOTRAFIC &&
3395 efs.stype != chan->local_stype) {
3397 result = L2CAP_CONF_UNACCEPT;
3399 if (chan->num_conf_req >= 1)
3400 return -ECONNREFUSED;
3402 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3404 (unsigned long) &efs);
3406 /* Send PENDING Conf Rsp */
3407 result = L2CAP_CONF_PENDING;
3408 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3413 case L2CAP_MODE_BASIC:
3414 chan->fcs = L2CAP_FCS_NONE;
3415 set_bit(CONF_MODE_DONE, &chan->conf_state);
3418 case L2CAP_MODE_ERTM:
/* Honor the peer's window unless EWS already supplied one */
3419 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3420 chan->remote_tx_win = rfc.txwin_size;
3422 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3424 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's MPS to what fits our ACL MTU */
3426 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3427 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3428 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3429 rfc.max_pdu_size = cpu_to_le16(size);
3430 chan->remote_mps = size;
3432 __l2cap_set_ertm_timeouts(chan, &rfc);
3434 set_bit(CONF_MODE_DONE, &chan->conf_state);
3436 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3437 sizeof(rfc), (unsigned long) &rfc);
3439 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3440 chan->remote_id = efs.id;
3441 chan->remote_stype = efs.stype;
3442 chan->remote_msdu = le16_to_cpu(efs.msdu);
3443 chan->remote_flush_to =
3444 le32_to_cpu(efs.flush_to);
3445 chan->remote_acc_lat =
3446 le32_to_cpu(efs.acc_lat);
3447 chan->remote_sdu_itime =
3448 le32_to_cpu(efs.sdu_itime);
3449 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3451 (unsigned long) &efs);
3455 case L2CAP_MODE_STREAMING:
3456 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3457 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3458 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3459 rfc.max_pdu_size = cpu_to_le16(size);
3460 chan->remote_mps = size;
3462 set_bit(CONF_MODE_DONE, &chan->conf_state);
3464 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3465 (unsigned long) &rfc);
3470 result = L2CAP_CONF_UNACCEPT;
3472 memset(&rfc, 0, sizeof(rfc));
3473 rfc.mode = chan->mode;
3476 if (result == L2CAP_CONF_SUCCESS)
3477 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3479 rsp->scid = cpu_to_le16(chan->dcid);
3480 rsp->result = cpu_to_le16(result);
3481 rsp->flags = cpu_to_le16(0);
/* Parse the peer's Configuration Response and build our follow-up
 * Configuration Request in 'data'. Accepted/adjusted options are
 * echoed; on SUCCESS or PENDING the negotiated ERTM/streaming
 * parameters are committed to the channel. Returns the new request
 * length or -ECONNREFUSED on an irreconcilable mode/EFS mismatch.
 */
3486 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3487 void *data, u16 *result)
3489 struct l2cap_conf_req *req = data;
3490 void *ptr = req->data;
3493 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3494 struct l2cap_conf_efs efs;
3496 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3498 while (len >= L2CAP_CONF_OPT_SIZE) {
3499 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3502 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: refuse and clamp */
3503 if (val < L2CAP_DEFAULT_MIN_MTU) {
3504 *result = L2CAP_CONF_UNACCEPT;
3505 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3508 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3511 case L2CAP_CONF_FLUSH_TO:
3512 chan->flush_to = val;
3513 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3517 case L2CAP_CONF_RFC:
3518 if (olen == sizeof(rfc))
3519 memcpy(&rfc, (void *)val, olen);
/* State-2 devices cannot change mode mid-negotiation */
3521 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3522 rfc.mode != chan->mode)
3523 return -ECONNREFUSED;
3527 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3528 sizeof(rfc), (unsigned long) &rfc);
3531 case L2CAP_CONF_EWS:
3532 chan->ack_win = min_t(u16, val, chan->ack_win);
3533 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3537 case L2CAP_CONF_EFS:
3538 if (olen == sizeof(efs))
3539 memcpy(&efs, (void *)val, olen);
3541 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3542 efs.stype != L2CAP_SERV_NOTRAFIC &&
3543 efs.stype != chan->local_stype)
3544 return -ECONNREFUSED;
3546 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3547 (unsigned long) &efs);
3550 case L2CAP_CONF_FCS:
3551 if (*result == L2CAP_CONF_PENDING)
3552 if (val == L2CAP_FCS_NONE)
3553 set_bit(CONF_RECV_NO_FCS,
/* Basic mode cannot be overridden by the peer's RFC */
3559 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3560 return -ECONNREFUSED;
3562 chan->mode = rfc.mode;
/* Commit negotiated parameters once the exchange is (provisionally) ok */
3564 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3566 case L2CAP_MODE_ERTM:
3567 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3568 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3569 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3570 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3571 chan->ack_win = min_t(u16, chan->ack_win,
3574 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3575 chan->local_msdu = le16_to_cpu(efs.msdu);
3576 chan->local_sdu_itime =
3577 le32_to_cpu(efs.sdu_itime);
3578 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3579 chan->local_flush_to =
3580 le32_to_cpu(efs.flush_to);
3584 case L2CAP_MODE_STREAMING:
3585 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3589 req->dcid = cpu_to_le16(chan->dcid);
3590 req->flags = cpu_to_le16(0);
/* Fill in a minimal Configuration Response header (scid/result/flags)
 * with no options; returns the response length (elided in this
 * excerpt).
 */
3595 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3596 u16 result, u16 flags)
3598 struct l2cap_conf_rsp *rsp = data;
3599 void *ptr = rsp->data;
3601 BT_DBG("chan %p", chan);
3603 rsp->scid = cpu_to_le16(chan->dcid);
3604 rsp->result = cpu_to_le16(result);
3605 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE Credit-Based Connection Response (success) for a
 * channel whose acceptance was postponed, advertising our MTU, MPS and
 * initial credits.
 */
3610 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3612 struct l2cap_le_conn_rsp rsp;
3613 struct l2cap_conn *conn = chan->conn;
3615 BT_DBG("chan %p", chan);
3617 rsp.dcid = cpu_to_le16(chan->scid);
3618 rsp.mtu = cpu_to_le16(chan->imtu);
3619 rsp.mps = cpu_to_le16(chan->mps);
3620 rsp.credits = cpu_to_le16(chan->rx_credits);
3621 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
/* chan->ident was saved from the original request */
3623 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR Connection (or Create Channel) Response
 * (success) for a channel whose acceptance was postponed, then kick off
 * configuration by sending our Configuration Request if we have not
 * already.
 */
3627 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3629 struct l2cap_conn_rsp rsp;
3630 struct l2cap_conn *conn = chan->conn;
3634 rsp.scid = cpu_to_le16(chan->dcid);
3635 rsp.dcid = cpu_to_le16(chan->scid);
3636 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3637 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP channels answer with CREATE_CHAN_RSP instead of CONN_RSP */
3640 rsp_code = L2CAP_CREATE_CHAN_RSP;
3642 rsp_code = L2CAP_CONN_RSP;
3644 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3646 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only the first caller past this point sends the config request */
3648 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3651 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3652 l2cap_build_conf_req(chan, buf), buf);
3653 chan->num_conf_req++;
/* Extract the final RFC (and EWS) parameters from a successful
 * Configuration Response and commit timeouts/MPS/ack window to the
 * channel. Sane defaults cover peers that omit the options entirely.
 */
3656 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3660 /* Use sane default values in case a misbehaving remote device
3661 * did not send an RFC or extended window size option.
3663 u16 txwin_ext = chan->ack_win;
3664 struct l2cap_conf_rfc rfc = {
3666 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3667 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3668 .max_pdu_size = cpu_to_le16(chan->imtu),
3669 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3672 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming modes carry RFC parameters worth reading */
3674 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3677 while (len >= L2CAP_CONF_OPT_SIZE) {
3678 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3681 case L2CAP_CONF_RFC:
3682 if (olen == sizeof(rfc))
3683 memcpy(&rfc, (void *)val, olen);
3685 case L2CAP_CONF_EWS:
3692 case L2CAP_MODE_ERTM:
3693 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3694 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3695 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Window comes from EWS when extended control is in use */
3696 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3697 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3699 chan->ack_win = min_t(u16, chan->ack_win,
3702 case L2CAP_MODE_STREAMING:
3703 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it rejects our pending
 * Information Request (command-not-understood), treat the feature
 * exchange as done and resume starting queued connections.
 */
3707 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3708 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3711 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Ignore truncated reject packets */
3713 if (cmd_len < sizeof(*rej))
3716 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3719 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3720 cmd->ident == conn->info_ident) {
3721 cancel_delayed_work(&conn->info_timer);
3723 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3724 conn->info_ident = 0;
3726 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: look up the listening channel
 * for the requested PSM, run security checks, create and register the
 * new channel, send the (possibly pending) response, and trigger the
 * feature-mask exchange or the first Configuration Request as needed.
 * Returns the new channel (or NULL). NOTE(review): several
 * error/cleanup lines are elided in this excerpt.
 */
3732 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3733 struct l2cap_cmd_hdr *cmd,
3734 u8 *data, u8 rsp_code, u8 amp_id)
3736 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3737 struct l2cap_conn_rsp rsp;
3738 struct l2cap_chan *chan = NULL, *pchan;
3739 int result, status = L2CAP_CS_NO_INFO;
3741 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3742 __le16 psm = req->psm;
3744 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3746 /* Check if we have socket listening on psm */
3747 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3748 &conn->hcon->dst, ACL_LINK);
3750 result = L2CAP_CR_BAD_PSM;
3754 mutex_lock(&conn->chan_lock);
3755 l2cap_chan_lock(pchan);
3757 /* Check if the ACL is secure enough (if not SDP) */
3758 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3759 !hci_conn_check_link_mode(conn->hcon)) {
3760 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3761 result = L2CAP_CR_SEC_BLOCK;
3765 result = L2CAP_CR_NO_MEM;
3767 /* Check if we already have channel with that dcid */
3768 if (__l2cap_get_chan_by_dcid(conn, scid))
3771 chan = pchan->ops->new_connection(pchan);
3775 /* For certain devices (ex: HID mouse), support for authentication,
3776 * pairing and bonding is optional. For such devices, inorder to avoid
3777 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3778 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3780 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3782 bacpy(&chan->src, &conn->hcon->src);
3783 bacpy(&chan->dst, &conn->hcon->dst);
3784 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3785 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3788 chan->local_amp_id = amp_id;
3790 __l2cap_chan_add(conn, chan);
3794 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* Remember the request ident so a deferred response can reuse it */
3796 chan->ident = cmd->ident;
3798 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3799 if (l2cap_chan_check_security(chan, false)) {
3800 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3801 l2cap_state_change(chan, BT_CONNECT2);
3802 result = L2CAP_CR_PEND;
3803 status = L2CAP_CS_AUTHOR_PEND;
3804 chan->ops->defer(chan);
3806 /* Force pending result for AMP controllers.
3807 * The connection will succeed after the
3808 * physical link is up.
3810 if (amp_id == AMP_ID_BREDR) {
3811 l2cap_state_change(chan, BT_CONFIG);
3812 result = L2CAP_CR_SUCCESS;
3814 l2cap_state_change(chan, BT_CONNECT2);
3815 result = L2CAP_CR_PEND;
3817 status = L2CAP_CS_NO_INFO;
3820 l2cap_state_change(chan, BT_CONNECT2);
3821 result = L2CAP_CR_PEND;
3822 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange not done yet: answer PENDING for now */
3825 l2cap_state_change(chan, BT_CONNECT2);
3826 result = L2CAP_CR_PEND;
3827 status = L2CAP_CS_NO_INFO;
3831 l2cap_chan_unlock(pchan);
3832 mutex_unlock(&conn->chan_lock);
3833 l2cap_chan_put(pchan);
3836 rsp.scid = cpu_to_le16(scid);
3837 rsp.dcid = cpu_to_le16(dcid);
3838 rsp.result = cpu_to_le16(result);
3839 rsp.status = cpu_to_le16(status);
3840 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Kick off the feature-mask exchange if we answered PEND/NO_INFO */
3842 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3843 struct l2cap_info_req info;
3844 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3846 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3847 conn->info_ident = l2cap_get_ident(conn);
3849 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3851 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3852 sizeof(info), &info);
/* Successful connect: immediately start configuration */
3855 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3856 result == L2CAP_CR_SUCCESS) {
3858 set_bit(CONF_REQ_SENT, &chan->conf_state);
3859 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3860 l2cap_build_conf_req(chan, buf), buf);
3861 chan->num_conf_req++;
/* Signalling handler for a BR/EDR Connection Request: notify the
 * management interface of the connection (once) and delegate to
 * l2cap_connect() with the plain CONN_RSP code (amp_id 0 = BR/EDR).
 */
3867 static int l2cap_connect_req(struct l2cap_conn *conn,
3868 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3870 struct hci_dev *hdev = conn->hcon->hdev;
3871 struct hci_conn *hcon = conn->hcon;
/* Reject truncated requests */
3873 if (cmd_len < sizeof(struct l2cap_conn_req))
3877 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3878 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3879 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3880 hci_dev_unlock(hdev);
3882 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection (or Create Channel) Response: locate the channel
 * by scid (or by ident when no dcid was assigned yet), then on SUCCESS
 * move to BT_CONFIG and send our Configuration Request; on PEND mark
 * the connect as pending; otherwise tear the channel down.
 */
3886 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3887 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3890 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3891 u16 scid, dcid, result, status;
3892 struct l2cap_chan *chan;
3896 if (cmd_len < sizeof(*rsp))
3899 scid = __le16_to_cpu(rsp->scid);
3900 dcid = __le16_to_cpu(rsp->dcid);
3901 result = __le16_to_cpu(rsp->result);
3902 status = __le16_to_cpu(rsp->status);
3904 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3905 dcid, scid, result, status);
3907 mutex_lock(&conn->chan_lock);
/* Prefer lookup by our source CID; fall back to the command ident */
3910 chan = __l2cap_get_chan_by_scid(conn, scid);
3916 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3925 l2cap_chan_lock(chan);
3928 case L2CAP_CR_SUCCESS:
3929 l2cap_state_change(chan, BT_CONFIG);
3932 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send our config request exactly once */
3934 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3937 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3938 l2cap_build_conf_req(chan, req), req);
3939 chan->num_conf_req++;
3943 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result: the peer refused the connection */
3947 l2cap_chan_del(chan, ECONNREFUSED);
3951 l2cap_chan_unlock(chan);
3954 mutex_unlock(&conn->chan_lock);
/* Apply the FCS default after configuration: disabled for basic mode;
 * CRC16 for ERTM/streaming unless the peer asked for no FCS.
 */
3959 static inline void set_default_fcs(struct l2cap_chan *chan)
3961 /* FCS is enabled only in ERTM or streaming mode, if one or both
3964 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3965 chan->fcs = L2CAP_FCS_NONE;
3966 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3967 chan->fcs = L2CAP_FCS_CRC16;
/* Send a successful Configuration Response that resolves a pending
 * local EFS negotiation, clearing the local-pending flag and marking
 * our output configuration as done.
 */
3970 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3971 u8 ident, u16 flags)
3973 struct l2cap_conn *conn = chan->conn;
3975 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3978 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3979 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3981 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3982 l2cap_build_conf_rsp(chan, data,
3983 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the
 * offending source/destination CIDs back to the peer.
 */
3986 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3989 struct l2cap_cmd_rej_cid rej;
3991 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3992 rej.scid = __cpu_to_le16(scid);
3993 rej.dcid = __cpu_to_le16(dcid);
3995 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request: locate the channel,
 * accumulate (possibly fragmented) option data in chan->conf_req, and
 * once complete, parse it, send our response, and — when both
 * directions are configured — finish ERTM init and mark the channel
 * ready. NOTE(review): some error/cleanup lines are elided here.
 */
3998 static inline int l2cap_config_req(struct l2cap_conn *conn,
3999 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4002 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4005 struct l2cap_chan *chan;
4008 if (cmd_len < sizeof(*req))
4011 dcid = __le16_to_cpu(req->dcid);
4012 flags = __le16_to_cpu(req->flags);
4014 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4016 chan = l2cap_get_chan_by_scid(conn, dcid);
4018 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config is only legal while connecting/configuring */
4022 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4023 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4028 /* Reject if config buffer is too small. */
4029 len = cmd_len - sizeof(*req);
4030 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4031 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4032 l2cap_build_conf_rsp(chan, rsp,
4033 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the peer's option list */
4038 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4039 chan->conf_len += len;
4041 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4042 /* Incomplete config. Send empty response. */
4043 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4044 l2cap_build_conf_rsp(chan, rsp,
4045 L2CAP_CONF_SUCCESS, flags), rsp);
4049 /* Complete config. */
4050 len = l2cap_parse_conf_req(chan, rsp);
/* Negative length means negotiation failed irrecoverably */
4052 l2cap_send_disconn_req(chan, ECONNRESET);
4056 chan->ident = cmd->ident;
4057 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4058 chan->num_conf_rsp++;
4060 /* Reset config buffer. */
4063 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finalize and bring the channel up */
4066 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4067 set_default_fcs(chan);
4069 if (chan->mode == L2CAP_MODE_ERTM ||
4070 chan->mode == L2CAP_MODE_STREAMING)
4071 err = l2cap_ertm_init(chan);
4074 l2cap_send_disconn_req(chan, -err);
4076 l2cap_chan_ready(chan);
/* We have the peer's config but have not sent ours yet */
4081 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4083 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4084 l2cap_build_conf_req(chan, buf), buf);
4085 chan->num_conf_req++;
4088 /* Got Conf Rsp PENDING from remote side and assume we sent
4089 Conf Rsp PENDING in the code above */
4090 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4091 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4093 /* check compatibility */
4095 /* Send rsp for BR/EDR channel */
4097 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4099 chan->ident = cmd->ident;
4103 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response. SUCCESS commits the RFC
 * parameters; PENDING may trigger an EFS response or AMP logical-link
 * creation; UNACCEPT re-negotiates (bounded by the max response count);
 * anything else tears the channel down. When both directions complete,
 * finish ERTM init and mark the channel ready.
 */
4107 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4108 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4111 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4112 u16 scid, flags, result;
4113 struct l2cap_chan *chan;
4114 int len = cmd_len - sizeof(*rsp);
4117 if (cmd_len < sizeof(*rsp))
4120 scid = __le16_to_cpu(rsp->scid);
4121 flags = __le16_to_cpu(rsp->flags);
4122 result = __le16_to_cpu(rsp->result);
4124 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4127 chan = l2cap_get_chan_by_scid(conn, scid);
4132 case L2CAP_CONF_SUCCESS:
4133 l2cap_conf_rfc_get(chan, rsp->data, len);
4134 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4137 case L2CAP_CONF_PENDING:
4138 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4140 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4143 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4146 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR channels answer directly; AMP waits for the logical link */
4150 if (!chan->hs_hcon) {
4151 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4154 if (l2cap_check_efs(chan)) {
4155 amp_create_logical_link(chan);
4156 chan->ident = cmd->ident;
4162 case L2CAP_CONF_UNACCEPT:
4163 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard against the peer's options overflowing our request buffer */
4166 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4167 l2cap_send_disconn_req(chan, ECONNRESET);
4171 /* throw out any old stored conf requests */
4172 result = L2CAP_CONF_SUCCESS;
4173 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4176 l2cap_send_disconn_req(chan, ECONNRESET);
4180 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4181 L2CAP_CONF_REQ, len, req);
4182 chan->num_conf_req++;
4183 if (result != L2CAP_CONF_SUCCESS)
/* Rejection or too many rounds: give up on the channel */
4189 l2cap_chan_set_err(chan, ECONNRESET);
4191 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4192 l2cap_send_disconn_req(chan, ECONNRESET);
/* More fragments still to come */
4196 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4199 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4201 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4202 set_default_fcs(chan);
4204 if (chan->mode == L2CAP_MODE_ERTM ||
4205 chan->mode == L2CAP_MODE_STREAMING)
4206 err = l2cap_ertm_init(chan);
4209 l2cap_send_disconn_req(chan, -err);
4211 l2cap_chan_ready(chan);
4215 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, then shut down and delete the channel.
 * A hold/put pair keeps the channel alive across the unlocked
 * ops->close() call.
 */
4219 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4220 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4223 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4224 struct l2cap_disconn_rsp rsp;
4226 struct l2cap_chan *chan;
4228 if (cmd_len != sizeof(*req))
4231 scid = __le16_to_cpu(req->scid);
4232 dcid = __le16_to_cpu(req->dcid);
4234 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4236 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our local scid */
4238 chan = __l2cap_get_chan_by_scid(conn, dcid);
4240 mutex_unlock(&conn->chan_lock);
4241 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4245 l2cap_chan_lock(chan);
4247 rsp.dcid = cpu_to_le16(chan->scid);
4248 rsp.scid = cpu_to_le16(chan->dcid);
4249 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4251 chan->ops->set_shutdown(chan);
4253 l2cap_chan_hold(chan);
4254 l2cap_chan_del(chan, ECONNRESET);
4256 l2cap_chan_unlock(chan);
4258 chan->ops->close(chan);
4259 l2cap_chan_put(chan);
4261 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnection Response: the peer confirmed our
 * disconnect, so delete the channel (error 0 = clean shutdown), using
 * a hold/put pair around the unlocked ops->close() call.
 */
4266 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4267 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4270 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4272 struct l2cap_chan *chan;
4274 if (cmd_len != sizeof(*rsp))
4277 scid = __le16_to_cpu(rsp->scid);
4278 dcid = __le16_to_cpu(rsp->dcid);
4280 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4282 mutex_lock(&conn->chan_lock);
4284 chan = __l2cap_get_chan_by_scid(conn, scid);
4286 mutex_unlock(&conn->chan_lock);
4290 l2cap_chan_lock(chan);
4292 l2cap_chan_hold(chan);
4293 l2cap_chan_del(chan, 0);
4295 l2cap_chan_unlock(chan);
4297 chan->ops->close(chan);
4298 l2cap_chan_put(chan);
4300 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request.  Answers three request types:
 * feature mask (ERTM/streaming always advertised, extended flow/window only
 * when high-speed is enabled), fixed-channel map (A2MP bit tracks
 * conn->hs_enabled), and anything else gets IR_NOTSUPP.
 * NOTE(review): elided view — `buf` declaration and some lines missing.
 */
4305 static inline int l2cap_information_req(struct l2cap_conn *conn,
4306 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4309 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4312 if (cmd_len != sizeof(*req))
4315 type = __le16_to_cpu(req->type);
4317 BT_DBG("type 0x%4.4x", type);
4319 if (type == L2CAP_IT_FEAT_MASK) {
4321 u32 feat_mask = l2cap_feat_mask;
4322 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4323 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4324 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4326 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow spec / window only advertised with AMP (high speed). */
4328 if (conn->hs_enabled)
4329 feat_mask |= L2CAP_FEAT_EXT_FLOW
4330 | L2CAP_FEAT_EXT_WINDOW;
4332 put_unaligned_le32(feat_mask, rsp->data);
4333 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4335 } else if (type == L2CAP_IT_FIXED_CHAN) {
4337 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Toggle the A2MP bit in the (file-global) fixed channel map. */
4339 if (conn->hs_enabled)
4340 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4342 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4344 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4345 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4346 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4347 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
/* Unknown info type: reply "not supported". */
4350 struct l2cap_info_rsp rsp;
4351 rsp.type = cpu_to_le16(type);
4352 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4353 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming Information Response.  Matches the pending info_ident,
 * cancels the info timer, records the peer's feature mask / fixed channel
 * map, chains a FIXED_CHAN request after a FEAT_MASK answer when the peer
 * supports fixed channels, and (re)starts pending connections when the
 * exchange is done or failed.
 * NOTE(review): elided view — some declarations/returns are missing.
 */
4360 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4361 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4364 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4367 if (cmd_len < sizeof(*rsp))
4370 type = __le16_to_cpu(rsp->type);
4371 result = __le16_to_cpu(rsp->result);
4373 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4375 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4376 if (cmd->ident != conn->info_ident ||
4377 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4380 cancel_delayed_work(&conn->info_timer);
/* Failure: mark the exchange done and proceed without peer features. */
4382 if (result != L2CAP_IR_SUCCESS) {
4383 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4384 conn->info_ident = 0;
4386 l2cap_conn_start(conn);
4392 case L2CAP_IT_FEAT_MASK:
4393 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: also query its fixed channel map. */
4395 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4396 struct l2cap_info_req req;
4397 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4399 conn->info_ident = l2cap_get_ident(conn);
4401 l2cap_send_cmd(conn, conn->info_ident,
4402 L2CAP_INFO_REQ, sizeof(req), &req);
4404 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4405 conn->info_ident = 0;
4407 l2cap_conn_start(conn);
4411 case L2CAP_IT_FIXED_CHAN:
4412 conn->fixed_chan_mask = rsp->data[0];
4413 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4414 conn->info_ident = 0;
4416 l2cap_conn_start(conn);
/* Handle an incoming Create Channel Request (AMP).  amp_id 0 (AMP_ID_BREDR)
 * falls back to a normal BR/EDR connect; otherwise the AMP controller id is
 * validated (must exist, be HCI_AMP, and be up) and, on success, the new
 * BR/EDR channel is associated with the AMP link (hs_hcon, no FCS, block
 * MTU).  Invalid controllers get a CR_BAD_AMP response.
 * NOTE(review): elided view — error-path lines and braces are missing.
 */
4423 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4424 struct l2cap_cmd_hdr *cmd,
4425 u16 cmd_len, void *data)
4427 struct l2cap_create_chan_req *req = data;
4428 struct l2cap_create_chan_rsp rsp;
4429 struct l2cap_chan *chan;
4430 struct hci_dev *hdev;
4433 if (cmd_len != sizeof(*req))
4436 if (!conn->hs_enabled)
4439 psm = le16_to_cpu(req->psm);
4440 scid = le16_to_cpu(req->scid);
4442 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4444 /* For controller id 0 make BR/EDR connection */
4445 if (req->amp_id == AMP_ID_BREDR) {
4446 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4451 /* Validate AMP controller id */
4452 hdev = hci_dev_get(req->amp_id);
4456 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4461 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4464 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4465 struct hci_conn *hs_hcon;
4467 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No high-speed link to the peer: reject with invalid CID. */
4471 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4476 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4478 mgr->bredr_chan = chan;
4479 chan->hs_hcon = hs_hcon;
/* AMP carries its own integrity checks; disable L2CAP FCS. */
4480 chan->fcs = L2CAP_FCS_NONE;
4481 conn->mtu = hdev->block_mtu;
/* Error path: report a bad AMP controller id to the peer. */
4490 rsp.scid = cpu_to_le16(scid);
4491 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4492 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4494 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request toward dest_amp_id and arm the move timer.
 * Stashes the fresh signaling ident in chan->ident so the response can be
 * matched later.
 */
4500 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4502 struct l2cap_move_chan_req req;
4505 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4507 ident = l2cap_get_ident(chan->conn);
4508 chan->ident = ident;
4510 req.icid = cpu_to_le16(chan->scid);
4511 req.dest_amp_id = dest_amp_id;
4513 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4516 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with the given result code, reusing the
 * ident of the request being answered (stored in chan->ident).
 */
4519 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4521 struct l2cap_move_chan_rsp rsp;
4523 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4525 rsp.icid = cpu_to_le16(chan->dcid);
4526 rsp.result = cpu_to_le16(result);
4528 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirm for this channel with a fresh ident and
 * (re)arm the move timer while waiting for the confirm response.
 */
4532 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4534 struct l2cap_move_chan_cfm cfm;
4536 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4538 chan->ident = l2cap_get_ident(chan->conn);
4540 cfm.icid = cpu_to_le16(chan->scid);
4541 cfm.result = cpu_to_le16(result);
4543 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4546 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm for a bare icid — used when no
 * channel object could be located for the move.
 */
4549 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4551 struct l2cap_move_chan_cfm cfm;
4553 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4555 cfm.icid = cpu_to_le16(icid);
4556 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4558 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Acknowledge a Move Channel Confirm by echoing its icid back under the
 * confirm's own ident.
 */
4562 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4565 struct l2cap_move_chan_cfm_rsp rsp;
4567 BT_DBG("icid 0x%4.4x", icid);
4569 rsp.icid = cpu_to_le16(icid);
4570 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its high-speed logical link.  The actual
 * link release is still a placeholder (see comment below).
 */
4573 static void __release_logical_link(struct l2cap_chan *chan)
4575 chan->hs_hchan = NULL;
4576 chan->hs_hcon = NULL;
4578 /* Placeholder - release the logical link */
/* Recover from a failed logical link setup.  A channel not yet connected is
 * simply disconnected; an in-progress move is unwound according to this
 * side's move role (responder refuses, initiator confirms UNCONFIRMED).
 * NOTE(review): elided view — break statements/braces are missing.
 */
4581 static void l2cap_logical_fail(struct l2cap_chan *chan)
4583 /* Logical link setup failed */
4584 if (chan->state != BT_CONNECTED) {
4585 /* Create channel failure, disconnect */
4586 l2cap_send_disconn_req(chan, ECONNRESET);
4590 switch (chan->move_role) {
4591 case L2CAP_MOVE_ROLE_RESPONDER:
4592 l2cap_move_done(chan);
4593 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4595 case L2CAP_MOVE_ROLE_INITIATOR:
4596 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4597 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4598 /* Remote has only sent pending or
4599 * success responses, clean up
4601 l2cap_move_done(chan);
4604 /* Other amp move states imply that the move
4605 * has already aborted
4607 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Logical link came up for a channel being created on AMP: attach the
 * hci_chan, answer the pending EFS config, and if configuration already
 * completed in both directions, finish ERTM init and mark the channel ready.
 * NOTE(review): elided view — `err` declaration and braces are missing.
 */
4612 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4613 struct hci_chan *hchan)
4615 struct l2cap_conf_rsp rsp;
4617 chan->hs_hchan = hchan;
4618 chan->hs_hcon->l2cap_data = chan->conn;
4620 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4622 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4625 set_default_fcs(chan);
4627 err = l2cap_ertm_init(chan);
4629 l2cap_send_disconn_req(chan, -err);
4631 l2cap_chan_ready(chan);
/* Logical link came up during a channel move: attach the link and advance
 * the move state machine depending on whether we are initiator (send
 * confirm) or responder (send success response), deferring if local busy.
 * An unexpected move state releases the link and returns to STABLE.
 * NOTE(review): elided view — break statements/braces are missing.
 */
4635 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4636 struct hci_chan *hchan)
4638 chan->hs_hcon = hchan->conn;
4639 chan->hs_hcon->l2cap_data = chan->conn;
4641 BT_DBG("move_state %d", chan->move_state);
4643 switch (chan->move_state) {
4644 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4645 /* Move confirm will be sent after a success
4646 * response is received
4648 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4650 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4651 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4652 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4653 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4654 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4655 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4656 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4657 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4658 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4662 /* Move was not in expected state, free the channel */
4663 __release_logical_link(chan);
4665 chan->move_state = L2CAP_MOVE_STABLE;
/* Call with chan locked
 *
 * Logical link confirmation callback: on failure, unwind via
 * l2cap_logical_fail() and release the link; on success, dispatch to the
 * create-path or move-path finisher depending on channel state.
 * NOTE(review): elided view — the status check/return lines are missing.
 */
4670 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4673 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4676 l2cap_logical_fail(chan);
4677 __release_logical_link(chan);
4681 if (chan->state != BT_CONNECTED) {
4682 /* Ignore logical link if channel is on BR/EDR */
4683 if (chan->local_amp_id != AMP_ID_BREDR)
4684 l2cap_logical_finish_create(chan, hchan);
4686 l2cap_logical_finish_move(chan, hchan);
/* Begin moving a channel between BR/EDR and AMP as the initiator.  From
 * BR/EDR we require the AMP-preferred policy and start physical link setup
 * (placeholder); from AMP we move back by sending a Move Channel Request
 * with destination id 0 (BR/EDR).
 * NOTE(review): elided view — the early-return and braces are missing.
 */
4690 void l2cap_move_start(struct l2cap_chan *chan)
4692 BT_DBG("chan %p", chan);
4694 if (chan->local_amp_id == AMP_ID_BREDR) {
4695 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4697 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4698 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4699 /* Placeholder - start physical link setup */
4701 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4702 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4704 l2cap_move_setup(chan);
4705 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after physical link setup.  Outgoing channels
 * either proceed with a Create Channel Request on the AMP or fall back to a
 * plain BR/EDR Connect Request; incoming channels get a Create Channel
 * Response (success or no-resources) and, on success, move to BT_CONFIG and
 * kick off configuration.
 * NOTE(review): elided view — `buf` declaration and braces are missing.
 */
4709 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4710 u8 local_amp_id, u8 remote_amp_id)
4712 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4713 local_amp_id, remote_amp_id);
4715 chan->fcs = L2CAP_FCS_NONE;
4717 /* Outgoing channel on AMP */
4718 if (chan->state == BT_CONNECT) {
4719 if (result == L2CAP_CR_SUCCESS) {
4720 chan->local_amp_id = local_amp_id;
4721 l2cap_send_create_chan_req(chan, remote_amp_id);
4723 /* Revert to BR/EDR connect */
4724 l2cap_send_conn_req(chan);
4730 /* Incoming channel on AMP */
4731 if (__l2cap_no_conn_pending(chan)) {
4732 struct l2cap_conn_rsp rsp;
4734 rsp.scid = cpu_to_le16(chan->dcid);
4735 rsp.dcid = cpu_to_le16(chan->scid);
4737 if (result == L2CAP_CR_SUCCESS) {
4738 /* Send successful response */
4739 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4740 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4742 /* Send negative response */
4743 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4744 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4747 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4750 if (result == L2CAP_CR_SUCCESS) {
4751 l2cap_state_change(chan, BT_CONFIG);
4752 set_bit(CONF_REQ_SENT, &chan->conf_state);
4753 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4755 l2cap_build_conf_req(chan, buf), buf);
4756 chan->num_conf_req++;
/* Initiator side: prepare the channel for moving, record the target local
 * controller id, and send the Move Channel Request to the peer.
 */
4761 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4764 l2cap_move_setup(chan);
4765 chan->move_id = local_amp_id;
4766 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4768 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer a move based on logical link availability.  If the
 * link is already connected, attach it and respond success; otherwise wait
 * for logical link confirmation, or refuse when no link is available.
 * NOTE(review): hchan acquisition is a placeholder — hchan is NULL here
 * until the amp helpers are wired in; elided lines likely guard that.
 */
4771 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4773 struct hci_chan *hchan = NULL;
4775 /* Placeholder - get hci_chan for logical link */
4778 if (hchan->state == BT_CONNECTED) {
4779 /* Logical link is ready to go */
4780 chan->hs_hcon = hchan->conn;
4781 chan->hs_hcon->l2cap_data = chan->conn;
4782 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4783 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4785 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4787 /* Wait for logical link to be ready */
4788 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4791 /* Logical link not available */
4792 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort a channel move: as responder, reply BAD_ID (for -EINVAL) or
 * NOT_ALLOWED; then reset role/state to stable and resume ERTM traffic.
 * NOTE(review): elided view — `rsp_result` declaration is missing.
 */
4796 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4798 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4800 if (result == -EINVAL)
4801 rsp_result = L2CAP_MR_BAD_ID;
4803 rsp_result = L2CAP_MR_NOT_ALLOWED;
4805 l2cap_send_move_chan_rsp(chan, rsp_result);
4808 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4809 chan->move_state = L2CAP_MOVE_STABLE;
4811 /* Restart data transmission */
4812 l2cap_ertm_send(chan);
/* Invoke with locked chan
 *
 * Physical (AMP) link confirmation: ignore if the channel is going away;
 * route to creation continuation when not yet connected, cancel the move on
 * failure, otherwise dispatch by move role (initiate / respond / cancel).
 * NOTE(review): elided view — the early return after unlock is missing.
 */
4816 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4818 u8 local_amp_id = chan->local_amp_id;
4819 u8 remote_amp_id = chan->remote_amp_id;
4821 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4822 chan, result, local_amp_id, remote_amp_id);
4824 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4825 l2cap_chan_unlock(chan);
4829 if (chan->state != BT_CONNECTED) {
4830 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4831 } else if (result != L2CAP_MR_SUCCESS) {
4832 l2cap_do_move_cancel(chan, result);
4834 switch (chan->move_role) {
4835 case L2CAP_MOVE_ROLE_INITIATOR:
4836 l2cap_do_move_initiate(chan, local_amp_id,
4839 case L2CAP_MOVE_ROLE_RESPONDER:
4840 l2cap_do_move_respond(chan, result);
4843 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request.  Rejects when high speed is off,
 * the channel is unknown, the channel/mode cannot be moved, the destination
 * equals the current controller, or the destination AMP id is invalid.
 * Handles move collisions by bd_addr comparison, then becomes responder and
 * either prepares the AMP move (PEND) or accepts a move back to BR/EDR.
 * NOTE(review): elided view — returns/labels between branches are missing.
 */
4849 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4850 struct l2cap_cmd_hdr *cmd,
4851 u16 cmd_len, void *data)
4853 struct l2cap_move_chan_req *req = data;
4854 struct l2cap_move_chan_rsp rsp;
4855 struct l2cap_chan *chan;
4857 u16 result = L2CAP_MR_NOT_ALLOWED;
4859 if (cmd_len != sizeof(*req))
4862 icid = le16_to_cpu(req->icid);
4864 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4866 if (!conn->hs_enabled)
4869 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown icid: answer NOT_ALLOWED directly, there is no channel. */
4871 rsp.icid = cpu_to_le16(icid);
4872 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4873 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4878 chan->ident = cmd->ident;
/* Only dynamic-CID ERTM/streaming channels without BR/EDR-only policy
 * may be moved.
 */
4880 if (chan->scid < L2CAP_CID_DYN_START ||
4881 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4882 (chan->mode != L2CAP_MODE_ERTM &&
4883 chan->mode != L2CAP_MODE_STREAMING)) {
4884 result = L2CAP_MR_NOT_ALLOWED;
4885 goto send_move_response;
4888 if (chan->local_amp_id == req->dest_amp_id) {
4889 result = L2CAP_MR_SAME_ID;
4890 goto send_move_response;
4893 if (req->dest_amp_id != AMP_ID_BREDR) {
4894 struct hci_dev *hdev;
4895 hdev = hci_dev_get(req->dest_amp_id);
4896 if (!hdev || hdev->dev_type != HCI_AMP ||
4897 !test_bit(HCI_UP, &hdev->flags)) {
4901 result = L2CAP_MR_BAD_ID;
4902 goto send_move_response;
4907 /* Detect a move collision. Only send a collision response
4908 * if this side has "lost", otherwise proceed with the move.
4909 * The winner has the larger bd_addr.
4911 if ((__chan_is_moving(chan) ||
4912 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4913 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4914 result = L2CAP_MR_COLLISION;
4915 goto send_move_response;
4918 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4919 l2cap_move_setup(chan);
4920 chan->move_id = req->dest_amp_id;
4923 if (req->dest_amp_id == AMP_ID_BREDR) {
4924 /* Moving to BR/EDR */
4925 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4926 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4927 result = L2CAP_MR_PEND;
4929 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4930 result = L2CAP_MR_SUCCESS;
4933 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4934 /* Placeholder - uncomment when amp functions are available */
4935 /*amp_accept_physical(chan, req->dest_amp_id);*/
4936 result = L2CAP_MR_PEND;
4940 l2cap_send_move_chan_rsp(chan, result);
4942 l2cap_chan_unlock(chan);
/* Advance the initiator's move state machine after a SUCCESS/PEND Move
 * Channel Response.  Manages the move timer, and depending on move_state
 * either waits for the logical link, sends the Move Confirm, or aborts with
 * an UNCONFIRMED confirm.  Unknown icid gets an unconfirmed confirm.
 * NOTE(review): elided view — returns/breaks between cases are missing, and
 * hchan acquisition is still a placeholder (NULL).
 */
4947 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4949 struct l2cap_chan *chan;
4950 struct hci_chan *hchan = NULL;
4952 chan = l2cap_get_chan_by_scid(conn, icid)
4954 l2cap_send_move_chan_cfm_icid(conn, icid);
4958 __clear_chan_timer(chan);
4959 if (result == L2CAP_MR_PEND)
4960 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4962 switch (chan->move_state) {
4963 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4964 /* Move confirm will be sent when logical link
4967 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4969 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4970 if (result == L2CAP_MR_PEND) {
4972 } else if (test_bit(CONN_LOCAL_BUSY,
4973 &chan->conn_state)) {
4974 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4976 /* Logical link is up or moving to BR/EDR,
4979 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4980 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4983 case L2CAP_MOVE_WAIT_RSP:
4985 if (result == L2CAP_MR_SUCCESS) {
4986 /* Remote is ready, send confirm immediately
4987 * after logical link is ready
4989 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4991 /* Both logical link and move success
4992 * are required to confirm
4994 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4997 /* Placeholder - get hci_chan for logical link */
4999 /* Logical link not available */
5000 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5004 /* If the logical link is not yet connected, do not
5005 * send confirmation.
5007 if (hchan->state != BT_CONNECTED)
5010 /* Logical link is already ready to go */
5012 chan->hs_hcon = hchan->conn;
5013 chan->hs_hcon->l2cap_data = chan->conn;
5015 if (result == L2CAP_MR_SUCCESS) {
5016 /* Can confirm now */
5017 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5019 /* Now only need move success
5022 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5025 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5028 /* Any other amp move state means the move failed. */
5029 chan->move_id = chan->local_amp_id;
5030 l2cap_move_done(chan);
5031 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5034 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response.  A COLLISION result flips this
 * side to responder; any other failure cancels the move.  In both cases an
 * UNCONFIRMED confirm is sent.  If no channel matches the ident, confirm
 * against the bare icid.
 * NOTE(review): elided view — returns/braces are missing.
 */
5037 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5040 struct l2cap_chan *chan;
5042 chan = l2cap_get_chan_by_ident(conn, ident);
5044 /* Could not locate channel, icid is best guess */
5045 l2cap_send_move_chan_cfm_icid(conn, icid);
5049 __clear_chan_timer(chan);
5051 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5052 if (result == L2CAP_MR_COLLISION) {
5053 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5055 /* Cleanup - cancel move */
5056 chan->move_id = chan->local_amp_id;
5057 l2cap_move_done(chan);
5061 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5063 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Response: SUCCESS/PEND continues the move
 * state machine, anything else goes down the failure path.
 * NOTE(review): elided view — returns and the icid/result declarations are
 * missing.
 */
5066 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5067 struct l2cap_cmd_hdr *cmd,
5068 u16 cmd_len, void *data)
5070 struct l2cap_move_chan_rsp *rsp = data;
5073 if (cmd_len != sizeof(*rsp))
5076 icid = le16_to_cpu(rsp->icid);
5077 result = le16_to_cpu(rsp->result);
5079 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5081 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5082 l2cap_move_continue(conn, icid, result);
5084 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Confirm: commit (or roll back) the local
 * controller id, finish the move, and always acknowledge with a Confirm
 * Response — the spec requires a response even for an unknown icid.
 * NOTE(review): elided view — returns and braces are missing.
 */
5089 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5090 struct l2cap_cmd_hdr *cmd,
5091 u16 cmd_len, void *data)
5093 struct l2cap_move_chan_cfm *cfm = data;
5094 struct l2cap_chan *chan;
5097 if (cmd_len != sizeof(*cfm))
5100 icid = le16_to_cpu(cfm->icid);
5101 result = le16_to_cpu(cfm->result);
5103 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5105 chan = l2cap_get_chan_by_dcid(conn, icid);
5107 /* Spec requires a response even if the icid was not found */
5108 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5112 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5113 if (result == L2CAP_MC_CONFIRMED) {
5114 chan->local_amp_id = chan->move_id;
/* Moved back to BR/EDR: the high-speed link is no longer needed. */
5115 if (chan->local_amp_id == AMP_ID_BREDR)
5116 __release_logical_link(chan);
5118 chan->move_id = chan->local_amp_id;
5121 l2cap_move_done(chan);
5124 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5126 l2cap_chan_unlock(chan);
/* Handle the Confirm Response that completes a channel move on the confirm
 * sender's side: stop the move timer, commit move_id as the local AMP id,
 * release the logical link when landing on BR/EDR, and finish the move.
 * NOTE(review): elided view — returns and the icid declaration are missing.
 */
5131 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5132 struct l2cap_cmd_hdr *cmd,
5133 u16 cmd_len, void *data)
5135 struct l2cap_move_chan_cfm_rsp *rsp = data;
5136 struct l2cap_chan *chan;
5139 if (cmd_len != sizeof(*rsp))
5142 icid = le16_to_cpu(rsp->icid);
5144 BT_DBG("icid 0x%4.4x", icid);
5146 chan = l2cap_get_chan_by_scid(conn, icid);
5150 __clear_chan_timer(chan);
5152 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5153 chan->local_amp_id = chan->move_id;
5155 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5156 __release_logical_link(chan);
5158 l2cap_move_done(chan);
5161 l2cap_chan_unlock(chan);
/* Handle an LE Connection Parameter Update Request (master role only).
 * Validates the proposed min/max interval, latency and timeout via
 * hci_check_conn_params(), replies accepted/rejected, and on accept applies
 * the parameters and reports the store hint to mgmt.
 * NOTE(review): elided view — err/store_hint declarations, the accept-path
 * guard and returns are missing.
 */
5166 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5167 struct l2cap_cmd_hdr *cmd,
5168 u16 cmd_len, u8 *data)
5170 struct hci_conn *hcon = conn->hcon;
5171 struct l2cap_conn_param_update_req *req;
5172 struct l2cap_conn_param_update_rsp rsp;
5173 u16 min, max, latency, to_multiplier;
/* Only the master may grant a parameter update. */
5176 if (hcon->role != HCI_ROLE_MASTER)
5179 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5182 req = (struct l2cap_conn_param_update_req *) data;
5183 min = __le16_to_cpu(req->min);
5184 max = __le16_to_cpu(req->max);
5185 latency = __le16_to_cpu(req->latency);
5186 to_multiplier = __le16_to_cpu(req->to_multiplier);
5188 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5189 min, max, latency, to_multiplier);
5191 memset(&rsp, 0, sizeof(rsp));
5193 err = hci_check_conn_params(min, max, latency, to_multiplier);
5195 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5197 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5199 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the update to the controller and notify mgmt. */
5205 store_hint = hci_le_conn_update(hcon, min, max, latency,
5207 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5208 store_hint, min, max, latency,
/* Handle an LE Credit Based Connection Response.  Enforces the LE CoC
 * minimums (MTU/MPS >= 23) on success, matches the channel by ident, and on
 * success records the peer's dcid/mtu/mps/credits and marks the channel
 * ready; any failure result deletes the channel with ECONNREFUSED.
 * NOTE(review): elided view — returns, the !chan path, and some of the
 * success-path assignments are missing.
 */
5216 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5217 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5220 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5221 u16 dcid, mtu, mps, credits, result;
5222 struct l2cap_chan *chan;
5225 if (cmd_len < sizeof(*rsp))
5228 dcid = __le16_to_cpu(rsp->dcid);
5229 mtu = __le16_to_cpu(rsp->mtu);
5230 mps = __le16_to_cpu(rsp->mps);
5231 credits = __le16_to_cpu(rsp->credits);
5232 result = __le16_to_cpu(rsp->result);
/* 23 is the minimum MTU/MPS allowed for LE connection-oriented channels. */
5234 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5237 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5238 dcid, mtu, mps, credits, result);
5240 mutex_lock(&conn->chan_lock);
5242 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5250 l2cap_chan_lock(chan);
5253 case L2CAP_CR_SUCCESS:
5257 chan->remote_mps = mps;
5258 chan->tx_credits = credits;
5259 l2cap_chan_ready(chan);
5263 l2cap_chan_del(chan, ECONNREFUSED);
5267 l2cap_chan_unlock(chan);
5270 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signaling command to its handler.  Echo requests are
 * answered inline; unknown opcodes log an error (and, per the callers'
 * convention, result in a Command Reject).
 * NOTE(review): elided view — `int err = 0;`, break statements and the
 * final return are missing.
 */
5275 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5276 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5281 switch (cmd->code) {
5282 case L2CAP_COMMAND_REJ:
5283 l2cap_command_rej(conn, cmd, cmd_len, data);
5286 case L2CAP_CONN_REQ:
5287 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5290 case L2CAP_CONN_RSP:
5291 case L2CAP_CREATE_CHAN_RSP:
5292 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5295 case L2CAP_CONF_REQ:
5296 err = l2cap_config_req(conn, cmd, cmd_len, data);
5299 case L2CAP_CONF_RSP:
5300 l2cap_config_rsp(conn, cmd, cmd_len, data);
5303 case L2CAP_DISCONN_REQ:
5304 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5307 case L2CAP_DISCONN_RSP:
5308 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
/* Echo request: bounce the payload straight back. */
5311 case L2CAP_ECHO_REQ:
5312 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5315 case L2CAP_ECHO_RSP:
5318 case L2CAP_INFO_REQ:
5319 err = l2cap_information_req(conn, cmd, cmd_len, data);
5322 case L2CAP_INFO_RSP:
5323 l2cap_information_rsp(conn, cmd, cmd_len, data);
5326 case L2CAP_CREATE_CHAN_REQ:
5327 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5330 case L2CAP_MOVE_CHAN_REQ:
5331 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5334 case L2CAP_MOVE_CHAN_RSP:
5335 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5338 case L2CAP_MOVE_CHAN_CFM:
5339 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5342 case L2CAP_MOVE_CHAN_CFM_RSP:
5343 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5347 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request.  Validates length and LE
 * CoC minimums, finds a listening channel for the PSM, checks SMP security,
 * rejects duplicate dcids, creates and initializes the new flow-control
 * channel, and responds (or defers via the DEFER_SETUP path, where the
 * response is sent later and CR_PEND is used internally to skip it here).
 * NOTE(review): elided view — psm/dcid declarations, returns, and some
 * response-path lines are missing.
 */
5355 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5356 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5359 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5360 struct l2cap_le_conn_rsp rsp;
5361 struct l2cap_chan *chan, *pchan;
5362 u16 dcid, scid, credits, mtu, mps;
5366 if (cmd_len != sizeof(*req))
5369 scid = __le16_to_cpu(req->scid);
5370 mtu = __le16_to_cpu(req->mtu);
5371 mps = __le16_to_cpu(req->mps);
/* 23 is the minimum MTU/MPS for LE connection-oriented channels. */
5376 if (mtu < 23 || mps < 23)
5379 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5382 /* Check if we have socket listening on psm */
5383 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5384 &conn->hcon->dst, LE_LINK);
5386 result = L2CAP_CR_BAD_PSM;
5391 mutex_lock(&conn->chan_lock);
5392 l2cap_chan_lock(pchan);
5394 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5396 result = L2CAP_CR_AUTHENTICATION;
5398 goto response_unlock;
5401 /* Check if we already have channel with that dcid */
5402 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5403 result = L2CAP_CR_NO_MEM;
5405 goto response_unlock;
5408 chan = pchan->ops->new_connection(pchan);
5410 result = L2CAP_CR_NO_MEM;
5411 goto response_unlock;
5414 l2cap_le_flowctl_init(chan);
5416 bacpy(&chan->src, &conn->hcon->src);
5417 bacpy(&chan->dst, &conn->hcon->dst);
5418 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5419 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5423 chan->remote_mps = mps;
5424 chan->tx_credits = __le16_to_cpu(req->credits);
5426 __l2cap_chan_add(conn, chan);
5428 credits = chan->rx_credits;
5430 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5432 chan->ident = cmd->ident;
5434 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5435 l2cap_state_change(chan, BT_CONNECT2);
5436 /* The following result value is actually not defined
5437 * for LE CoC but we use it to let the function know
5438 * that it should bail out after doing its cleanup
5439 * instead of sending a response.
5441 result = L2CAP_CR_PEND;
5442 chan->ops->defer(chan);
5444 l2cap_chan_ready(chan);
5445 result = L2CAP_CR_SUCCESS;
5449 l2cap_chan_unlock(pchan);
5450 mutex_unlock(&conn->chan_lock);
5451 l2cap_chan_put(pchan);
5453 if (result == L2CAP_CR_PEND)
5458 rsp.mtu = cpu_to_le16(chan->imtu);
5459 rsp.mps = cpu_to_le16(chan->mps);
5465 rsp.dcid = cpu_to_le16(dcid);
5466 rsp.credits = cpu_to_le16(credits);
5467 rsp.result = cpu_to_le16(result);
5469 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet.  Adds the peer's credits to the
 * channel, disconnecting on overflow past LE_FLOWCTL_MAX_CREDITS, then
 * drains queued outgoing SDUs while credits remain and resumes the socket.
 * NOTE(review): elided view — returns and the !chan path are missing.
 */
5474 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5475 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5478 struct l2cap_le_credits *pkt;
5479 struct l2cap_chan *chan;
5480 u16 cid, credits, max_credits;
5482 if (cmd_len != sizeof(*pkt))
5485 pkt = (struct l2cap_le_credits *) data;
5486 cid = __le16_to_cpu(pkt->cid);
5487 credits = __le16_to_cpu(pkt->credits);
5489 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5491 chan = l2cap_get_chan_by_dcid(conn, cid);
/* Peer may never push tx_credits past the protocol maximum. */
5495 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5496 if (credits > max_credits) {
5497 BT_ERR("LE credits overflow");
5498 l2cap_send_disconn_req(chan, ECONNRESET);
5499 l2cap_chan_unlock(chan);
5501 /* Return 0 so that we don't trigger an unnecessary
5502 * command reject packet.
5507 chan->tx_credits += credits;
/* Flush as many queued SDUs as the new credits allow. */
5509 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5510 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5514 if (chan->tx_credits)
5515 chan->ops->resume(chan);
5517 l2cap_chan_unlock(chan);
/* Handle a Command Reject on the LE signaling channel: if it matches a
 * pending request's ident, drop that channel with ECONNREFUSED.
 * NOTE(review): elided view — returns and the !chan guard are missing.
 */
5522 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5523 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5526 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5527 struct l2cap_chan *chan;
5529 if (cmd_len < sizeof(*rej))
5532 mutex_lock(&conn->chan_lock);
5534 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5538 l2cap_chan_lock(chan);
5539 l2cap_chan_del(chan, ECONNREFUSED);
5540 l2cap_chan_unlock(chan);
5543 mutex_unlock(&conn->chan_lock);
/* Dispatch one LE signaling command to its handler; unknown opcodes log an
 * error (and, per the caller, trigger a Command Reject).
 * NOTE(review): elided view — `int err = 0;`, breaks and the final return
 * are missing.
 */
5547 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5548 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5553 switch (cmd->code) {
5554 case L2CAP_COMMAND_REJ:
5555 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5558 case L2CAP_CONN_PARAM_UPDATE_REQ:
5559 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5562 case L2CAP_CONN_PARAM_UPDATE_RSP:
5565 case L2CAP_LE_CONN_RSP:
5566 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5569 case L2CAP_LE_CONN_REQ:
5570 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5573 case L2CAP_LE_CREDITS:
5574 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5577 case L2CAP_DISCONN_REQ:
5578 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5581 case L2CAP_DISCONN_RSP:
5582 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5586 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse one signaling PDU from the LE signaling channel.  LE carries
 * exactly one command per PDU; malformed or mismatched-length commands are
 * dropped, and handler errors produce a NOT_UNDERSTOOD Command Reject.
 * NOTE(review): elided view — gotos/labels and skb free are missing.
 */
5594 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5595 struct sk_buff *skb)
5597 struct hci_conn *hcon = conn->hcon;
5598 struct l2cap_cmd_hdr *cmd;
5602 if (hcon->type != LE_LINK)
5605 if (skb->len < L2CAP_CMD_HDR_SIZE)
5608 cmd = (void *) skb->data;
5609 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5611 len = le16_to_cpu(cmd->len);
5613 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* LE: the single command must fill the PDU exactly, ident must be set. */
5615 if (len != skb->len || !cmd->ident) {
5616 BT_DBG("corrupted command");
5620 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5622 struct l2cap_cmd_rej_unk rej;
5624 BT_ERR("Wrong link type (%d)", err);
5626 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5627 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Parse the BR/EDR signaling channel: a PDU may carry several commands, so
 * iterate header-by-header, dispatching each to l2cap_bredr_sig_cmd() and
 * rejecting failures with NOT_UNDERSTOOD.  Raw sockets see the PDU first
 * via l2cap_raw_recv().
 * NOTE(review): elided view — len/err declarations, loop advance and skb
 * free are missing.
 */
5635 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5636 struct sk_buff *skb)
5638 struct hci_conn *hcon = conn->hcon;
5639 u8 *data = skb->data;
5641 struct l2cap_cmd_hdr cmd;
5644 l2cap_raw_recv(conn, skb);
5646 if (hcon->type != ACL_LINK)
5649 while (len >= L2CAP_CMD_HDR_SIZE) {
5651 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5652 data += L2CAP_CMD_HDR_SIZE;
5653 len -= L2CAP_CMD_HDR_SIZE;
5655 cmd_len = le16_to_cpu(cmd.len);
5657 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5660 if (cmd_len > len || !cmd.ident) {
5661 BT_DBG("corrupted command");
5665 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5667 struct l2cap_cmd_rej_unk rej;
5669 BT_ERR("Wrong link type (%d)", err);
5671 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5672 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify and strip the CRC16 FCS on a received ERTM/streaming frame.  The
 * CRC covers the L2CAP header (enhanced or extended, depending on
 * FLAG_EXT_CTRL) plus the payload; the trailing 2 FCS bytes are trimmed
 * before comparison.
 * NOTE(review): elided view — return statements are missing.
 */
5684 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5686 u16 our_fcs, rcv_fcs;
5689 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5690 hdr_size = L2CAP_EXT_HDR_SIZE;
5692 hdr_size = L2CAP_ENH_HDR_SIZE;
5694 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim FCS first; the received FCS then sits just past skb->len. */
5695 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5696 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5697 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5699 if (our_fcs != rcv_fcs)
/* After local-busy state changes, tell the peer our status with the F-bit
 * set: RNR if still busy, otherwise resume (restart retransmission timer,
 * flush pending I-frames) and send an RR only if no I/S-frame carried the
 * F-bit already.
 */
5705 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5707 struct l2cap_ctrl control;
5709 BT_DBG("chan %p", chan);
5711 memset(&control, 0, sizeof(control));
5714 control.reqseq = chan->buffer_seq;
5715 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5717 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5718 control.super = L2CAP_SUPER_RNR;
5719 l2cap_send_sframe(chan, &control);
5722 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5723 chan->unacked_frames > 0)
5724 __set_retrans_timer(chan);
5726 /* Send pending iframes */
5727 l2cap_ertm_send(chan);
5729 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5730 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5731 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5734 control.super = L2CAP_SUPER_RR;
5735 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list (tracking the tail via *last_frag for
 * O(1) appends) and account its length into skb->len/data_len/truesize.
 */
5739 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5740 struct sk_buff **last_frag)
5742 /* skb->len reflects data in skb as well as all fragments
5743 * skb->data_len reflects only data in fragments
5745 if (!skb_has_frag_list(skb))
5746 skb_shinfo(skb)->frag_list = new_frag;
5748 new_frag->next = NULL;
5750 (*last_frag)->next = new_frag;
5751 *last_frag = new_frag;
5753 skb->len += new_frag->len;
5754 skb->data_len += new_frag->len;
5755 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from SAR-tagged I-frames and deliver completed
 * SDUs via chan->ops->recv().  NOTE(review): error branches between
 * the visible lines are elided in this view; the kfree_skb() at the
 * bottom discards a partially assembled SDU on a failure path.
 */
5758 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5759 struct l2cap_ctrl *control)
5763 switch (control->sar) {
5764 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame: deliver directly */
5768 err = chan->ops->recv(chan, skb);
5771 case L2CAP_SAR_START:
/* First fragment carries the total SDU length prefix */
5775 chan->sdu_len = get_unaligned_le16(skb->data);
5776 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Reject SDUs larger than our incoming MTU */
5778 if (chan->sdu_len > chan->imtu) {
5783 if (skb->len >= chan->sdu_len)
5787 chan->sdu_last_frag = skb;
5793 case L2CAP_SAR_CONTINUE:
5797 append_skb_frag(chan->sdu, skb,
5798 &chan->sdu_last_frag);
5801 if (chan->sdu->len >= chan->sdu_len)
/* SAR end fragment: append, then the reassembled length must match
 * the advertised SDU length exactly.
 */
5811 append_skb_frag(chan->sdu, skb,
5812 &chan->sdu_last_frag);
5815 if (chan->sdu->len != chan->sdu_len)
5818 err = chan->ops->recv(chan, chan->sdu);
5821 /* Reassembly complete */
5823 chan->sdu_last_frag = NULL;
/* Failure path: drop whatever was partially reassembled */
5831 kfree_skb(chan->sdu);
5833 chan->sdu_last_frag = NULL;
/* Re-segment queued outgoing data after an MTU change (used when a
 * channel move completes).  NOTE(review): body elided in this view.
 */
5840 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy state change into the ERTM tx state machine.
 * Only meaningful for ERTM channels; other modes are ignored.
 */
5846 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5850 if (chan->mode != L2CAP_MODE_ERTM)
5853 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5854 l2cap_tx(chan, NULL, NULL, event);
/* Drain in-order frames from the SREJ queue into SDU reassembly,
 * stopping at the first sequence gap or when local busy is asserted.
 * Once the queue empties, return to the RECV state and ack.
 */
5857 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5860 /* Pass sequential frames to l2cap_reassemble_sdu()
5861 * until a gap is encountered.
5864 BT_DBG("chan %p", chan);
5866 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5867 struct sk_buff *skb;
5868 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5869 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5871 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5876 skb_unlink(skb, &chan->srej_q);
5877 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5878 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* All requested retransmissions received: recovery is complete */
5883 if (skb_queue_empty(&chan->srej_q)) {
5884 chan->rx_state = L2CAP_RX_STATE_RECV;
5885 l2cap_send_ack(chan);
/* Handle an incoming SREJ S-frame: retransmit the single requested
 * I-frame, disconnecting on an invalid reqseq or when the per-frame
 * retry limit has been exhausted.
 */
5891 static void l2cap_handle_srej(struct l2cap_chan *chan,
5892 struct l2cap_ctrl *control)
5894 struct sk_buff *skb;
5896 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq requests a frame never sent */
5898 if (control->reqseq == chan->next_tx_seq) {
5899 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5900 l2cap_send_disconn_req(chan, ECONNRESET);
5904 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5907 BT_DBG("Seq %d not available for retransmission",
/* max_tx == 0 means unlimited retransmissions */
5912 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5913 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5914 l2cap_send_disconn_req(chan, ECONNRESET);
5918 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5920 if (control->poll) {
5921 l2cap_pass_to_tx(chan, control);
/* P=1: retransmission must answer with the F-bit set */
5923 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5924 l2cap_retransmit(chan, control);
5925 l2cap_ertm_send(chan);
5927 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5928 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5929 chan->srej_save_reqseq = control->reqseq;
5932 l2cap_pass_to_tx_fbit(chan, control);
5934 if (control->final) {
/* F=1: only retransmit if this SREJ was not already answered
 * (SREJ_ACT tracks an outstanding SREJ for srej_save_reqseq).
 */
5935 if (chan->srej_save_reqseq != control->reqseq ||
5936 !test_and_clear_bit(CONN_SREJ_ACT,
5938 l2cap_retransmit(chan, control);
5940 l2cap_retransmit(chan, control);
5941 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5942 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5943 chan->srej_save_reqseq = control->reqseq;
/* Handle an incoming REJ S-frame: retransmit all unacked I-frames
 * starting at reqseq, disconnecting on an invalid reqseq or when the
 * retry limit for the first rejected frame has been exhausted.
 */
5949 static void l2cap_handle_rej(struct l2cap_chan *chan,
5950 struct l2cap_ctrl *control)
5952 struct sk_buff *skb;
5954 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq rejects a frame never sent */
5956 if (control->reqseq == chan->next_tx_seq) {
5957 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5958 l2cap_send_disconn_req(chan, ECONNRESET);
5962 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5964 if (chan->max_tx && skb &&
5965 bt_cb(skb)->control.retries >= chan->max_tx) {
5966 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5967 l2cap_send_disconn_req(chan, ECONNRESET);
5971 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5973 l2cap_pass_to_tx(chan, control);
5975 if (control->final) {
/* F=1: retransmit only if this REJ was not already acted upon */
5976 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5977 l2cap_retransmit_all(chan, control);
5979 l2cap_retransmit_all(chan, control);
5980 l2cap_ertm_send(chan);
5981 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5982 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the expected
 * sequence number and the tx window: expected, duplicate, unexpected
 * (gap), or invalid.  Extra SREJ-related classifications apply while
 * in the SREJ_SENT recovery state.
 */
5986 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5988 BT_DBG("chan %p, txseq %d", chan, txseq);
5990 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5991 chan->expected_tx_seq);
5993 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5994 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5996 /* See notes below regarding "double poll" and
5999 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6000 BT_DBG("Invalid/Ignore - after SREJ");
6001 return L2CAP_TXSEQ_INVALID_IGNORE;
6003 BT_DBG("Invalid - in window after SREJ sent");
6004 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list: this is the retransmission we asked for */
6008 if (chan->srej_list.head == txseq) {
6009 BT_DBG("Expected SREJ");
6010 return L2CAP_TXSEQ_EXPECTED_SREJ;
6013 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6014 BT_DBG("Duplicate SREJ - txseq already stored");
6015 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6018 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6019 BT_DBG("Unexpected SREJ - not requested");
6020 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6024 if (chan->expected_tx_seq == txseq) {
/* Expected seq outside the tx window is still invalid */
6025 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6027 BT_DBG("Invalid - txseq outside tx window");
6028 return L2CAP_TXSEQ_INVALID;
6031 return L2CAP_TXSEQ_EXPECTED;
/* txseq before expected_tx_seq within the window: already seen */
6035 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6036 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6037 BT_DBG("Duplicate - expected_tx_seq later than txseq")
6038 return L2CAP_TXSEQ_DUPLICATE;
6041 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6042 /* A source of invalid packets is a "double poll" condition,
6043 * where delays cause us to send multiple poll packets. If
6044 * the remote stack receives and processes both polls,
6045 * sequence numbers can wrap around in such a way that a
6046 * resent frame has a sequence number that looks like new data
6047 * with a sequence gap. This would trigger an erroneous SREJ
6050 * Fortunately, this is impossible with a tx window that's
6051 * less than half of the maximum sequence number, which allows
6052 * invalid frames to be safely ignored.
6054 * With tx window sizes greater than half of the tx window
6055 * maximum, the frame is invalid and cannot be ignored. This
6056 * causes a disconnect.
6059 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6060 BT_DBG("Invalid/Ignore - txseq outside tx window");
6061 return L2CAP_TXSEQ_INVALID_IGNORE;
6063 BT_DBG("Invalid - txseq outside tx window");
6064 return L2CAP_TXSEQ_INVALID;
6067 BT_DBG("Unexpected - txseq indicates missing frames");
6068 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine: normal RECV state.  Dispatches incoming
 * I-frames (by txseq classification) and RR/RNR/REJ/SREJ S-frames.
 * NOTE(review): several lines (breaks, skb_in_use assignments, error
 * branches) are elided in this view.
 */
6072 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6073 struct l2cap_ctrl *control,
6074 struct sk_buff *skb, u8 event)
6077 bool skb_in_use = false;
6079 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6083 case L2CAP_EV_RECV_IFRAME:
6084 switch (l2cap_classify_txseq(chan, control->txseq)) {
6085 case L2CAP_TXSEQ_EXPECTED:
6086 l2cap_pass_to_tx(chan, control);
/* While locally busy, expected frames are dropped; recovery will
 * re-request them once busy clears.
 */
6088 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6089 BT_DBG("Busy, discarding expected seq %d",
6094 chan->expected_tx_seq = __next_seq(chan,
6097 chan->buffer_seq = chan->expected_tx_seq;
6100 err = l2cap_reassemble_sdu(chan, skb, control);
6104 if (control->final) {
6105 if (!test_and_clear_bit(CONN_REJ_ACT,
6106 &chan->conn_state)) {
6108 l2cap_retransmit_all(chan, control);
6109 l2cap_ertm_send(chan);
6113 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6114 l2cap_send_ack(chan);
6116 case L2CAP_TXSEQ_UNEXPECTED:
6117 l2cap_pass_to_tx(chan, control);
6119 /* Can't issue SREJ frames in the local busy state.
6120 * Drop this frame, it will be seen as missing
6121 * when local busy is exited.
6123 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6124 BT_DBG("Busy, discarding unexpected seq %d",
6129 /* There was a gap in the sequence, so an SREJ
6130 * must be sent for each missing frame. The
6131 * current frame is stored for later use.
6133 skb_queue_tail(&chan->srej_q, skb);
6135 BT_DBG("Queued %p (queue len %d)", skb,
6136 skb_queue_len(&chan->srej_q));
6138 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6139 l2cap_seq_list_clear(&chan->srej_list);
6140 l2cap_send_srej(chan, control->txseq);
/* Enter SREJ recovery until the gap is filled */
6142 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6144 case L2CAP_TXSEQ_DUPLICATE:
6145 l2cap_pass_to_tx(chan, control);
6147 case L2CAP_TXSEQ_INVALID_IGNORE:
6149 case L2CAP_TXSEQ_INVALID:
6151 l2cap_send_disconn_req(chan, ECONNRESET);
6155 case L2CAP_EV_RECV_RR:
6156 l2cap_pass_to_tx(chan, control);
6157 if (control->final) {
6158 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6160 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6161 !__chan_is_moving(chan)) {
6163 l2cap_retransmit_all(chan, control);
6166 l2cap_ertm_send(chan);
6167 } else if (control->poll) {
6168 l2cap_send_i_or_rr_or_rnr(chan);
6170 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6171 &chan->conn_state) &&
6172 chan->unacked_frames)
6173 __set_retrans_timer(chan);
6175 l2cap_ertm_send(chan);
6178 case L2CAP_EV_RECV_RNR:
/* Peer is busy: stop retransmitting until it recovers */
6179 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6180 l2cap_pass_to_tx(chan, control);
6181 if (control && control->poll) {
6182 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6183 l2cap_send_rr_or_rnr(chan, 0);
6185 __clear_retrans_timer(chan);
6186 l2cap_seq_list_clear(&chan->retrans_list);
6188 case L2CAP_EV_RECV_REJ:
6189 l2cap_handle_rej(chan, control);
6191 case L2CAP_EV_RECV_SREJ:
6192 l2cap_handle_srej(chan, control);
/* Frames not queued or consumed above are freed here */
6198 if (skb && !skb_in_use) {
6199 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine: SREJ_SENT recovery state.  Incoming
 * I-frames are queued on srej_q until the requested retransmissions
 * fill the sequence gap.  NOTE(review): breaks, skb_in_use updates
 * and some branches are elided in this view.
 */
6206 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6207 struct l2cap_ctrl *control,
6208 struct sk_buff *skb, u8 event)
6211 u16 txseq = control->txseq;
6212 bool skb_in_use = false;
6214 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6218 case L2CAP_EV_RECV_IFRAME:
6219 switch (l2cap_classify_txseq(chan, txseq)) {
6220 case L2CAP_TXSEQ_EXPECTED:
6221 /* Keep frame for reassembly later */
6222 l2cap_pass_to_tx(chan, control);
6223 skb_queue_tail(&chan->srej_q, skb);
6225 BT_DBG("Queued %p (queue len %d)", skb,
6226 skb_queue_len(&chan->srej_q));
6228 chan->expected_tx_seq = __next_seq(chan, txseq);
6230 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* A requested retransmission arrived: drop it from the SREJ list
 * and try to drain the queue in order.
 */
6231 l2cap_seq_list_pop(&chan->srej_list);
6233 l2cap_pass_to_tx(chan, control);
6234 skb_queue_tail(&chan->srej_q, skb);
6236 BT_DBG("Queued %p (queue len %d)", skb,
6237 skb_queue_len(&chan->srej_q));
6239 err = l2cap_rx_queued_iframes(chan);
6244 case L2CAP_TXSEQ_UNEXPECTED:
6245 /* Got a frame that can't be reassembled yet.
6246 * Save it for later, and send SREJs to cover
6247 * the missing frames.
6249 skb_queue_tail(&chan->srej_q, skb);
6251 BT_DBG("Queued %p (queue len %d)", skb,
6252 skb_queue_len(&chan->srej_q));
6254 l2cap_pass_to_tx(chan, control);
6255 l2cap_send_srej(chan, control->txseq);
6257 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6258 /* This frame was requested with an SREJ, but
6259 * some expected retransmitted frames are
6260 * missing. Request retransmission of missing
6263 skb_queue_tail(&chan->srej_q, skb);
6265 BT_DBG("Queued %p (queue len %d)", skb,
6266 skb_queue_len(&chan->srej_q));
6268 l2cap_pass_to_tx(chan, control);
6269 l2cap_send_srej_list(chan, control->txseq);
6271 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6272 /* We've already queued this frame. Drop this copy. */
6273 l2cap_pass_to_tx(chan, control);
6275 case L2CAP_TXSEQ_DUPLICATE:
6276 /* Expecting a later sequence number, so this frame
6277 * was already received. Ignore it completely.
6280 case L2CAP_TXSEQ_INVALID_IGNORE:
6282 case L2CAP_TXSEQ_INVALID:
6284 l2cap_send_disconn_req(chan, ECONNRESET);
6288 case L2CAP_EV_RECV_RR:
6289 l2cap_pass_to_tx(chan, control);
6290 if (control->final) {
6291 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6293 if (!test_and_clear_bit(CONN_REJ_ACT,
6294 &chan->conn_state)) {
6296 l2cap_retransmit_all(chan, control);
6299 l2cap_ertm_send(chan);
6300 } else if (control->poll) {
6301 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6302 &chan->conn_state) &&
6303 chan->unacked_frames) {
6304 __set_retrans_timer(chan);
/* Poll while in SREJ recovery: answer with the tail SREJ */
6307 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6308 l2cap_send_srej_tail(chan);
6310 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6311 &chan->conn_state) &&
6312 chan->unacked_frames)
6313 __set_retrans_timer(chan);
6315 l2cap_send_ack(chan);
6318 case L2CAP_EV_RECV_RNR:
6319 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6320 l2cap_pass_to_tx(chan, control);
6321 if (control->poll) {
6322 l2cap_send_srej_tail(chan);
/* Non-poll RNR: acknowledge with a plain RR S-frame */
6324 struct l2cap_ctrl rr_control;
6325 memset(&rr_control, 0, sizeof(rr_control));
6326 rr_control.sframe = 1;
6327 rr_control.super = L2CAP_SUPER_RR;
6328 rr_control.reqseq = chan->buffer_seq;
6329 l2cap_send_sframe(chan, &rr_control);
6333 case L2CAP_EV_RECV_REJ:
6334 l2cap_handle_rej(chan, control);
6336 case L2CAP_EV_RECV_SREJ:
6337 l2cap_handle_srej(chan, control);
/* Frames not queued above are freed here */
6341 if (skb && !skb_in_use) {
6342 BT_DBG("Freeing %p", skb);
/* Complete a channel move: return to the RECV state, adopt the MTU of
 * the new link (AMP block MTU vs. ACL MTU), and re-segment queued data.
 */
6349 static int l2cap_finish_move(struct l2cap_chan *chan)
6351 BT_DBG("chan %p", chan);
6353 chan->rx_state = L2CAP_RX_STATE_RECV;
6356 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6358 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6360 return l2cap_resegment(chan);
/* ERTM receive state machine: WAIT_P state (channel move, waiting for
 * a poll from the peer).  On the poll, rewind the transmit queue to
 * the acknowledged point, finish the move, and answer with the F-bit;
 * other events fall through to the normal RECV handler.
 */
6363 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6364 struct l2cap_ctrl *control,
6365 struct sk_buff *skb, u8 event)
6369 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6375 l2cap_process_reqseq(chan, control->reqseq);
6377 if (!skb_queue_empty(&chan->tx_q))
6378 chan->tx_send_head = skb_peek(&chan->tx_q);
6380 chan->tx_send_head = NULL;
6382 /* Rewind next_tx_seq to the point expected
6385 chan->next_tx_seq = control->reqseq;
6386 chan->unacked_frames = 0;
6388 err = l2cap_finish_move(chan);
6392 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6393 l2cap_send_i_or_rr_or_rnr(chan);
6395 if (event == L2CAP_EV_RECV_IFRAME)
/* Non-I-frame events are re-dispatched to the RECV state handler */
6398 return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM receive state machine: WAIT_F state (channel move, waiting for
 * a frame with the F-bit).  On F=1, rewind the transmit queue, adopt
 * the new link MTU, re-segment, then process the frame normally.
 */
6401 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6402 struct l2cap_ctrl *control,
6403 struct sk_buff *skb, u8 event)
6407 if (!control->final)
6410 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6412 chan->rx_state = L2CAP_RX_STATE_RECV;
6413 l2cap_process_reqseq(chan, control->reqseq);
6415 if (!skb_queue_empty(&chan->tx_q))
6416 chan->tx_send_head = skb_peek(&chan->tx_q);
6418 chan->tx_send_head = NULL;
6420 /* Rewind next_tx_seq to the point expected
6423 chan->next_tx_seq = control->reqseq;
6424 chan->unacked_frames = 0;
/* New link's MTU: AMP block MTU if moved to high-speed, else ACL */
6427 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6429 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6431 err = l2cap_resegment(chan);
6434 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Check that @reqseq acknowledges a frame that was actually sent and
 * is still awaiting acknowledgement (modular sequence arithmetic).
 */
6439 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6441 /* Make sure reqseq is for a packet that has been sent but not acked */
6444 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6445 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatch: validate reqseq, then hand the
 * event to the handler for the channel's current rx_state.  An
 * invalid reqseq triggers a disconnect.
 */
6448 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6449 struct sk_buff *skb, u8 event)
6453 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6454 control, skb, event, chan->rx_state);
6456 if (__valid_reqseq(chan, control->reqseq)) {
6457 switch (chan->rx_state) {
6458 case L2CAP_RX_STATE_RECV:
6459 err = l2cap_rx_state_recv(chan, control, skb, event);
6461 case L2CAP_RX_STATE_SREJ_SENT:
6462 err = l2cap_rx_state_srej_sent(chan, control, skb,
6465 case L2CAP_RX_STATE_WAIT_P:
6466 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6468 case L2CAP_RX_STATE_WAIT_F:
6469 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6476 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6477 control->reqseq, chan->next_tx_seq,
6478 chan->expected_ack_seq);
6479 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: deliver in-sequence frames, drop everything
 * else (no retransmission in streaming mode), and resynchronize the
 * expected sequence number to the received txseq.
 */
6485 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6486 struct sk_buff *skb)
6490 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6493 if (l2cap_classify_txseq(chan, control->txseq) ==
6494 L2CAP_TXSEQ_EXPECTED) {
6495 l2cap_pass_to_tx(chan, control);
6497 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6498 __next_seq(chan, chan->buffer_seq));
6500 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6502 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: abandon any partial SDU and drop it */
6505 kfree_skb(chan->sdu);
6508 chan->sdu_last_frag = NULL;
6512 BT_DBG("Freeing %p", skb);
/* Track the peer's sequence even across drops */
6517 chan->last_acked_seq = control->txseq;
6518 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Validate and dispatch an ERTM/streaming data frame: check FCS and
 * payload size against MPS, then route I-frames to the rx state
 * machine (or the streaming handler) and S-frames to their events.
 * NOTE(review): some drop/return lines are elided in this view.
 */
6523 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6525 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6529 __unpack_control(chan, skb);
6534 * We can just drop the corrupted I-frame here.
6535 * Receiver will miss it and start proper recovery
6536 * procedures and ask for retransmission.
6538 if (l2cap_check_fcs(chan, skb))
/* Payload length excludes the SDU-length prefix and the FCS */
6541 if (!control->sframe && control->sar == L2CAP_SAR_START)
6542 len -= L2CAP_SDULEN_SIZE;
6544 if (chan->fcs == L2CAP_FCS_CRC16)
6545 len -= L2CAP_FCS_SIZE;
6547 if (len > chan->mps) {
6548 l2cap_send_disconn_req(chan, ECONNRESET);
6552 if (!control->sframe) {
6555 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6556 control->sar, control->reqseq, control->final,
6559 /* Validate F-bit - F=0 always valid, F=1 only
6560 * valid in TX WAIT_F
6562 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6565 if (chan->mode != L2CAP_MODE_STREAMING) {
6566 event = L2CAP_EV_RECV_IFRAME;
6567 err = l2cap_rx(chan, control, skb, event);
6569 err = l2cap_stream_rx(chan, control, skb);
6573 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map S-frame super field to the rx state machine event */
6575 const u8 rx_func_to_event[4] = {
6576 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6577 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6580 /* Only I-frames are expected in streaming mode */
6581 if (chan->mode == L2CAP_MODE_STREAMING)
6584 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6585 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are a protocol error */
6589 BT_ERR("Trailing bytes: %d in sframe", len);
6590 l2cap_send_disconn_req(chan, ECONNRESET);
6594 /* Validate F and P bits */
6595 if (control->final && (control->poll ||
6596 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6599 event = rx_func_to_event[control->super];
6600 if (l2cap_rx(chan, control, skb, event))
6601 l2cap_send_disconn_req(chan, ECONNRESET);
/* Return receive credits to the LE peer once our remaining credits
 * fall below half of the configured maximum, topping back up to
 * le_max_credits via an LE Flow Control Credit packet.
 */
6611 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6613 struct l2cap_conn *conn = chan->conn;
6614 struct l2cap_le_credits pkt;
6617 /* We return more credits to the sender only after the amount of
6618 * credits falls below half of the initial amount.
6620 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6623 return_credits = le_max_credits - chan->rx_credits;
6625 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6627 chan->rx_credits += return_credits;
6629 pkt.cid = cpu_to_le16(chan->scid);
6630 pkt.credits = cpu_to_le16(return_credits);
6632 chan->ident = l2cap_get_ident(conn);
6634 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive a frame on an LE credit-based channel: consume a credit,
 * validate sizes against the SDU-length prefix and IMTU, reassemble
 * multi-frame SDUs, and deliver via chan->ops->recv().  Any skb this
 * function takes responsibility for is freed internally (see the
 * comment at the bottom).  NOTE(review): some drop/goto lines are
 * elided in this view.
 */
6637 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6641 if (!chan->rx_credits) {
6642 BT_ERR("No credits to receive LE L2CAP data");
6643 l2cap_send_disconn_req(chan, ECONNRESET);
6647 if (chan->imtu < skb->len) {
6648 BT_ERR("Too big LE L2CAP PDU");
6653 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
/* Possibly top the sender's credits back up */
6655 l2cap_chan_le_send_credits(chan);
/* First frame of an SDU carries a 2-byte SDU length prefix */
6662 sdu_len = get_unaligned_le16(skb->data);
6663 skb_pull(skb, L2CAP_SDULEN_SIZE);
6665 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6666 sdu_len, skb->len, chan->imtu);
6668 if (sdu_len > chan->imtu) {
6669 BT_ERR("Too big LE L2CAP SDU length received");
6674 if (skb->len > sdu_len) {
6675 BT_ERR("Too much LE L2CAP data received");
/* Complete SDU in a single frame: deliver directly */
6680 if (skb->len == sdu_len)
6681 return chan->ops->recv(chan, skb);
6684 chan->sdu_len = sdu_len;
6685 chan->sdu_last_frag = skb;
/* Continuation frame: append to the SDU under assembly */
6690 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6691 chan->sdu->len, skb->len, chan->sdu_len);
6693 if (chan->sdu->len + skb->len > chan->sdu_len) {
6694 BT_ERR("Too much LE L2CAP data received");
6699 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6702 if (chan->sdu->len == chan->sdu_len) {
6703 err = chan->ops->recv(chan, chan->sdu);
6706 chan->sdu_last_frag = NULL;
/* Error path: discard the partially assembled SDU */
6714 kfree_skb(chan->sdu);
6716 chan->sdu_last_frag = NULL;
6720 /* We can't return an error here since we took care of the skb
6721 * freeing internally. An error return would cause the caller to
6722 * do a double-free of the skb.
/* Route an incoming data frame to the channel identified by @cid and
 * feed it to the handler for that channel's mode.  The channel lock
 * is held across delivery.  NOTE(review): the drop/unlock fall-through
 * lines are elided in this view.
 */
6727 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6728 struct sk_buff *skb)
6730 struct l2cap_chan *chan;
6732 chan = l2cap_get_chan_by_scid(conn, cid);
/* A2MP channels are created on demand for the A2MP fixed CID */
6734 if (cid == L2CAP_CID_A2MP) {
6735 chan = a2mp_channel_create(conn, skb);
6741 l2cap_chan_lock(chan);
6743 BT_DBG("unknown cid 0x%4.4x", cid);
6744 /* Drop packet and return */
6750 BT_DBG("chan %p, len %d", chan, skb->len);
6752 if (chan->state != BT_CONNECTED)
6755 switch (chan->mode) {
6756 case L2CAP_MODE_LE_FLOWCTL:
6757 if (l2cap_le_data_rcv(chan, skb) < 0)
6762 case L2CAP_MODE_BASIC:
6763 /* If socket recv buffers overflows we drop data here
6764 * which is *bad* because L2CAP has to be reliable.
6765 * But we don't have any other choice. L2CAP doesn't
6766 * provide flow control mechanism. */
6768 if (chan->imtu < skb->len) {
6769 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6773 if (!chan->ops->recv(chan, skb))
6777 case L2CAP_MODE_ERTM:
6778 case L2CAP_MODE_STREAMING:
6779 l2cap_data_rcv(chan, skb);
6783 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6791 l2cap_chan_unlock(chan);
/* Deliver a connectionless (UCD) frame to the global channel listening
 * on @psm.  BR/EDR only; the remote address and PSM are stashed in the
 * skb control block for msg_name reporting.
 */
6794 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6795 struct sk_buff *skb)
6797 struct hci_conn *hcon = conn->hcon;
6798 struct l2cap_chan *chan;
6800 if (hcon->type != ACL_LINK)
6803 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6808 BT_DBG("chan %p, len %d", chan, skb->len);
6810 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6813 if (chan->imtu < skb->len)
6816 /* Store remote BD_ADDR and PSM for msg_name */
6817 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6818 bt_cb(skb)->psm = psm;
/* recv() returning 0 means the channel consumed the skb */
6820 if (!chan->ops->recv(chan, skb)) {
6821 l2cap_chan_put(chan);
6826 l2cap_chan_put(chan);
/* Parse an incoming L2CAP PDU and dispatch it by CID: signalling,
 * connectionless, LE signalling, or a data channel.  Frames arriving
 * before the HCI link is fully connected are queued for later.
 */
6831 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6833 struct l2cap_hdr *lh = (void *) skb->data;
6834 struct hci_conn *hcon = conn->hcon;
6838 if (hcon->state != BT_CONNECTED) {
6839 BT_DBG("queueing pending rx skb");
6840 skb_queue_tail(&conn->pending_rx, skb);
6844 skb_pull(skb, L2CAP_HDR_SIZE);
6845 cid = __le16_to_cpu(lh->cid);
6846 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length */
6848 if (len != skb->len) {
6853 /* Since we can't actively block incoming LE connections we must
6854 * at least ensure that we ignore incoming data from them.
6856 if (hcon->type == LE_LINK &&
6857 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6858 bdaddr_type(hcon, hcon->dst_type))) {
6863 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6866 case L2CAP_CID_SIGNALING:
6867 l2cap_sig_channel(conn, skb);
6870 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a PSM prefix before the payload */
6871 psm = get_unaligned((__le16 *) skb->data);
6872 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6873 l2cap_conless_channel(conn, psm, skb);
6876 case L2CAP_CID_LE_SIGNALING:
6877 l2cap_le_sig_channel(conn, skb);
6881 l2cap_data_channel(conn, cid, skb);
/* Work item: drain frames that arrived before the HCI link reached
 * BT_CONNECTED and run them through normal frame reception.
 */
6886 static void process_pending_rx(struct work_struct *work)
6888 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6890 struct sk_buff *skb;
6894 while ((skb = skb_dequeue(&conn->pending_rx)))
6895 l2cap_recv_frame(conn, skb);
/* Allocate and initialize the L2CAP connection object for an HCI
 * connection (idempotent: an existing conn is presumably returned —
 * the early-return line is elided in this view).  Sets the MTU from
 * the controller's LE or ACL MTU and wires up locks, lists and work
 * items.
 */
6898 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6900 struct l2cap_conn *conn = hcon->l2cap_data;
6901 struct hci_chan *hchan;
6906 hchan = hci_chan_create(hcon);
/* Allocation failure: undo the hci_chan before bailing out */
6910 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
6912 hci_chan_del(hchan);
6916 kref_init(&conn->ref);
6917 hcon->l2cap_data = conn;
6918 conn->hcon = hci_conn_get(hcon);
6919 conn->hchan = hchan;
6921 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6923 switch (hcon->type) {
6925 if (hcon->hdev->le_mtu) {
6926 conn->mtu = hcon->hdev->le_mtu;
6931 conn->mtu = hcon->hdev->acl_mtu;
6935 conn->feat_mask = 0;
6937 if (hcon->type == ACL_LINK)
6938 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
6939 &hcon->hdev->dev_flags);
6941 mutex_init(&conn->ident_lock);
6942 mutex_init(&conn->chan_lock);
6944 INIT_LIST_HEAD(&conn->chan_l);
6945 INIT_LIST_HEAD(&conn->users);
6947 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
6949 skb_queue_head_init(&conn->pending_rx);
6950 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
6951 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
6953 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE PSMs fit in one
 * octet; BR/EDR PSMs must be odd with bit 0 of the upper octet clear.
 */
6958 static bool is_valid_psm(u16 psm, u8 dst_type) {
6962 if (bdaddr_type_is_le(dst_type))
6963 return (psm <= 0x00ff);
6965 /* PSM must be odd and lsb of upper byte must be 0 */
6966 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP channel: validate PSM/CID and mode for
 * the channel type, create or reuse the HCI link (LE or ACL), attach
 * the channel to the connection, and start connection setup or
 * security as appropriate.  NOTE(review): error labels and some
 * unlock paths are elided in this view.
 */
6969 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
6970 bdaddr_t *dst, u8 dst_type)
6972 struct l2cap_conn *conn;
6973 struct hci_conn *hcon;
6974 struct hci_dev *hdev;
6977 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
6978 dst_type, __le16_to_cpu(psm));
6980 hdev = hci_get_route(dst, &chan->src);
6982 return -EHOSTUNREACH;
/* Connection-oriented channels need a valid PSM or a fixed CID */
6986 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
6987 chan->chan_type != L2CAP_CHAN_RAW) {
6992 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
6997 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7002 switch (chan->mode) {
7003 case L2CAP_MODE_BASIC:
7005 case L2CAP_MODE_LE_FLOWCTL:
7006 l2cap_le_flowctl_init(chan);
7008 case L2CAP_MODE_ERTM:
7009 case L2CAP_MODE_STREAMING:
7018 switch (chan->state) {
7022 /* Already connecting */
7027 /* Already connected */
7041 /* Set destination address and psm */
7042 bacpy(&chan->dst, dst);
7043 chan->dst_type = dst_type;
7048 if (bdaddr_type_is_le(dst_type)) {
7051 /* Convert from L2CAP channel address type to HCI address type
7053 if (dst_type == BDADDR_LE_PUBLIC)
7054 dst_type = ADDR_LE_DEV_PUBLIC;
7056 dst_type = ADDR_LE_DEV_RANDOM;
/* While advertising, connect as slave; otherwise as master */
7058 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7059 role = HCI_ROLE_SLAVE;
7061 role = HCI_ROLE_MASTER;
7063 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
7064 HCI_LE_CONN_TIMEOUT, role);
7066 u8 auth_type = l2cap_get_auth_type(chan);
7067 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7071 err = PTR_ERR(hcon);
7075 conn = l2cap_conn_add(hcon);
7077 hci_conn_drop(hcon);
7082 mutex_lock(&conn->chan_lock);
7083 l2cap_chan_lock(chan);
/* A fixed CID must not already be bound on this connection */
7085 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7086 hci_conn_drop(hcon);
7091 /* Update source addr of the socket */
7092 bacpy(&chan->src, &hcon->src);
7093 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7095 __l2cap_chan_add(conn, chan);
7097 /* l2cap_chan_add takes its own ref so we can drop this one */
7098 hci_conn_drop(hcon);
7100 l2cap_state_change(chan, BT_CONNECT);
7101 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7103 /* Release chan->sport so that it can be reused by other
7104 * sockets (as it's only used for listening sockets).
7106 write_lock(&chan_list_lock);
7108 write_unlock(&chan_list_lock);
/* Link already up: proceed with setup or security immediately */
7110 if (hcon->state == BT_CONNECTED) {
7111 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7112 __clear_chan_timer(chan);
7113 if (l2cap_chan_check_security(chan, true))
7114 l2cap_state_change(chan, BT_CONNECTED);
7116 l2cap_do_start(chan);
7122 l2cap_chan_unlock(chan);
7123 mutex_unlock(&conn->chan_lock);
7125 hci_dev_unlock(hdev);
7129 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7131 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming connection by
 * scanning listening channels.  Channels bound to this adapter's own
 * address take precedence over wildcard (BDADDR_ANY) listeners.
 */
7133 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7135 int exact = 0, lm1 = 0, lm2 = 0;
7136 struct l2cap_chan *c;
7138 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7140 /* Find listening sockets and check their link_mode */
7141 read_lock(&chan_list_lock);
7142 list_for_each_entry(c, &chan_list, global_l) {
7143 if (c->state != BT_LISTEN)
/* Exact match on our adapter address */
7146 if (!bacmp(&c->src, &hdev->bdaddr)) {
7147 lm1 |= HCI_LM_ACCEPT;
7148 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7149 lm1 |= HCI_LM_MASTER;
7151 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7152 lm2 |= HCI_LM_ACCEPT;
7153 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7154 lm2 |= HCI_LM_MASTER;
7157 read_unlock(&chan_list_lock);
7159 return exact ? lm1 : lm2;
7162 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7163 * from an existing channel in the list or from the beginning of the
7164 * global list (by passing NULL as first parameter).
7166 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7167 bdaddr_t *src, u8 link_type)
7169 read_lock(&chan_list_lock);
/* Resume after @c, or start at the head of the global list */
7172 c = list_next_entry(c, global_l);
7174 c = list_entry(chan_list.next, typeof(*c), global_l);
7176 list_for_each_entry_from(c, &chan_list, global_l) {
7177 if (c->chan_type != L2CAP_CHAN_FIXED)
7179 if (c->state != BT_LISTEN)
7181 if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
/* Source address type must match the link type (BR/EDR vs LE) */
7183 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
7185 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
7189 read_unlock(&chan_list_lock);
7193 read_unlock(&chan_list_lock);
/* HCI callback on connection completion: tear down on failure, else
 * create the L2CAP connection and notify every matching listening
 * fixed channel so it can spawn a per-connection channel.
 * NOTE(review): some failure-path lines are elided in this view.
 */
7198 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7200 struct hci_dev *hdev = hcon->hdev;
7201 struct l2cap_conn *conn;
7202 struct l2cap_chan *pchan;
7205 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
/* Non-zero status: link setup failed */
7208 l2cap_conn_del(hcon, bt_to_errno(status));
7212 conn = l2cap_conn_add(hcon);
7216 dst_type = bdaddr_type(hcon, hcon->dst_type);
7218 /* If device is blocked, do not create channels for it */
7219 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7222 /* Find fixed channels and notify them of the new connection. We
7223 * use multiple individual lookups, continuing each time where
7224 * we left off, because the list lock would prevent calling the
7225 * potentially sleeping l2cap_chan_lock() function.
7227 pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
7229 struct l2cap_chan *chan, *next;
7231 /* Client fixed channels should override server ones */
7232 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7235 l2cap_chan_lock(pchan);
7236 chan = pchan->ops->new_connection(pchan);
7238 bacpy(&chan->src, &hcon->src);
7239 bacpy(&chan->dst, &hcon->dst);
7240 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7241 chan->dst_type = dst_type;
7243 __l2cap_chan_add(conn, chan);
7246 l2cap_chan_unlock(pchan);
/* Look up the next listener before dropping our reference */
7248 next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
7250 l2cap_chan_put(pchan);
7254 l2cap_conn_ready(conn);
/* HCI callback: report the HCI reason code to use when the remote
 * requests disconnection; defaults to remote-user-terminated when no
 * L2CAP connection state exists.
 */
7257 int l2cap_disconn_ind(struct hci_conn *hcon)
7259 struct l2cap_conn *conn = hcon->l2cap_data;
7261 BT_DBG("hcon %p", hcon);
7264 return HCI_ERROR_REMOTE_USER_TERM;
7265 return conn->disc_reason;
/* HCI callback on disconnection: tear down the L2CAP connection,
 * translating the HCI reason to an errno.
 */
7268 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7270 BT_DBG("hcon %p reason %d", hcon, reason);
7272 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * when encryption drops, arm a timeout for MEDIUM security and close
 * immediately for HIGH/FIPS; when it comes up, cancel the timeout.
 */
7275 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7277 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7280 if (encrypt == 0x00) {
7281 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7282 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7283 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7284 chan->sec_level == BT_SECURITY_FIPS)
7285 l2cap_chan_close(chan, ECONNREFUSED);
7287 if (chan->sec_level == BT_SECURITY_MEDIUM)
7288 __clear_chan_timer(chan);
7292 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7294 struct l2cap_conn *conn = hcon->l2cap_data;
7295 struct l2cap_chan *chan;
7300 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7302 mutex_lock(&conn->chan_lock);
7304 list_for_each_entry(chan, &conn->chan_l, list) {
7305 l2cap_chan_lock(chan);
7307 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7308 state_to_string(chan->state));
7310 if (chan->scid == L2CAP_CID_A2MP) {
7311 l2cap_chan_unlock(chan);
7315 if (!status && encrypt)
7316 chan->sec_level = hcon->sec_level;
7318 if (!__l2cap_no_conn_pending(chan)) {
7319 l2cap_chan_unlock(chan);
7323 if (!status && (chan->state == BT_CONNECTED ||
7324 chan->state == BT_CONFIG)) {
7325 chan->ops->resume(chan);
7326 l2cap_check_encryption(chan, encrypt);
7327 l2cap_chan_unlock(chan);
7331 if (chan->state == BT_CONNECT) {
7333 l2cap_start_connection(chan);
7335 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7336 } else if (chan->state == BT_CONNECT2 &&
7337 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7338 struct l2cap_conn_rsp rsp;
7342 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7343 res = L2CAP_CR_PEND;
7344 stat = L2CAP_CS_AUTHOR_PEND;
7345 chan->ops->defer(chan);
7347 l2cap_state_change(chan, BT_CONFIG);
7348 res = L2CAP_CR_SUCCESS;
7349 stat = L2CAP_CS_NO_INFO;
7352 l2cap_state_change(chan, BT_DISCONN);
7353 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7354 res = L2CAP_CR_SEC_BLOCK;
7355 stat = L2CAP_CS_NO_INFO;
7358 rsp.scid = cpu_to_le16(chan->dcid);
7359 rsp.dcid = cpu_to_le16(chan->scid);
7360 rsp.result = cpu_to_le16(res);
7361 rsp.status = cpu_to_le16(stat);
7362 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7365 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7366 res == L2CAP_CR_SUCCESS) {
7368 set_bit(CONF_REQ_SENT, &chan->conf_state);
7369 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7371 l2cap_build_conf_req(chan, buf),
7373 chan->num_conf_req++;
7377 l2cap_chan_unlock(chan);
7380 mutex_unlock(&conn->chan_lock);
7385 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7387 struct l2cap_conn *conn = hcon->l2cap_data;
7388 struct l2cap_hdr *hdr;
7391 /* For AMP controller do not create l2cap conn */
7392 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7396 conn = l2cap_conn_add(hcon);
7401 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7405 case ACL_START_NO_FLUSH:
7408 BT_ERR("Unexpected start frame (len %d)", skb->len);
7409 kfree_skb(conn->rx_skb);
7410 conn->rx_skb = NULL;
7412 l2cap_conn_unreliable(conn, ECOMM);
7415 /* Start fragment always begin with Basic L2CAP header */
7416 if (skb->len < L2CAP_HDR_SIZE) {
7417 BT_ERR("Frame is too short (len %d)", skb->len);
7418 l2cap_conn_unreliable(conn, ECOMM);
7422 hdr = (struct l2cap_hdr *) skb->data;
7423 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7425 if (len == skb->len) {
7426 /* Complete frame received */
7427 l2cap_recv_frame(conn, skb);
7431 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7433 if (skb->len > len) {
7434 BT_ERR("Frame is too long (len %d, expected len %d)",
7436 l2cap_conn_unreliable(conn, ECOMM);
7440 /* Allocate skb for the complete frame (with header) */
7441 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7445 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7447 conn->rx_len = len - skb->len;
7451 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7453 if (!conn->rx_len) {
7454 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7455 l2cap_conn_unreliable(conn, ECOMM);
7459 if (skb->len > conn->rx_len) {
7460 BT_ERR("Fragment is too long (len %d, expected %d)",
7461 skb->len, conn->rx_len);
7462 kfree_skb(conn->rx_skb);
7463 conn->rx_skb = NULL;
7465 l2cap_conn_unreliable(conn, ECOMM);
7469 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7471 conn->rx_len -= skb->len;
7473 if (!conn->rx_len) {
7474 /* Complete frame received. l2cap_recv_frame
7475 * takes ownership of the skb so set the global
7476 * rx_skb pointer to NULL first.
7478 struct sk_buff *rx_skb = conn->rx_skb;
7479 conn->rx_skb = NULL;
7480 l2cap_recv_frame(conn, rx_skb);
7490 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7492 struct l2cap_chan *c;
7494 read_lock(&chan_list_lock);
7496 list_for_each_entry(c, &chan_list, global_l) {
7497 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7499 c->state, __le16_to_cpu(c->psm),
7500 c->scid, c->dcid, c->imtu, c->omtu,
7501 c->sec_level, c->mode);
7504 read_unlock(&chan_list_lock);
7509 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7511 return single_open(file, l2cap_debugfs_show, inode->i_private);
7514 static const struct file_operations l2cap_debugfs_fops = {
7515 .open = l2cap_debugfs_open,
7517 .llseek = seq_lseek,
7518 .release = single_release,
/* debugfs entry for the channel list; created in l2cap_init(),
 * removed in l2cap_exit().
 */
static struct dentry *l2cap_debugfs;
7523 int __init l2cap_init(void)
7527 err = l2cap_init_sockets();
7531 if (IS_ERR_OR_NULL(bt_debugfs))
7534 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7535 NULL, &l2cap_debugfs_fops);
7537 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7539 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7545 void l2cap_exit(void)
7547 debugfs_remove(l2cap_debugfs);
7548 l2cap_cleanup_sockets();
/* "disable_ertm" module parameter (writable via sysfs, mode 0644):
 * turns off L2CAP enhanced retransmission mode.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");