2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
/* Locally supported L2CAP feature mask and fixed-channel bitmap. */
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
/* Global list of all channels, protected by chan_list_lock (rwlock). */
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
/* Defaults used when initializing LE credit-based flow control. */
55 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
56 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
58 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
59 u8 code, u8 ident, u16 dlen, void *data);
60 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
62 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
63 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
65 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
66 struct sk_buff_head *skbs, u8 event);
/* Map an HCI address type to the corresponding BDADDR_* type for an
 * LE link: public -> BDADDR_LE_PUBLIC, otherwise BDADDR_LE_RANDOM.
 * NOTE(review): the non-LE return path is not visible in this view. */
68 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
70 if (hcon->type == LE_LINK) {
71 if (type == ADDR_LE_DEV_PUBLIC)
72 return BDADDR_LE_PUBLIC;
74 return BDADDR_LE_RANDOM;
80 /* ---- L2CAP channels ---- */
/* Walk conn->chan_l looking for a channel by destination CID.
 * Caller must hold conn->chan_lock (no locking done here). */
82 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
87 list_for_each_entry(c, &conn->chan_l, list) {
/* Walk conn->chan_l looking for a channel by source CID.
 * Caller must hold conn->chan_lock (no locking done here). */
94 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
99 list_for_each_entry(c, &conn->chan_l, list) {
106 /* Find channel with given SCID.
107 * Returns locked channel. */
108 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
111 struct l2cap_chan *c;
/* Lookup is done under conn->chan_lock; per the header comment the
 * found channel is returned locked (locking step not visible here). */
113 mutex_lock(&conn->chan_lock);
114 c = __l2cap_get_chan_by_scid(conn, cid);
117 mutex_unlock(&conn->chan_lock);
122 /* Find channel with given DCID.
123 * Returns locked channel.
/* DCID counterpart of l2cap_get_chan_by_scid(): same locking pattern
 * under conn->chan_lock. */
125 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
128 struct l2cap_chan *c;
130 mutex_lock(&conn->chan_lock);
131 c = __l2cap_get_chan_by_dcid(conn, cid);
134 mutex_unlock(&conn->chan_lock);
/* Find a channel on this connection by pending signalling identifier.
 * Caller must hold conn->chan_lock. */
139 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
142 struct l2cap_chan *c;
144 list_for_each_entry(c, &conn->chan_l, list) {
145 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
151 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
154 struct l2cap_chan *c;
156 mutex_lock(&conn->chan_lock);
157 c = __l2cap_get_chan_by_ident(conn, ident);
160 mutex_unlock(&conn->chan_lock);
/* Search the global channel list for a channel bound to the given
 * source PSM (sport) and source address. Caller holds chan_list_lock. */
165 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
167 struct l2cap_chan *c;
169 list_for_each_entry(c, &chan_list, global_l) {
170 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a channel to a PSM. If @psm is zero, auto-allocate a dynamic
 * PSM from the odd values in 0x1001..0x10ff; otherwise fail if the
 * (psm, src) pair is already taken. Serialized by chan_list_lock. */
176 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
180 write_lock(&chan_list_lock);
182 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Dynamic PSMs are odd; step by 2 to keep the low octet odd. */
195 for (p = 0x1001; p < 0x1100; p += 2)
196 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
197 chan->psm = cpu_to_le16(p);
198 chan->sport = cpu_to_le16(p);
205 write_unlock(&chan_list_lock);
/* Assign a fixed source CID to a channel under chan_list_lock.
 * NOTE(review): the assignment itself is not visible in this view. */
209 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
211 write_lock(&chan_list_lock);
215 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic source CID on this connection.
 * LE links use the (smaller) LE dynamic range. */
220 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
224 if (conn->hcon->type == LE_LINK)
225 dyn_end = L2CAP_CID_LE_DYN_END;
227 dyn_end = L2CAP_CID_DYN_END;
229 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
230 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition a channel to @state and notify the owner via the
 * state_change callback (err == 0). */
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
239 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
240 state_to_string(state));
243 chan->ops->state_change(chan, state, 0);
/* Notify the owner of a state change together with an error code.
 * NOTE(review): the new-state parameter and assignment are not
 * visible in this view. */
246 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
250 chan->ops->state_change(chan, chan->state, err);
/* Report an error on the channel without changing its state. */
253 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
255 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer, but only if the monitor timer is
 * not already pending and a retransmission timeout is configured. */
258 static void __set_retrans_timer(struct l2cap_chan *chan)
260 if (!delayed_work_pending(&chan->monitor_timer) &&
261 chan->retrans_timeout) {
262 l2cap_set_timer(chan, &chan->retrans_timer,
263 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the ERTM monitor timer; the retransmission timer is cleared
 * first since the two are mutually exclusive. */
267 static void __set_monitor_timer(struct l2cap_chan *chan)
269 __clear_retrans_timer(chan);
270 if (chan->monitor_timeout) {
271 l2cap_set_timer(chan, &chan->monitor_timer,
272 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of @head for the skb whose ERTM control txseq equals
 * @seq (O(n) in queue length). */
276 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
281 skb_queue_walk(head, skb) {
282 if (bt_cb(skb)->control.txseq == seq)
289 /* ---- L2CAP sequence number lists ---- */
291 /* For ERTM, ordered lists of sequence numbers must be tracked for
292 * SREJ requests that are received and for frames that are to be
293 * retransmitted. These seq_list functions implement a singly-linked
294 * list in an array, where membership in the list can also be checked
295 * in constant time. Items can also be added to the tail of the list
296 * and removed from the head in constant time, without further memory
/* Allocate and clear the backing array for a sequence list sized to
 * hold @size entries (see the seq_list description above). */
300 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
302 size_t alloc_size, i;
304 /* Allocated size is a power of 2 to map sequence numbers
305 * (which may be up to 14 bits) in to a smaller array that is
306 * sized for the negotiated ERTM transmit windows.
308 alloc_size = roundup_pow_of_two(size);
310 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
/* Power-of-two size lets (seq & mask) index the array directly. */
314 seq_list->mask = alloc_size - 1;
315 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
316 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
317 for (i = 0; i < alloc_size; i++)
318 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the sequence list backing array (kfree(NULL) is safe). */
323 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
325 kfree(seq_list->list);
/* O(1) membership test: a slot holding L2CAP_SEQ_LIST_CLEAR means the
 * sequence number is not in the list. */
328 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
331 /* Constant-time check for list membership */
332 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove and return the sequence number at the head of the list,
 * clearing its slot; an empty list resets head and tail. */
335 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
337 u16 seq = seq_list->head;
338 u16 mask = seq_list->mask;
/* Advance head to the next linked entry, then free the old slot. */
340 seq_list->head = seq_list->list[seq & mask];
341 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
343 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
344 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
345 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Empty the list; the early return skips the O(n) clear when the
 * list is already empty. */
351 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
355 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
358 for (i = 0; i <= seq_list->mask; i++)
359 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
361 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq at the tail in O(1); duplicate entries are ignored
 * (slot already non-CLEAR). An empty list also sets head. */
365 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
367 u16 mask = seq_list->mask;
369 /* All appends happen in constant time */
371 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
374 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
375 seq_list->head = seq;
/* Link old tail to the new entry, which becomes the TAIL sentinel. */
377 seq_list->list[seq_list->tail & mask] = seq;
379 seq_list->tail = seq;
380 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for chan->chan_timer: close the channel with a
 * reason derived from its state, under conn->chan_lock + chan lock,
 * then drop the reference taken when the timer was armed. */
383 static void l2cap_chan_timeout(struct work_struct *work)
385 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
387 struct l2cap_conn *conn = chan->conn;
390 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
392 mutex_lock(&conn->chan_lock);
393 l2cap_chan_lock(chan);
/* A timeout while connected/configuring or while connecting at a
 * real security level is reported as a refused connection. */
395 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
396 reason = ECONNREFUSED;
397 else if (chan->state == BT_CONNECT &&
398 chan->sec_level != BT_SECURITY_SDP)
399 reason = ECONNREFUSED;
403 l2cap_chan_close(chan, reason);
405 l2cap_chan_unlock(chan);
/* ops->close is called outside the channel lock but inside
 * conn->chan_lock. */
407 chan->ops->close(chan);
408 mutex_unlock(&conn->chan_lock);
410 l2cap_chan_put(chan);
/* Allocate and initialize a new channel: zeroed, refcounted (kref),
 * registered on the global chan_list, in state BT_OPEN.
 * Returns the new channel (NULL check not visible in this view). */
413 struct l2cap_chan *l2cap_chan_create(void)
415 struct l2cap_chan *chan;
417 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
421 mutex_init(&chan->lock);
423 write_lock(&chan_list_lock);
424 list_add(&chan->global_l, &chan_list);
425 write_unlock(&chan_list_lock);
427 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
429 chan->state = BT_OPEN;
431 kref_init(&chan->kref);
433 /* This flag is cleared in l2cap_chan_ready() */
434 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
436 BT_DBG("chan %p", chan);
/* kref release callback: unlink the channel from the global list.
 * (The kfree of the channel itself is not visible in this view.) */
441 static void l2cap_chan_destroy(struct kref *kref)
443 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
445 BT_DBG("chan %p", chan);
447 write_lock(&chan_list_lock);
448 list_del(&chan->global_l);
449 write_unlock(&chan_list_lock);
/* Take a reference on the channel (kref_get not visible here). */
454 void l2cap_chan_hold(struct l2cap_chan *c)
456 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a reference; the last put triggers l2cap_chan_destroy(). */
461 void l2cap_chan_put(struct l2cap_chan *c)
463 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
465 kref_put(&c->kref, l2cap_chan_destroy);
/* Reset negotiable channel parameters to their protocol defaults
 * (FCS, max transmit, tx windows, security level, force-active). */
468 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
470 chan->fcs = L2CAP_FCS_CRC16;
471 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
472 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
473 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
474 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
475 chan->sec_level = BT_SECURITY_LOW;
477 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Initialize LE credit-based flow control state: no TX credits until
 * the peer grants some, module-default RX credits, and an MPS capped
 * by the local incoming MTU. */
480 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
483 chan->sdu_last_frag = NULL;
485 chan->tx_credits = 0;
486 chan->rx_credits = le_max_credits;
487 chan->mps = min_t(u16, chan->imtu, le_default_mps);
489 skb_queue_head_init(&chan->tx_q);
/* Attach a channel to a connection: assign CIDs/MTU per channel type,
 * take channel and hci_conn references, and link into conn->chan_l.
 * Caller must hold conn->chan_lock. */
492 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
494 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
495 __le16_to_cpu(chan->psm), chan->dcid);
497 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
501 switch (chan->chan_type) {
502 case L2CAP_CHAN_CONN_ORIENTED:
503 /* Alloc CID for connection-oriented socket */
504 chan->scid = l2cap_alloc_cid(conn);
505 if (conn->hcon->type == ACL_LINK)
506 chan->omtu = L2CAP_DEFAULT_MTU;
509 case L2CAP_CHAN_CONN_LESS:
510 /* Connectionless socket */
511 chan->scid = L2CAP_CID_CONN_LESS;
512 chan->dcid = L2CAP_CID_CONN_LESS;
513 chan->omtu = L2CAP_DEFAULT_MTU;
516 case L2CAP_CHAN_FIXED:
517 /* Caller will set CID and CID specific MTU values */
521 /* Raw socket can send/recv signalling messages only */
522 chan->scid = L2CAP_CID_SIGNALING;
523 chan->dcid = L2CAP_CID_SIGNALING;
524 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default extended flow spec (EFS) parameters: best effort. */
527 chan->local_id = L2CAP_BESTEFFORT_ID;
528 chan->local_stype = L2CAP_SERV_BESTEFFORT;
529 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
530 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
531 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
532 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
534 l2cap_chan_hold(chan);
/* Keep the underlying ACL/LE link alive while the channel exists. */
536 hci_conn_hold(conn->hcon);
538 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
541 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
543 mutex_lock(&conn->chan_lock);
544 __l2cap_chan_add(conn, chan);
545 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: stop timers, unlink from
 * conn->chan_l, drop references, tear down AMP state, notify the
 * owner, and purge per-mode queues/lists. */
548 void l2cap_chan_del(struct l2cap_chan *chan, int err)
550 struct l2cap_conn *conn = chan->conn;
552 __clear_chan_timer(chan);
554 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
557 struct amp_mgr *mgr = conn->hcon->amp_mgr;
558 /* Delete from channel list */
559 list_del(&chan->list);
561 l2cap_chan_put(chan);
/* A2MP channels do not hold a reference on the hci_conn. */
565 if (chan->scid != L2CAP_CID_A2MP)
566 hci_conn_drop(conn->hcon);
568 if (mgr && mgr->bredr_chan == chan)
569 mgr->bredr_chan = NULL;
572 if (chan->hs_hchan) {
573 struct hci_chan *hs_hchan = chan->hs_hchan;
575 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
576 amp_disconnect_logical_link(hs_hchan);
579 chan->ops->teardown(chan, err);
581 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
/* Mode-specific cleanup of queued data and ERTM bookkeeping. */
585 case L2CAP_MODE_BASIC:
588 case L2CAP_MODE_LE_FLOWCTL:
589 skb_queue_purge(&chan->tx_q);
592 case L2CAP_MODE_ERTM:
593 __clear_retrans_timer(chan);
594 __clear_monitor_timer(chan);
595 __clear_ack_timer(chan);
597 skb_queue_purge(&chan->srej_q);
599 l2cap_seq_list_free(&chan->srej_list);
600 l2cap_seq_list_free(&chan->retrans_list);
604 case L2CAP_MODE_STREAMING:
605 skb_queue_purge(&chan->tx_q);
/* Reject a pending LE credit-based connection request: pick the
 * result code (authorization vs bad PSM), move to BT_DISCONN, and
 * send the LE connect response. */
612 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
614 struct l2cap_conn *conn = chan->conn;
615 struct l2cap_le_conn_rsp rsp;
618 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
619 result = L2CAP_CR_AUTHORIZATION;
621 result = L2CAP_CR_BAD_PSM;
623 l2cap_state_change(chan, BT_DISCONN);
625 rsp.dcid = cpu_to_le16(chan->scid);
626 rsp.mtu = cpu_to_le16(chan->imtu);
627 rsp.mps = cpu_to_le16(chan->mps);
628 rsp.credits = cpu_to_le16(chan->rx_credits);
629 rsp.result = cpu_to_le16(result);
631 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* BR/EDR counterpart of the LE reject path: answer the pending
 * connection request with SEC_BLOCK or BAD_PSM and go to BT_DISCONN. */
635 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
637 struct l2cap_conn *conn = chan->conn;
638 struct l2cap_conn_rsp rsp;
641 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
642 result = L2CAP_CR_SEC_BLOCK;
644 result = L2CAP_CR_BAD_PSM;
646 l2cap_state_change(chan, BT_DISCONN);
/* In the response, scid/dcid are from the peer's point of view. */
648 rsp.scid = cpu_to_le16(chan->dcid);
649 rsp.dcid = cpu_to_le16(chan->scid);
650 rsp.result = cpu_to_le16(result);
651 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
653 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close a channel according to its current state: send a disconnect
 * request when connected, reject a pending incoming request in
 * BT_CONNECT2, or just delete/tear down otherwise. */
656 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
658 struct l2cap_conn *conn = chan->conn;
660 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
662 switch (chan->state) {
664 chan->ops->teardown(chan, 0);
669 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
670 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
671 l2cap_send_disconn_req(chan, reason);
673 l2cap_chan_del(chan, reason);
/* Pending incoming connection: send the proper reject for the
 * link type before deleting the channel. */
677 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
678 if (conn->hcon->type == ACL_LINK)
679 l2cap_chan_connect_reject(chan);
680 else if (conn->hcon->type == LE_LINK)
681 l2cap_chan_le_connect_reject(chan);
684 l2cap_chan_del(chan, reason);
689 l2cap_chan_del(chan, reason);
693 chan->ops->teardown(chan, 0);
/* Map (channel type, PSM, security level) to the HCI authentication
 * requirement used for this channel. SDP-like PSMs are downgraded to
 * BT_SECURITY_SDP when at BT_SECURITY_LOW. */
698 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
700 switch (chan->chan_type) {
702 switch (chan->sec_level) {
703 case BT_SECURITY_HIGH:
704 case BT_SECURITY_FIPS:
705 return HCI_AT_DEDICATED_BONDING_MITM;
706 case BT_SECURITY_MEDIUM:
707 return HCI_AT_DEDICATED_BONDING;
709 return HCI_AT_NO_BONDING;
712 case L2CAP_CHAN_CONN_LESS:
713 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
714 if (chan->sec_level == BT_SECURITY_LOW)
715 chan->sec_level = BT_SECURITY_SDP;
717 if (chan->sec_level == BT_SECURITY_HIGH ||
718 chan->sec_level == BT_SECURITY_FIPS)
719 return HCI_AT_NO_BONDING_MITM;
721 return HCI_AT_NO_BONDING;
723 case L2CAP_CHAN_CONN_ORIENTED:
724 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
725 if (chan->sec_level == BT_SECURITY_LOW)
726 chan->sec_level = BT_SECURITY_SDP;
728 if (chan->sec_level == BT_SECURITY_HIGH ||
729 chan->sec_level == BT_SECURITY_FIPS)
730 return HCI_AT_NO_BONDING_MITM;
732 return HCI_AT_NO_BONDING;
/* Fallback for other channel types: general bonding by level. */
736 switch (chan->sec_level) {
737 case BT_SECURITY_HIGH:
738 case BT_SECURITY_FIPS:
739 return HCI_AT_GENERAL_BONDING_MITM;
740 case BT_SECURITY_MEDIUM:
741 return HCI_AT_GENERAL_BONDING;
743 return HCI_AT_NO_BONDING;
749 /* Service level security */
/* Enforce the channel's security level on the underlying link: SMP
 * for LE links, HCI authentication/encryption for BR/EDR. */
750 int l2cap_chan_check_security(struct l2cap_chan *chan)
752 struct l2cap_conn *conn = chan->conn;
755 if (conn->hcon->type == LE_LINK)
756 return smp_conn_security(conn->hcon, chan->sec_level);
758 auth_type = l2cap_get_auth_type(chan);
760 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for this
 * connection, wrapping within the kernel's 1-128 range, under
 * conn->lock. */
763 static u8 l2cap_get_ident(struct l2cap_conn *conn)
767 /* Get next available identificator.
768 * 1 - 128 are used by kernel.
769 * 129 - 199 are reserved.
770 * 200 - 254 are used by utilities like l2ping, etc.
773 spin_lock(&conn->lock);
775 if (++conn->tx_ident > 128)
780 spin_unlock(&conn->lock);
/* Build and transmit a signalling command skb on the connection's
 * HCI channel at maximum priority, marked force-active; non-flushable
 * ACL start is used when the controller supports it. */
785 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
788 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
791 BT_DBG("code 0x%2.2x", code);
796 if (lmp_no_flush_capable(conn->hcon->hdev))
797 flags = ACL_START_NO_FLUSH;
801 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
802 skb->priority = HCI_PRIO_MAX;
804 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any move state other
 * than STABLE or WAIT_PREPARE). */
807 static bool __chan_is_moving(struct l2cap_chan *chan)
809 return chan->move_state != L2CAP_MOVE_STABLE &&
810 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Send a data skb for the channel: via the high-speed (AMP) link if
 * one is active and no move is in progress, otherwise on the normal
 * ACL channel with flags derived from the flushable setting. */
813 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
815 struct hci_conn *hcon = chan->conn->hcon;
818 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
821 if (chan->hs_hcon && !__chan_is_moving(chan)) {
823 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
830 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
831 lmp_no_flush_capable(hcon->hdev))
832 flags = ACL_START_NO_FLUSH;
836 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
837 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced ERTM control field into struct l2cap_ctrl:
 * common reqseq/final bits, then either S-frame (poll/super) or
 * I-frame (sar/txseq) fields depending on the frame-type bit. */
840 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
842 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
843 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
845 if (enh & L2CAP_CTRL_FRAME_TYPE) {
848 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
849 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
856 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
857 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended ERTM control field; same structure as the
 * enhanced decoder but with the wider EXT_CTRL field layout. */
864 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
866 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
867 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
869 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
872 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
873 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
880 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
881 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field at the head of @skb, choosing the
 * 32-bit extended or 16-bit enhanced layout from FLAG_EXT_CTRL. */
888 static inline void __unpack_control(struct l2cap_chan *chan,
891 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
892 __unpack_extended_control(get_unaligned_le32(skb->data),
893 &bt_cb(skb)->control);
894 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
896 __unpack_enhanced_control(get_unaligned_le16(skb->data),
897 &bt_cb(skb)->control);
898 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode struct l2cap_ctrl into a 32-bit extended control field —
 * inverse of __unpack_extended_control(). */
902 static u32 __pack_extended_control(struct l2cap_ctrl *control)
906 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
907 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
909 if (control->sframe) {
910 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
911 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
912 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
914 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
915 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode struct l2cap_ctrl into a 16-bit enhanced control field —
 * inverse of __unpack_enhanced_control(). */
921 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
925 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
926 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
928 if (control->sframe) {
929 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
930 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
931 packed |= L2CAP_CTRL_FRAME_TYPE;
933 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
934 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into the skb just after the basic
 * L2CAP header, using the layout selected by FLAG_EXT_CTRL. */
940 static inline void __pack_control(struct l2cap_chan *chan,
941 struct l2cap_ctrl *control,
944 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
945 put_unaligned_le32(__pack_extended_control(control),
946 skb->data + L2CAP_HDR_SIZE);
948 put_unaligned_le16(__pack_enhanced_control(control),
949 skb->data + L2CAP_HDR_SIZE);
/* Size of the ERTM header for this channel: extended when
 * FLAG_EXT_CTRL is set, enhanced otherwise. */
953 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
955 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
956 return L2CAP_EXT_HDR_SIZE;
958 return L2CAP_ENH_HDR_SIZE;
/* Build a supervisory (S-frame) PDU: basic header, pre-packed control
 * field, optional CRC16 FCS over the whole frame. Returns the skb or
 * ERR_PTR(-ENOMEM) on allocation failure. */
961 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
965 struct l2cap_hdr *lh;
966 int hlen = __ertm_hdr_size(chan);
968 if (chan->fcs == L2CAP_FCS_CRC16)
969 hlen += L2CAP_FCS_SIZE;
971 skb = bt_skb_alloc(hlen, GFP_KERNEL);
974 return ERR_PTR(-ENOMEM);
976 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
977 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
978 lh->cid = cpu_to_le16(chan->dcid);
980 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
981 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
983 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* FCS covers everything already in the skb (header + control). */
985 if (chan->fcs == L2CAP_FCS_CRC16) {
986 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
987 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
990 skb->priority = HCI_PRIO_MAX;
/* Transmit an S-frame described by @control, maintaining RNR-sent
 * state and acknowledgement bookkeeping; skipped while an AMP move is
 * in progress. */
994 static void l2cap_send_sframe(struct l2cap_chan *chan,
995 struct l2cap_ctrl *control)
1000 BT_DBG("chan %p, control %p", chan, control);
1002 if (!control->sframe)
1005 if (__chan_is_moving(chan))
1008 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
/* Track local-busy signalling so RNR can be cleared later. */
1012 if (control->super == L2CAP_SUPER_RR)
1013 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1014 else if (control->super == L2CAP_SUPER_RNR)
1015 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* Any non-SREJ S-frame acknowledges up to reqseq, so the pending
 * ack timer can be cancelled. */
1017 if (control->super != L2CAP_SUPER_SREJ) {
1018 chan->last_acked_seq = control->reqseq;
1019 __clear_ack_timer(chan);
1022 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1023 control->final, control->poll, control->super);
1025 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1026 control_field = __pack_extended_control(control);
1028 control_field = __pack_enhanced_control(control);
1030 skb = l2cap_create_sframe_pdu(chan, control_field);
1032 l2cap_do_send(chan, skb);
/* Send an RR (receiver ready) or, when locally busy, an RNR S-frame
 * acknowledging up to buffer_seq; @poll sets the P bit. */
1035 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1037 struct l2cap_ctrl control;
1039 BT_DBG("chan %p, poll %d", chan, poll);
1041 memset(&control, 0, sizeof(control));
1043 control.poll = poll;
1045 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1046 control.super = L2CAP_SUPER_RNR;
1048 control.super = L2CAP_SUPER_RR;
1050 control.reqseq = chan->buffer_seq;
1051 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding for this channel. */
1054 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1056 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may use an AMP controller: high speed
 * must be enabled, the peer must advertise A2MP on the fixed-channel
 * mask, at least one non-BR/EDR AMP controller must be up, and the
 * channel policy must prefer AMP. */
1059 static bool __amp_capable(struct l2cap_chan *chan)
1061 struct l2cap_conn *conn = chan->conn;
1062 struct hci_dev *hdev;
1063 bool amp_available = false;
1065 if (!conn->hs_enabled)
1068 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1071 read_lock(&hci_dev_list_lock);
1072 list_for_each_entry(hdev, &hci_dev_list, list) {
1073 if (hdev->amp_type != AMP_TYPE_BREDR &&
1074 test_bit(HCI_UP, &hdev->flags)) {
1075 amp_available = true;
1079 read_unlock(&hci_dev_list_lock);
1081 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1082 return amp_available;
/* Validate extended flow spec parameters for the channel.
 * NOTE(review): the body is not visible in this view. */
1087 static bool l2cap_check_efs(struct l2cap_chan *chan)
1089 /* Check EFS parameters */
/* Send an L2CAP connection request for the channel's PSM/SCID and
 * mark the connect as pending until the response arrives. */
1093 void l2cap_send_conn_req(struct l2cap_chan *chan)
1095 struct l2cap_conn *conn = chan->conn;
1096 struct l2cap_conn_req req;
1098 req.scid = cpu_to_le16(chan->scid);
1099 req.psm = chan->psm;
1101 chan->ident = l2cap_get_ident(conn);
1103 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1105 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send an AMP Create Channel request for the channel on the
 * controller identified by @amp_id. */
1108 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1110 struct l2cap_create_chan_req req;
1111 req.scid = cpu_to_le16(chan->scid);
1112 req.psm = chan->psm;
1113 req.amp_id = amp_id;
1115 chan->ident = l2cap_get_ident(chan->conn);
1117 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all timers, reset
 * retry/retransmit bookkeeping, purge SREJ state, and park the TX/RX
 * state machines in their move states. No-op for non-ERTM modes. */
1121 static void l2cap_move_setup(struct l2cap_chan *chan)
1123 struct sk_buff *skb;
1125 BT_DBG("chan %p", chan);
1127 if (chan->mode != L2CAP_MODE_ERTM)
1130 __clear_retrans_timer(chan);
1131 __clear_monitor_timer(chan);
1132 __clear_ack_timer(chan);
1134 chan->retry_count = 0;
/* Queued frames that were already sent are reset to a single retry
 * so they are retransmitted once on the new link. */
1135 skb_queue_walk(&chan->tx_q, skb) {
1136 if (bt_cb(skb)->control.retries)
1137 bt_cb(skb)->control.retries = 1;
1142 chan->expected_tx_seq = chan->buffer_seq;
1144 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1145 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1146 l2cap_seq_list_clear(&chan->retrans_list);
1147 l2cap_seq_list_clear(&chan->srej_list);
1148 skb_queue_purge(&chan->srej_q);
1150 chan->tx_state = L2CAP_TX_STATE_XMIT;
1151 chan->rx_state = L2CAP_RX_STATE_MOVE;
1153 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return move state/role to stable, and for ERTM
 * channels resynchronize via poll (initiator waits for F bit,
 * responder waits for P bit). */
1156 static void l2cap_move_done(struct l2cap_chan *chan)
1158 u8 move_role = chan->move_role;
1159 BT_DBG("chan %p", chan);
1161 chan->move_state = L2CAP_MOVE_STABLE;
1162 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1164 if (chan->mode != L2CAP_MODE_ERTM)
1167 switch (move_role) {
1168 case L2CAP_MOVE_ROLE_INITIATOR:
1169 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1170 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1172 case L2CAP_MOVE_ROLE_RESPONDER:
1173 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark the channel connected: clear all configuration flags and the
 * channel timer, suspend LE channels that have no TX credits yet,
 * then notify the owner via ops->ready. */
1178 static void l2cap_chan_ready(struct l2cap_chan *chan)
1180 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1181 chan->conf_state = 0;
1182 __clear_chan_timer(chan);
1184 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1185 chan->ops->suspend(chan);
1187 chan->state = BT_CONNECTED;
1189 chan->ops->ready(chan);
/* Send an LE credit-based connection request once per channel
 * (guarded by FLAG_LE_CONN_REQ_SENT). */
1192 static void l2cap_le_connect(struct l2cap_chan *chan)
1194 struct l2cap_conn *conn = chan->conn;
1195 struct l2cap_le_conn_req req;
1197 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1200 req.psm = chan->psm;
1201 req.scid = cpu_to_le16(chan->scid);
1202 req.mtu = cpu_to_le16(chan->imtu);
1203 req.mps = cpu_to_le16(chan->mps);
1204 req.credits = cpu_to_le16(chan->rx_credits);
1206 chan->ident = l2cap_get_ident(conn);
1208 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Start an LE channel: require link security first, then either mark
 * it ready or issue the LE connect request when still connecting. */
1212 static void l2cap_le_start(struct l2cap_chan *chan)
1214 struct l2cap_conn *conn = chan->conn;
1216 if (!smp_conn_security(conn->hcon, chan->sec_level))
1220 l2cap_chan_ready(chan);
1224 if (chan->state == BT_CONNECT)
1225 l2cap_le_connect(chan);
/* Kick off the channel connection on the appropriate transport:
 * AMP discovery when AMP-capable, LE start for LE links, otherwise a
 * plain BR/EDR connect request. */
1228 static void l2cap_start_connection(struct l2cap_chan *chan)
1230 if (__amp_capable(chan)) {
1231 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1232 a2mp_discover_amp(chan);
1233 } else if (chan->conn->hcon->type == LE_LINK) {
1234 l2cap_le_start(chan);
1236 l2cap_send_conn_req(chan);
/* Start a channel once the connection is usable. For BR/EDR, the
 * feature-mask information exchange must have completed (or is
 * initiated here) and security must pass before connecting. */
1240 static void l2cap_do_start(struct l2cap_chan *chan)
1242 struct l2cap_conn *conn = chan->conn;
1244 if (conn->hcon->type == LE_LINK) {
1245 l2cap_le_start(chan);
1249 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1250 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1253 if (l2cap_chan_check_security(chan) &&
1254 __l2cap_no_conn_pending(chan)) {
1255 l2cap_start_connection(chan);
/* No feature request sent yet: issue one and arm the info
 * timeout before any channels may connect. */
1258 struct l2cap_info_req req;
1259 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1261 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1262 conn->info_ident = l2cap_get_ident(conn);
1264 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1266 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Check whether @mode (ERTM or streaming) is supported by both the
 * local feature mask and the remote @feat_mask. */
1271 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1273 u32 local_feat_mask = l2cap_feat_mask;
1275 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1278 case L2CAP_MODE_ERTM:
1279 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1280 case L2CAP_MODE_STREAMING:
1281 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a disconnect request for the channel and move it to BT_DISCONN
 * with @err reported to the owner. ERTM timers are stopped first;
 * A2MP channels skip the wire request entirely. */
1287 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1289 struct l2cap_conn *conn = chan->conn;
1290 struct l2cap_disconn_req req;
1295 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1296 __clear_retrans_timer(chan);
1297 __clear_monitor_timer(chan);
1298 __clear_ack_timer(chan);
1301 if (chan->scid == L2CAP_CID_A2MP) {
1302 l2cap_state_change(chan, BT_DISCONN);
1306 req.dcid = cpu_to_le16(chan->dcid);
1307 req.scid = cpu_to_le16(chan->scid);
1308 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1311 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1314 /* ---- L2CAP connections ---- */
/* Walk all channels on the connection and advance their state
 * machines: start connecting BT_CONNECT channels (closing those whose
 * mode the peer cannot support), and answer pending incoming requests
 * (BT_CONNECT2) according to security and defer-setup settings. */
1315 static void l2cap_conn_start(struct l2cap_conn *conn)
1317 struct l2cap_chan *chan, *tmp;
1319 BT_DBG("conn %p", conn);
1321 mutex_lock(&conn->chan_lock);
1323 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1324 l2cap_chan_lock(chan);
1326 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1327 l2cap_chan_unlock(chan);
1331 if (chan->state == BT_CONNECT) {
1332 if (!l2cap_chan_check_security(chan) ||
1333 !__l2cap_no_conn_pending(chan)) {
1334 l2cap_chan_unlock(chan);
/* A state-2 device whose mode the peer lacks cannot fall
 * back: close it instead of connecting. */
1338 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1339 && test_bit(CONF_STATE2_DEVICE,
1340 &chan->conf_state)) {
1341 l2cap_chan_close(chan, ECONNRESET);
1342 l2cap_chan_unlock(chan);
1346 l2cap_start_connection(chan);
1348 } else if (chan->state == BT_CONNECT2) {
1349 struct l2cap_conn_rsp rsp;
1351 rsp.scid = cpu_to_le16(chan->dcid);
1352 rsp.dcid = cpu_to_le16(chan->scid);
1354 if (l2cap_chan_check_security(chan)) {
1355 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1356 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1357 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1358 chan->ops->defer(chan);
1361 l2cap_state_change(chan, BT_CONFIG);
1362 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1363 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1366 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1367 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1370 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only send the first configure request, and only after a
 * successful connection response. */
1373 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1374 rsp.result != L2CAP_CR_SUCCESS) {
1375 l2cap_chan_unlock(chan);
1379 set_bit(CONF_REQ_SENT, &chan->conf_state);
1380 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1381 l2cap_build_conf_req(chan, buf), buf);
1382 chan->num_conf_req++;
1385 l2cap_chan_unlock(chan);
1388 mutex_unlock(&conn->chan_lock);
1391 /* Find socket with cid and source/destination bdaddr.
1392 * Returns closest match, locked.
/* Exact (src, dst) matches win immediately; otherwise the best
 * wildcard (BDADDR_ANY) match found is remembered in c1. */
1394 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1398 struct l2cap_chan *c, *c1 = NULL;
1400 read_lock(&chan_list_lock);
1402 list_for_each_entry(c, &chan_list, global_l) {
1403 if (state && c->state != state)
1406 if (c->scid == cid) {
1407 int src_match, dst_match;
1408 int src_any, dst_any;
1411 src_match = !bacmp(&c->src, src);
1412 dst_match = !bacmp(&c->dst, dst);
1413 if (src_match && dst_match) {
1414 read_unlock(&chan_list_lock);
1419 src_any = !bacmp(&c->src, BDADDR_ANY);
1420 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1421 if ((src_match && dst_any) || (src_any && dst_match) ||
1422 (src_any && dst_any))
1427 read_unlock(&chan_list_lock);
/* LE link is up: register for 6LoWPAN, and if a listening ATT server
 * channel matches this address pair (and no client ATT channel or
 * blacklist entry overrides it), spawn a new ATT channel for the
 * connection. */
1432 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1434 struct hci_conn *hcon = conn->hcon;
1435 struct l2cap_chan *chan, *pchan;
1440 bt_6lowpan_add_conn(conn);
1442 /* Check if we have socket listening on cid */
1443 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1444 &hcon->src, &hcon->dst);
1448 /* Client ATT sockets should override the server one */
1449 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1452 dst_type = bdaddr_type(hcon, hcon->dst_type);
1454 /* If device is blocked, do not create a channel for it */
1455 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1458 l2cap_chan_lock(pchan);
1460 chan = pchan->ops->new_connection(pchan);
/* Inherit the addresses of the underlying LE link. */
1464 bacpy(&chan->src, &hcon->src);
1465 bacpy(&chan->dst, &hcon->dst);
1466 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1467 chan->dst_type = dst_type;
1469 __l2cap_chan_add(conn, chan);
1472 l2cap_chan_unlock(pchan);
/* The baseband link is up: kick-start every channel attached to this
 * connection according to its type and state.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* The A2MP fixed channel is managed separately. */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/connectionless channels are ready as soon as
			 * the link is.
			 */
			l2cap_chan_ready(chan);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);

	mutex_unlock(&conn->chan_lock);

	/* Process RX frames that were queued before the link was ready. */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* Only channels that demanded reliable delivery get the error. */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);

	mutex_unlock(&conn->chan_lock);
/* The information request timed out: mark feature discovery as done
 * and continue connection setup with default feature assumptions.
 */
static void l2cap_info_timeout(struct work_struct *work)
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
/*
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can either be explicitly unregistered or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */

int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
	struct hci_dev *hdev = conn->hcon->hdev;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 */

	/* Non-NULL list linkage means this user is already registered. */
	if (user->list.next || user->list.prev) {

	/* conn->hchan is NULL after l2cap_conn_del() was called */

	ret = user->probe(conn, user);

	list_add(&user->list, &conn->users);

	hci_dev_unlock(hdev);
/* Unlink a previously registered l2cap_user and invoke its ->remove
 * callback. Serialized against l2cap_conn_del() via the hci_dev lock.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
	struct hci_dev *hdev = conn->hcon->hdev;

	/* NULL linkage means never registered (or already removed). */
	if (!user->list.next || !user->list.prev)

	list_del(&user->list);
	/* Reset linkage so a repeated unregister is detected above. */
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

	hci_dev_unlock(hdev);
/* Remove every registered l2cap_user from the connection, invoking
 * each user's ->remove callback.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		/* Clear linkage so l2cap_unregister_user() sees it as gone. */
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
/* Tear down an l2cap_conn: close every channel with the given error
 * and release the connection's resources. Relies on the caller holding
 * the parent hci_conn/hci_dev locks (see l2cap_register_user()).
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	/* Drop queued frames and wait for in-flight RX work to finish. */
	skb_queue_purge(&conn->pending_rx);
	flush_work(&conn->pending_rx_work);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels. Hold a channel reference across the close so the
	 * channel cannot be freed while we still use it.
	 */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* If an SMP procedure was pending, stop its timer and destroy the
	 * SMP context before dropping the connection.
	 */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);

	hcon->l2cap_data = NULL;

	l2cap_conn_put(conn);
/* SMP security procedure timed out: destroy the SMP context and drop
 * the whole connection with ETIMEDOUT. Clearing HCI_CONN_LE_SMP_PEND
 * here also prevents l2cap_conn_del() from destroying the SMP context
 * a second time.
 */
static void security_timeout(struct work_struct *work)
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* kref release callback: drops the hci_conn reference the connection
 * owned and frees the l2cap_conn.
 */
static void l2cap_conn_free(struct kref *ref)
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
/* Take a reference on the connection object. */
void l2cap_conn_get(struct l2cap_conn *conn)
	kref_get(&conn->ref);
EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference; frees the connection via l2cap_conn_free() when
 * the last reference is gone.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
	kref_put(&conn->ref, l2cap_conn_free);
EXPORT_SYMBOL(l2cap_conn_put);
/* ---- Socket interface ---- */

/* Find socket with psm and source / destination bdaddr.
 * Returns closest match: an exact address pair beats a wildcard
 * (BDADDR_ANY) match. link_type restricts matches to channels whose
 * source address type fits the link (BR/EDR vs LE).
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)

		/* BR/EDR links only match BR/EDR source addresses... */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)

		/* ...and LE links only match LE source addresses. */
		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))

	read_unlock(&chan_list_lock);
/* ERTM monitor timer expired: feed L2CAP_EV_MONITOR_TO into the TX
 * state machine to poll the remote side again.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Early bail-out path: drop lock and timer reference without
	 * running the state machine.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	/* Release the reference taken when the timer was armed. */
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
/* ERTM retransmission timer expired: feed L2CAP_EV_RETRANS_TO into the
 * TX state machine.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Early bail-out path: drop lock and timer reference without
	 * running the state machine.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);

	/* Release the reference taken when the timer was armed. */
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
/* Transmit queued SDU fragments on a streaming-mode channel.
 * Streaming mode has no retransmission: each frame is sent once and
 * then forgotten.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Don't transmit while an AMP channel move is in progress. */
	if (__chan_is_moving(chan))

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode never acknowledges, so reqseq is fixed. */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		/* Append the FCS when CRC16 checking was negotiated. */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
/* Transmit as many pending I-frames as the remote TX window allows on
 * an ERTM channel. Returns the number of frames sent.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))

	if (__chan_is_moving(chan))

	/* Send until the queue is drained, the remote window fills up, or
	 * the TX state machine leaves the XMIT state.
	 */
	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))

		/* Piggy-back an acknowledgement on this I-frame. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;

		/* Advance the send pointer; NULL when the queue is done. */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every I-frame whose sequence number is currently queued
 * on chan->retrans_list. Gives up the channel (disconnect request) if
 * the per-frame retry limit is exceeded.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))

	if (__chan_is_moving(chan))

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
			BT_DBG("Error: Can't retransmit seq %d, frame missing",

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means "retry forever". */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);

		/* Refresh the piggy-backed acknowledgement. */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			   writable copy before updating the control field.
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
			tx_skb = skb_clone(skb, GFP_KERNEL);

			l2cap_seq_list_clear(&chan->retrans_list);

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);

		/* Recompute the FCS over the updated frame. */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
/* Queue a single sequence number for retransmission and resend it. */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
/* Retransmit every unacked I-frame starting at control->reqseq, up to
 * (but not including) the current send pointer.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Rebuild the retransmit list from scratch. */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))

	if (chan->unacked_frames) {
		/* Skip forward to the requested sequence number. */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)

		/* Queue everything from there up to the send pointer. */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);

		l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send RNR when locally busy, piggy-back
 * the ack on outgoing I-frames when possible, send an RR once the
 * window is 3/4 full, otherwise just (re)arm the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the remote to stop sending. */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);

		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);

			/* Nothing acked yet: defer via the ack timer. */
			__set_ack_timer(chan);
/* Copy "len" bytes of user data from msg into skb: "count" bytes into
 * the head skb, the remainder into a chain of fragment skbs sized to
 * the connection MTU. Returns 0 or a negative error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
			return PTR_ERR(tmp);

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))

		(*frag)->priority = skb->priority;

		/* Account fragment bytes on the head skb. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the PSM, then the user payload. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
	       __le16_to_cpu(chan->psm), len, priority);

	/* Head skb carries at most one MTU worth of data. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus payload.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, placeholder
 * control field (filled in at send time), optional SDU length, payload,
 * and room for the FCS. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

		/* First fragment of a segmented SDU carries the SDU length. */
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
/* Segment an SDU into I-frame PDUs and queue them on seg_queue,
 * tagging each fragment with the proper SAR value (UNSEGMENTED, or
 * START/CONTINUE/END). Returns 0 or a negative error.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
	struct sk_buff *skb;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* The whole SDU fits in one PDU. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sar = L2CAP_SAR_START;
		/* START fragment also carries the SDU length field. */
		pdu_len -= L2CAP_SDULEN_SIZE;

		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

			/* Later fragments carry no SDU length field. */
			pdu_len += L2CAP_SDULEN_SIZE;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			sar = L2CAP_SAR_CONTINUE;
/* Build an LE flow-control (credit-based) PDU: L2CAP header, optional
 * SDU length on the first fragment, then payload. Returns the skb or
 * an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   size_t len, u16 sdulen)
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

		/* First fragment of a segmented SDU carries the SDU length. */
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/* Segment an SDU into LE flow-control PDUs and queue them on
 * seg_queue. Returns 0 or a negative error.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
	struct sk_buff *skb;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;

	/* Remote MPS limits how much each PDU may carry. */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

		/* First PDU also carries the SDU length field. */
		pdu_len -= L2CAP_SDULEN_SIZE;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);

		__skb_queue_tail(seg_queue, skb);

			/* Later PDUs have no SDU length field. */
			pdu_len += L2CAP_SDULEN_SIZE;
/* Send user data on a channel, dispatching on channel type and mode:
 * connectionless, LE credit-based flow control, basic, or
 * ERTM/streaming. Returns bytes sent or a negative error.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
	struct sk_buff *skb;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)

		if (!chan->tx_credits)

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have closed while segmenting (blocking on
		 * memory allocation).
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send one PDU per available credit. */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));

		if (!chan->tx_credits)
			chan->ops->suspend(chan);

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
			l2cap_streaming_send(chan, &seg_queue);

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
			__skb_queue_purge(&seg_queue);

		/* Note: this reports the channel mode, not its state. */
		BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every missing sequence number between the
 * expected one and txseq, recording each in the SREJ list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
	struct l2cap_ctrl control;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Only SREJ frames not already buffered out of order. */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);

	chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the last (tail) entry of the SREJ list, if the
 * list is not empty.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)

	memset(&control, 0, sizeof(control));
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
/* Re-send SREJ frames for all outstanding entries in the SREJ list up
 * to (but not including) txseq, re-queueing each entry afterwards.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
	struct l2cap_ctrl control;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Keep the entry outstanding: append it back to the list. */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
/* Process a received acknowledgement (reqseq): free every acked
 * I-frame from the TX queue and stop the retransmission timer once
 * nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
	struct sk_buff *acked_skb;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing new acknowledged: nothing to do. */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abort the SREJ_SENT RX state: forget all selective-reject state and
 * return the receiver to the plain RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine: handle an event while in the XMIT state. */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,

	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Queue the new frames and transmit immediately. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);

		/* Sends RNR while locally busy. */
		l2cap_send_ack(chan);

	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the remote we were busy: poll it with an
			 * RR so it resumes sending.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;

	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the remote and wait for the F-bit response. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the remote. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
2700 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2701 struct l2cap_ctrl *control,
2702 struct sk_buff_head *skbs, u8 event)
2704 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2708 case L2CAP_EV_DATA_REQUEST:
2709 if (chan->tx_send_head == NULL)
2710 chan->tx_send_head = skb_peek(skbs);
2711 /* Queue data, but don't send. */
2712 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2714 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2715 BT_DBG("Enter LOCAL_BUSY");
2716 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2718 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2719 /* The SREJ_SENT state must be aborted if we are to
2720 * enter the LOCAL_BUSY state.
2722 l2cap_abort_rx_srej_sent(chan);
2725 l2cap_send_ack(chan);
2728 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2729 BT_DBG("Exit LOCAL_BUSY");
2730 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2732 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2733 struct l2cap_ctrl local_control;
2734 memset(&local_control, 0, sizeof(local_control));
2735 local_control.sframe = 1;
2736 local_control.super = L2CAP_SUPER_RR;
2737 local_control.poll = 1;
2738 local_control.reqseq = chan->buffer_seq;
2739 l2cap_send_sframe(chan, &local_control);
2741 chan->retry_count = 1;
2742 __set_monitor_timer(chan);
2743 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2746 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2747 l2cap_process_reqseq(chan, control->reqseq);
2751 case L2CAP_EV_RECV_FBIT:
2752 if (control && control->final) {
2753 __clear_monitor_timer(chan);
2754 if (chan->unacked_frames > 0)
2755 __set_retrans_timer(chan);
2756 chan->retry_count = 0;
2757 chan->tx_state = L2CAP_TX_STATE_XMIT;
2758 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2761 case L2CAP_EV_EXPLICIT_POLL:
2764 case L2CAP_EV_MONITOR_TO:
2765 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2766 l2cap_send_rr_or_rnr(chan, 1);
2767 __set_monitor_timer(chan);
2768 chan->retry_count++;
2770 l2cap_send_disconn_req(chan, ECONNABORTED);
/* ERTM TX state machine entry point: dispatch the event to the handler
 * for the current TX state.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	switch (chan->tx_state) {
	case L2CAP_TX_STATE_XMIT:
		l2cap_tx_state_xmit(chan, control, skbs, event);
	case L2CAP_TX_STATE_WAIT_F:
		l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received reqseq/F-bit to the TX state machine. */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward only a received F-bit to the TX state machine. */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->chan == chan)

		/* Each raw channel gets its own clone of the frame. */
		nskb = skb_clone(skb, GFP_KERNEL);

		if (chan->ops->recv(chan, nskb))

	mutex_unlock(&conn->chan_lock);
/* ---- L2CAP signalling commands ---- */

/* Build a signalling command skb: L2CAP header on the proper signalling
 * CID, command header (code/ident/len), then dlen bytes of payload,
 * fragmented to the connection MTU if necessary.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* MTU must at least fit the two headers. */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->len = cpu_to_le16(dlen);

		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);

		memcpy(skb_put(*frag, count), data, count);

		frag = &(*frag)->next;
/* Parse one configuration option at *ptr, returning its type/length
 * and value (small values by copy, larger ones by pointer in *val).
 * Returns the total size consumed.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
	struct l2cap_conf_opt *opt = *ptr;

	len = L2CAP_CONF_OPT_SIZE + opt->len;

		*val = *((u8 *) opt->val);

		*val = get_unaligned_le16(opt->val);

		*val = get_unaligned_le32(opt->val);

		/* Larger options are returned by pointer, not by value. */
		*val = (unsigned long) opt->val;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/val) at *ptr and advance the
 * cursor past it.  For len > 4, 'val' is interpreted as a pointer to the
 * option payload and copied.
 * NOTE(review): no buffer bound is passed in, so callers must guarantee
 * the destination has room — verify against the fixed CVE for this area.
 */
2940 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2942 struct l2cap_conf_opt *opt = *ptr;
2944 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2951 *((u8 *) opt->val) = val;
2955 put_unaligned_le16(val, opt->val);
2959 put_unaligned_le32(val, opt->val);
2963 memcpy(opt->val, (void *) val, len);
2967 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option built from the channel's
 * local QoS parameters.  ERTM uses the channel's negotiated service type;
 * streaming mode is always advertised as best-effort.
 */
2970 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2972 struct l2cap_conf_efs efs;
2974 switch (chan->mode) {
2975 case L2CAP_MODE_ERTM:
2976 efs.id = chan->local_id;
2977 efs.stype = chan->local_stype;
2978 efs.msdu = cpu_to_le16(chan->local_msdu);
2979 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2980 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2981 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2984 case L2CAP_MODE_STREAMING:
2986 efs.stype = L2CAP_SERV_BESTEFFORT;
2987 efs.msdu = cpu_to_le16(chan->local_msdu);
2988 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2997 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2998 (unsigned long) &efs);
/* Deferred-ack timer: when it fires, send an RR/RNR acknowledging any
 * frames received since the last ack.  Drops the channel reference taken
 * when the timer was armed.
 */
3001 static void l2cap_ack_timeout(struct work_struct *work)
3003 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3007 BT_DBG("chan %p", chan);
3009 l2cap_chan_lock(chan);
/* Number of received-but-unacked frames. */
3011 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3012 chan->last_acked_seq);
3015 l2cap_send_rr_or_rnr(chan, 0);
3017 l2cap_chan_unlock(chan);
3018 l2cap_chan_put(chan);
/* Reset per-channel sequence/transmit state after configuration completes.
 * For ERTM, additionally initialize state machines, the retrans/monitor/ack
 * delayed works, the SREJ queue and the srej/retrans sequence lists.
 * Returns 0 or a negative errno from sequence-list allocation.
 */
3021 int l2cap_ertm_init(struct l2cap_chan *chan)
3025 chan->next_tx_seq = 0;
3026 chan->expected_tx_seq = 0;
3027 chan->expected_ack_seq = 0;
3028 chan->unacked_frames = 0;
3029 chan->buffer_seq = 0;
3030 chan->frames_sent = 0;
3031 chan->last_acked_seq = 0;
3033 chan->sdu_last_frag = NULL;
3036 skb_queue_head_init(&chan->tx_q);
/* AMP move state starts out "stable on BR/EDR". */
3038 chan->local_amp_id = AMP_ID_BREDR;
3039 chan->move_id = AMP_ID_BREDR;
3040 chan->move_state = L2CAP_MOVE_STABLE;
3041 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the ERTM machinery below. */
3043 if (chan->mode != L2CAP_MODE_ERTM)
3046 chan->rx_state = L2CAP_RX_STATE_RECV;
3047 chan->tx_state = L2CAP_TX_STATE_XMIT;
3049 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3050 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3051 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3053 skb_queue_head_init(&chan->srej_q);
3055 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3059 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Unwind the first list if the second allocation failed. */
3061 l2cap_seq_list_free(&chan->srej_list);
/* Keep the requested ERTM/streaming mode if the remote advertises support
 * for it; otherwise fall back to basic mode.
 */
3066 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3069 case L2CAP_MODE_STREAMING:
3070 case L2CAP_MODE_ERTM:
3071 if (l2cap_mode_supported(mode, remote_feat_mask))
3075 return L2CAP_MODE_BASIC;
/* Extended window size usable: high-speed enabled and remote supports it. */
3079 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3081 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended flow spec usable: high-speed enabled and remote supports it. */
3084 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3086 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in rfc->retrans_timeout / rfc->monitor_timeout.  On an AMP link the
 * timeouts are derived from the controller's best-effort flush timeout
 * (clamped to 16 bits); on BR/EDR the spec defaults are used.
 */
3089 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3090 struct l2cap_conf_rfc *rfc)
3092 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3093 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3095 /* Class 1 devices have must have ERTM timeouts
3096 * exceeding the Link Supervision Timeout. The
3097 * default Link Supervision Timeout for AMP
3098 * controllers is 10 seconds.
3100 * Class 1 devices use 0xffffffff for their
3101 * best-effort flush timeout, so the clamping logic
3102 * will result in a timeout that meets the above
3103 * requirement. ERTM timeouts are 16-bit values, so
3104 * the maximum timeout is 65.535 seconds.
3107 /* Convert timeout to milliseconds and round */
3108 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3110 /* This is the recommended formula for class 2 devices
3111 * that start ERTM timers when packets are sent to the
3114 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field; overflow handling sampled out here. */
3116 if (ertm_to > 0xffff)
3119 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3120 rfc->monitor_timeout = rfc->retrans_timeout;
3122 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3123 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose tx window and control-field format: if a window larger than the
 * default is wanted and extended windows are supported, switch to the
 * extended control field; otherwise clamp to the classic default window.
 */
3127 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3129 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3130 __l2cap_ews_supported(chan->conn)) {
3131 /* use extended control field */
3132 set_bit(FLAG_EXT_CTRL, &chan->flags);
3133 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3135 chan->tx_win = min_t(u16, chan->tx_win,
3136 L2CAP_DEFAULT_TX_WINDOW);
3137 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3139 chan->ack_win = chan->tx_win;
/* Build an outgoing Configure Request for 'chan' into 'data'.  Adds MTU,
 * RFC (mode-specific), and optionally EFS/EWS/FCS options depending on
 * mode and negotiated features.  Returns the request length (return
 * statement not visible in this excerpt).
 */
3142 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3144 struct l2cap_conf_req *req = data;
3145 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3146 void *ptr = req->data;
3149 BT_DBG("chan %p", chan);
/* Only downgrade the mode on the very first config exchange. */
3151 if (chan->num_conf_req || chan->num_conf_rsp)
3154 switch (chan->mode) {
3155 case L2CAP_MODE_STREAMING:
3156 case L2CAP_MODE_ERTM:
3157 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3160 if (__l2cap_efs_supported(chan->conn))
3161 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3165 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* MTU option only when it differs from the spec default. */
3170 if (chan->imtu != L2CAP_DEFAULT_MTU)
3171 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3173 switch (chan->mode) {
3174 case L2CAP_MODE_BASIC:
/* Basic mode needs an explicit RFC option only if the remote
 * supports ERTM/streaming (so it knows we chose basic).
 */
3175 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3176 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3179 rfc.mode = L2CAP_MODE_BASIC;
3181 rfc.max_transmit = 0;
3182 rfc.retrans_timeout = 0;
3183 rfc.monitor_timeout = 0;
3184 rfc.max_pdu_size = 0;
3186 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3187 (unsigned long) &rfc);
3190 case L2CAP_MODE_ERTM:
3191 rfc.mode = L2CAP_MODE_ERTM;
3192 rfc.max_transmit = chan->max_tx;
3194 __l2cap_set_ertm_timeouts(chan, &rfc);
/* MPS bounded by ACL MTU minus extended header overhead. */
3196 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3197 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3199 rfc.max_pdu_size = cpu_to_le16(size);
3201 l2cap_txwin_setup(chan);
3203 rfc.txwin_size = min_t(u16, chan->tx_win,
3204 L2CAP_DEFAULT_TX_WINDOW);
3206 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3207 (unsigned long) &rfc);
3209 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3210 l2cap_add_opt_efs(&ptr, chan);
3212 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3213 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Offer to drop FCS if allowed and remote supports the option. */
3216 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3217 if (chan->fcs == L2CAP_FCS_NONE ||
3218 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3219 chan->fcs = L2CAP_FCS_NONE;
3220 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3225 case L2CAP_MODE_STREAMING:
3226 l2cap_txwin_setup(chan);
3227 rfc.mode = L2CAP_MODE_STREAMING;
3229 rfc.max_transmit = 0;
3230 rfc.retrans_timeout = 0;
3231 rfc.monitor_timeout = 0;
3233 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3234 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3236 rfc.max_pdu_size = cpu_to_le16(size);
3238 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3239 (unsigned long) &rfc);
3241 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3242 l2cap_add_opt_efs(&ptr, chan);
3244 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3245 if (chan->fcs == L2CAP_FCS_NONE ||
3246 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3247 chan->fcs = L2CAP_FCS_NONE;
3248 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3254 req->dcid = cpu_to_le16(chan->dcid);
3255 req->flags = __constant_cpu_to_le16(0);
/* Parse the remote's accumulated Configure Request (chan->conf_req /
 * chan->conf_len) and build our Configure Response into 'data'.  Walks the
 * option list, negotiates mode/MTU/RFC/EFS/EWS, and returns the response
 * length or -ECONNREFUSED when agreement is impossible.
 * NOTE(review): response options are appended via l2cap_add_conf_opt()
 * with no explicit output-buffer bound visible here — confirm the caller's
 * buffer sizing (this area had a known overflow fix upstream).
 */
3260 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3262 struct l2cap_conf_rsp *rsp = data;
3263 void *ptr = rsp->data;
3264 void *req = chan->conf_req;
3265 int len = chan->conf_len;
3266 int type, hint, olen;
3268 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3269 struct l2cap_conf_efs efs;
3271 u16 mtu = L2CAP_DEFAULT_MTU;
3272 u16 result = L2CAP_CONF_SUCCESS;
3275 BT_DBG("chan %p", chan);
/* Pass 1: consume every option the remote sent. */
3277 while (len >= L2CAP_CONF_OPT_SIZE) {
3278 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit: unknown hinted options are skippable, others rejected. */
3280 hint = type & L2CAP_CONF_HINT;
3281 type &= L2CAP_CONF_MASK;
3284 case L2CAP_CONF_MTU:
3288 case L2CAP_CONF_FLUSH_TO:
3289 chan->flush_to = val;
3292 case L2CAP_CONF_QOS:
3295 case L2CAP_CONF_RFC:
3296 if (olen == sizeof(rfc))
3297 memcpy(&rfc, (void *) val, olen);
3300 case L2CAP_CONF_FCS:
3301 if (val == L2CAP_FCS_NONE)
3302 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3305 case L2CAP_CONF_EFS:
3307 if (olen == sizeof(efs))
3308 memcpy(&efs, (void *) val, olen);
3311 case L2CAP_CONF_EWS:
/* Extended window requires high-speed support on our side. */
3312 if (!chan->conn->hs_enabled)
3313 return -ECONNREFUSED;
3315 set_bit(FLAG_EXT_CTRL, &chan->flags);
3316 set_bit(CONF_EWS_RECV, &chan->conf_state);
3317 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3318 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as UNKNOWN. */
3325 result = L2CAP_CONF_UNKNOWN;
3326 *((u8 *) ptr++) = type;
/* Mode selection is only allowed on the first exchange. */
3331 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3334 switch (chan->mode) {
3335 case L2CAP_MODE_STREAMING:
3336 case L2CAP_MODE_ERTM:
3337 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3338 chan->mode = l2cap_select_mode(rfc.mode,
3339 chan->conn->feat_mask);
3344 if (__l2cap_efs_supported(chan->conn))
3345 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3347 return -ECONNREFUSED;
3350 if (chan->mode != rfc.mode)
3351 return -ECONNREFUSED;
/* Mode disagreement: propose ours once, then give up. */
3357 if (chan->mode != rfc.mode) {
3358 result = L2CAP_CONF_UNACCEPT;
3359 rfc.mode = chan->mode;
3361 if (chan->num_conf_rsp == 1)
3362 return -ECONNREFUSED;
3364 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3365 (unsigned long) &rfc);
3368 if (result == L2CAP_CONF_SUCCESS) {
3369 /* Configure output options and let the other side know
3370 * which ones we don't like. */
3372 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3373 result = L2CAP_CONF_UNACCEPT;
3376 set_bit(CONF_MTU_DONE, &chan->conf_state);
3378 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must be compatible with our local one. */
3381 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3382 efs.stype != L2CAP_SERV_NOTRAFIC &&
3383 efs.stype != chan->local_stype) {
3385 result = L2CAP_CONF_UNACCEPT;
3387 if (chan->num_conf_req >= 1)
3388 return -ECONNREFUSED;
3390 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3392 (unsigned long) &efs);
3394 /* Send PENDING Conf Rsp */
3395 result = L2CAP_CONF_PENDING;
3396 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3401 case L2CAP_MODE_BASIC:
3402 chan->fcs = L2CAP_FCS_NONE;
3403 set_bit(CONF_MODE_DONE, &chan->conf_state);
3406 case L2CAP_MODE_ERTM:
3407 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3408 chan->remote_tx_win = rfc.txwin_size;
3410 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3412 chan->remote_max_tx = rfc.max_transmit;
/* Clamp remote MPS to what our ACL MTU can carry. */
3414 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3415 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3416 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3417 rfc.max_pdu_size = cpu_to_le16(size);
3418 chan->remote_mps = size;
3420 __l2cap_set_ertm_timeouts(chan, &rfc);
3422 set_bit(CONF_MODE_DONE, &chan->conf_state);
3424 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3425 sizeof(rfc), (unsigned long) &rfc);
3427 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3428 chan->remote_id = efs.id;
3429 chan->remote_stype = efs.stype;
3430 chan->remote_msdu = le16_to_cpu(efs.msdu);
3431 chan->remote_flush_to =
3432 le32_to_cpu(efs.flush_to);
3433 chan->remote_acc_lat =
3434 le32_to_cpu(efs.acc_lat);
3435 chan->remote_sdu_itime =
3436 le32_to_cpu(efs.sdu_itime);
3437 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3439 (unsigned long) &efs);
3443 case L2CAP_MODE_STREAMING:
3444 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3445 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3446 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3447 rfc.max_pdu_size = cpu_to_le16(size);
3448 chan->remote_mps = size;
3450 set_bit(CONF_MODE_DONE, &chan->conf_state);
3452 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3453 (unsigned long) &rfc);
3458 result = L2CAP_CONF_UNACCEPT;
3460 memset(&rfc, 0, sizeof(rfc));
3461 rfc.mode = chan->mode;
3464 if (result == L2CAP_CONF_SUCCESS)
3465 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3467 rsp->scid = cpu_to_le16(chan->dcid);
3468 rsp->result = cpu_to_le16(result);
3469 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configure Response from the remote and build a follow-up
 * Configure Request into 'data', updating *result as negotiation proceeds.
 * On success/pending, latch the negotiated ERTM/streaming parameters into
 * the channel.  Returns the new request length or -ECONNREFUSED.
 * NOTE(review): options are re-emitted via l2cap_add_conf_opt() without a
 * visible output bound — historical overflow territory; confirm buffer
 * sizing at the call sites.
 */
3474 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3475 void *data, u16 *result)
3477 struct l2cap_conf_req *req = data;
3478 void *ptr = req->data;
3481 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3482 struct l2cap_conf_efs efs;
3484 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3486 while (len >= L2CAP_CONF_OPT_SIZE) {
3487 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3490 case L2CAP_CONF_MTU:
/* Remote proposed an MTU below the legal minimum: refuse and
 * fall back to the minimum ourselves.
 */
3491 if (val < L2CAP_DEFAULT_MIN_MTU) {
3492 *result = L2CAP_CONF_UNACCEPT;
3493 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3496 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3499 case L2CAP_CONF_FLUSH_TO:
3500 chan->flush_to = val;
3501 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3505 case L2CAP_CONF_RFC:
3506 if (olen == sizeof(rfc))
3507 memcpy(&rfc, (void *)val, olen);
/* State-2 devices cannot change mode mid-negotiation. */
3509 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3510 rfc.mode != chan->mode)
3511 return -ECONNREFUSED;
3515 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3516 sizeof(rfc), (unsigned long) &rfc);
3519 case L2CAP_CONF_EWS:
3520 chan->ack_win = min_t(u16, val, chan->ack_win);
3521 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3525 case L2CAP_CONF_EFS:
3526 if (olen == sizeof(efs))
3527 memcpy(&efs, (void *)val, olen);
3529 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3530 efs.stype != L2CAP_SERV_NOTRAFIC &&
3531 efs.stype != chan->local_stype)
3532 return -ECONNREFUSED;
3534 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3535 (unsigned long) &efs);
3538 case L2CAP_CONF_FCS:
3539 if (*result == L2CAP_CONF_PENDING)
3540 if (val == L2CAP_FCS_NONE)
3541 set_bit(CONF_RECV_NO_FCS,
/* Basic mode cannot be renegotiated to anything else. */
3547 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3548 return -ECONNREFUSED;
3550 chan->mode = rfc.mode;
3552 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3554 case L2CAP_MODE_ERTM:
3555 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3556 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3557 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3558 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3559 chan->ack_win = min_t(u16, chan->ack_win,
3562 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3563 chan->local_msdu = le16_to_cpu(efs.msdu);
3564 chan->local_sdu_itime =
3565 le32_to_cpu(efs.sdu_itime);
3566 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3567 chan->local_flush_to =
3568 le32_to_cpu(efs.flush_to);
3572 case L2CAP_MODE_STREAMING:
3573 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3577 req->dcid = cpu_to_le16(chan->dcid);
3578 req->flags = __constant_cpu_to_le16(0);
/* Build a minimal (option-less) Configure Response with the given result
 * and flags; returns the response length (return not visible here).
 */
3583 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3584 u16 result, u16 flags)
3586 struct l2cap_conf_rsp *rsp = data;
3587 void *ptr = rsp->data;
3589 BT_DBG("chan %p", chan);
3591 rsp->scid = cpu_to_le16(chan->dcid);
3592 rsp->result = cpu_to_le16(result);
3593 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE Credit Based Connection Response for a channel
 * whose acceptance was delayed (e.g. waiting for userspace authorization).
 */
3598 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3600 struct l2cap_le_conn_rsp rsp;
3601 struct l2cap_conn *conn = chan->conn;
3603 BT_DBG("chan %p", chan);
3605 rsp.dcid = cpu_to_le16(chan->scid);
3606 rsp.mtu = cpu_to_le16(chan->imtu);
3607 rsp.mps = cpu_to_le16(chan->mps);
3608 rsp.credits = cpu_to_le16(chan->rx_credits);
3609 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3611 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR (or AMP Create Channel) Connection Response,
 * then kick off configuration with our first Configure Request if one has
 * not been sent yet.
 */
3615 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3617 struct l2cap_conn_rsp rsp;
3618 struct l2cap_conn *conn = chan->conn;
3622 rsp.scid = cpu_to_le16(chan->dcid);
3623 rsp.dcid = cpu_to_le16(chan->scid);
3624 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3625 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP channels answer with CREATE_CHAN_RSP instead of CONN_RSP. */
3628 rsp_code = L2CAP_CREATE_CHAN_RSP;
3630 rsp_code = L2CAP_CONN_RSP;
3632 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3634 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only send the initial Configure Request once. */
3636 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3639 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3640 l2cap_build_conf_req(chan, buf), buf);
3641 chan->num_conf_req++;
/* Extract final RFC (and EWS) parameters from a successful Configure
 * Response and latch them into the channel.  Defaults protect against a
 * remote that omitted the RFC/EWS options entirely.
 */
3644 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3648 /* Use sane default values in case a misbehaving remote device
3649 * did not send an RFC or extended window size option.
3651 u16 txwin_ext = chan->ack_win;
3652 struct l2cap_conf_rfc rfc = {
3654 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3655 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3656 .max_pdu_size = cpu_to_le16(chan->imtu),
3657 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3660 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM/streaming channels carry RFC parameters. */
3662 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3665 while (len >= L2CAP_CONF_OPT_SIZE) {
3666 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3669 case L2CAP_CONF_RFC:
3670 if (olen == sizeof(rfc))
3671 memcpy(&rfc, (void *)val, olen);
3673 case L2CAP_CONF_EWS:
3680 case L2CAP_MODE_ERTM:
3681 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3682 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3683 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Ack window source depends on whether extended control is on. */
3684 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3685 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3687 chan->ack_win = min_t(u16, chan->ack_win,
3690 case L2CAP_MODE_STREAMING:
3691 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our pending
 * Information Request, treat the info exchange as done and continue
 * bringing up channels.
 */
3695 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3696 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3699 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Reject malformed (short) packets. */
3701 if (cmd_len < sizeof(*rej))
3704 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3707 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3708 cmd->ident == conn->info_ident) {
3709 cancel_delayed_work(&conn->info_timer);
3711 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3712 conn->info_ident = 0;
3714 l2cap_conn_start(conn);
/* Handle an incoming Connection Request (or AMP Create Channel Request):
 * find a listening channel for the PSM, security-check the link, create a
 * new child channel, send the response, and if needed start the
 * information/configuration exchanges.  Returns the new channel or NULL.
 */
3720 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3721 struct l2cap_cmd_hdr *cmd,
3722 u8 *data, u8 rsp_code, u8 amp_id)
3724 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3725 struct l2cap_conn_rsp rsp;
3726 struct l2cap_chan *chan = NULL, *pchan;
3727 int result, status = L2CAP_CS_NO_INFO;
3729 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3730 __le16 psm = req->psm;
3732 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3734 /* Check if we have socket listening on psm */
3735 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3736 &conn->hcon->dst, ACL_LINK);
3738 result = L2CAP_CR_BAD_PSM;
3742 mutex_lock(&conn->chan_lock);
3743 l2cap_chan_lock(pchan);
3745 /* Check if the ACL is secure enough (if not SDP) */
3746 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3747 !hci_conn_check_link_mode(conn->hcon)) {
3748 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3749 result = L2CAP_CR_SEC_BLOCK;
3753 result = L2CAP_CR_NO_MEM;
3755 /* Check if we already have channel with that dcid */
3756 if (__l2cap_get_chan_by_dcid(conn, scid))
3759 chan = pchan->ops->new_connection(pchan);
3763 /* For certain devices (ex: HID mouse), support for authentication,
3764 * pairing and bonding is optional. For such devices, inorder to avoid
3765 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3766 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3768 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3770 bacpy(&chan->src, &conn->hcon->src);
3771 bacpy(&chan->dst, &conn->hcon->dst);
3772 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3773 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3776 chan->local_amp_id = amp_id;
3778 __l2cap_chan_add(conn, chan);
3782 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3784 chan->ident = cmd->ident;
/* Only proceed past PEND once the feature-mask exchange finished. */
3786 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3787 if (l2cap_chan_check_security(chan)) {
3788 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3789 l2cap_state_change(chan, BT_CONNECT2);
3790 result = L2CAP_CR_PEND;
3791 status = L2CAP_CS_AUTHOR_PEND;
3792 chan->ops->defer(chan);
3794 /* Force pending result for AMP controllers.
3795 * The connection will succeed after the
3796 * physical link is up.
3798 if (amp_id == AMP_ID_BREDR) {
3799 l2cap_state_change(chan, BT_CONFIG);
3800 result = L2CAP_CR_SUCCESS;
3802 l2cap_state_change(chan, BT_CONNECT2);
3803 result = L2CAP_CR_PEND;
3805 status = L2CAP_CS_NO_INFO;
3808 l2cap_state_change(chan, BT_CONNECT2);
3809 result = L2CAP_CR_PEND;
3810 status = L2CAP_CS_AUTHEN_PEND;
3813 l2cap_state_change(chan, BT_CONNECT2);
3814 result = L2CAP_CR_PEND;
3815 status = L2CAP_CS_NO_INFO;
3819 l2cap_chan_unlock(pchan);
3820 mutex_unlock(&conn->chan_lock);
3823 rsp.scid = cpu_to_le16(scid);
3824 rsp.dcid = cpu_to_le16(dcid);
3825 rsp.result = cpu_to_le16(result);
3826 rsp.status = cpu_to_le16(status);
3827 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Pending with no info yet: fire off the feature-mask request. */
3829 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3830 struct l2cap_info_req info;
3831 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3833 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3834 conn->info_ident = l2cap_get_ident(conn);
3836 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3838 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3839 sizeof(info), &info);
/* Accepted immediately: start configuration right away. */
3842 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3843 result == L2CAP_CR_SUCCESS) {
3845 set_bit(CONF_REQ_SENT, &chan->conf_state);
3846 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3847 l2cap_build_conf_req(chan, buf), buf);
3848 chan->num_conf_req++;
/* Signalling handler for L2CAP_CONN_REQ: notify mgmt of the connection
 * (once) and delegate the real work to l2cap_connect().
 */
3854 static int l2cap_connect_req(struct l2cap_conn *conn,
3855 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3857 struct hci_dev *hdev = conn->hcon->hdev;
3858 struct hci_conn *hcon = conn->hcon;
/* Reject malformed (short) packets. */
3860 if (cmd_len < sizeof(struct l2cap_conn_req))
3864 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3865 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3866 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3867 hcon->dst_type, 0, NULL, 0,
3869 hci_dev_unlock(hdev);
3871 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response (or Create Channel Response): on success
 * move the channel to BT_CONFIG and send our first Configure Request; on
 * a final failure tear the channel down.
 */
3875 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3876 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3879 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3880 u16 scid, dcid, result, status;
3881 struct l2cap_chan *chan;
3885 if (cmd_len < sizeof(*rsp))
3888 scid = __le16_to_cpu(rsp->scid);
3889 dcid = __le16_to_cpu(rsp->dcid);
3890 result = __le16_to_cpu(rsp->result);
3891 status = __le16_to_cpu(rsp->status);
3893 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3894 dcid, scid, result, status);
3896 mutex_lock(&conn->chan_lock);
/* Find the channel by scid, falling back to the command ident
 * (scid of 0 is used for pending responses).
 */
3899 chan = __l2cap_get_chan_by_scid(conn, scid);
3905 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3914 l2cap_chan_lock(chan);
3917 case L2CAP_CR_SUCCESS:
3918 l2cap_state_change(chan, BT_CONFIG);
3921 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3923 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3926 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3927 l2cap_build_conf_req(chan, req), req);
3928 chan->num_conf_req++;
3932 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result: refuse and delete the channel. */
3936 l2cap_chan_del(chan, ECONNREFUSED);
3940 l2cap_chan_unlock(chan);
3943 mutex_unlock(&conn->chan_lock);
/* Pick the FCS setting once configuration is done: CRC16 for ERTM or
 * streaming unless both sides agreed to drop it; none otherwise.
 */
3948 static inline void set_default_fcs(struct l2cap_chan *chan)
3950 /* FCS is enabled only in ERTM or streaming mode, if one or both
3953 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3954 chan->fcs = L2CAP_FCS_NONE;
3955 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3956 chan->fcs = L2CAP_FCS_CRC16;
/* Finalize a previously-pending EFS negotiation: clear the local-pending
 * flag, mark output done, and send a SUCCESS Configure Response.
 */
3959 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3960 u8 ident, u16 flags)
3962 struct l2cap_conn *conn = chan->conn;
3964 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3967 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3968 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3970 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3971 l2cap_build_conf_rsp(chan, data,
3972 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the offending
 * source/destination CIDs back to the remote.
 */
3975 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3978 struct l2cap_cmd_rej_cid rej;
3980 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3981 rej.scid = __cpu_to_le16(scid);
3982 rej.dcid = __cpu_to_le16(dcid);
3984 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configure Request.  Options may span multiple
 * requests (continuation flag): they are accumulated in chan->conf_req
 * until complete, then parsed and answered; once both directions are
 * configured the channel is made ready (ERTM state initialized first).
 */
3987 static inline int l2cap_config_req(struct l2cap_conn *conn,
3988 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3991 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3994 struct l2cap_chan *chan;
3997 if (cmd_len < sizeof(*req))
4000 dcid = __le16_to_cpu(req->dcid);
4001 flags = __le16_to_cpu(req->flags);
4003 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4005 chan = l2cap_get_chan_by_scid(conn, dcid);
4007 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config only legal while connecting/configuring. */
4011 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4012 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4017 /* Reject if config buffer is too small. */
4018 len = cmd_len - sizeof(*req);
4019 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4020 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4021 l2cap_build_conf_rsp(chan, rsp,
4022 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the option list. */
4027 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4028 chan->conf_len += len;
4030 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4031 /* Incomplete config. Send empty response. */
4032 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4033 l2cap_build_conf_rsp(chan, rsp,
4034 L2CAP_CONF_SUCCESS, flags), rsp);
4038 /* Complete config. */
4039 len = l2cap_parse_conf_req(chan, rsp);
4041 l2cap_send_disconn_req(chan, ECONNRESET);
4045 chan->ident = cmd->ident;
4046 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4047 chan->num_conf_rsp++;
4049 /* Reset config buffer. */
4052 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions done: finish bring-up. */
4055 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4056 set_default_fcs(chan);
4058 if (chan->mode == L2CAP_MODE_ERTM ||
4059 chan->mode == L2CAP_MODE_STREAMING)
4060 err = l2cap_ertm_init(chan);
4063 l2cap_send_disconn_req(chan, -err);
4065 l2cap_chan_ready(chan);
4070 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4072 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4073 l2cap_build_conf_req(chan, buf), buf);
4074 chan->num_conf_req++;
4077 /* Got Conf Rsp PENDING from remote side and asume we sent
4078 Conf Rsp PENDING in the code above */
4079 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4080 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4082 /* check compatibility */
4084 /* Send rsp for BR/EDR channel */
4086 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4088 chan->ident = cmd->ident;
4092 l2cap_chan_unlock(chan);
/* Handle an incoming Configure Response: SUCCESS latches the final RFC
 * parameters; PENDING may trigger an EFS follow-up or AMP logical-link
 * creation; UNACCEPT retries with a corrected request a limited number of
 * times; anything else disconnects.  When both directions are done the
 * channel is made ready.
 */
4096 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4097 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4100 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4101 u16 scid, flags, result;
4102 struct l2cap_chan *chan;
4103 int len = cmd_len - sizeof(*rsp);
4106 if (cmd_len < sizeof(*rsp))
4109 scid = __le16_to_cpu(rsp->scid);
4110 flags = __le16_to_cpu(rsp->flags);
4111 result = __le16_to_cpu(rsp->result);
4113 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4116 chan = l2cap_get_chan_by_scid(conn, scid);
4121 case L2CAP_CONF_SUCCESS:
4122 l2cap_conf_rfc_get(chan, rsp->data, len);
4123 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4126 case L2CAP_CONF_PENDING:
4127 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4129 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4132 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4135 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR finishes here; AMP needs the logical link first. */
4139 if (!chan->hs_hcon) {
4140 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4143 if (l2cap_check_efs(chan)) {
4144 amp_create_logical_link(chan);
4145 chan->ident = cmd->ident;
4151 case L2CAP_CONF_UNACCEPT:
/* Bounded retry: re-parse and send a corrected request. */
4152 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4155 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4156 l2cap_send_disconn_req(chan, ECONNRESET);
4160 /* throw out any old stored conf requests */
4161 result = L2CAP_CONF_SUCCESS;
4162 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4165 l2cap_send_disconn_req(chan, ECONNRESET);
4169 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4170 L2CAP_CONF_REQ, len, req);
4171 chan->num_conf_req++;
4172 if (result != L2CAP_CONF_SUCCESS)
4178 l2cap_chan_set_err(chan, ECONNRESET);
4180 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4181 l2cap_send_disconn_req(chan, ECONNRESET);
/* More option fragments still coming: wait for them. */
4185 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4188 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4190 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4191 set_default_fcs(chan);
4193 if (chan->mode == L2CAP_MODE_ERTM ||
4194 chan->mode == L2CAP_MODE_STREAMING)
4195 err = l2cap_ertm_init(chan);
4198 l2cap_send_disconn_req(chan, -err);
4200 l2cap_chan_ready(chan);
4204 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, then tear the channel down.  An extra hold keeps
 * the channel alive across the unlocked ops->close() call.
 */
4208 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4209 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4212 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4213 struct l2cap_disconn_rsp rsp;
4215 struct l2cap_chan *chan;
4217 if (cmd_len != sizeof(*req))
4220 scid = __le16_to_cpu(req->scid);
4221 dcid = __le16_to_cpu(req->dcid);
4223 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4225 mutex_lock(&conn->chan_lock);
/* The remote's dcid is our scid. */
4227 chan = __l2cap_get_chan_by_scid(conn, dcid);
4229 mutex_unlock(&conn->chan_lock);
4230 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4234 l2cap_chan_lock(chan);
4236 rsp.dcid = cpu_to_le16(chan->scid);
4237 rsp.scid = cpu_to_le16(chan->dcid);
4238 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4240 chan->ops->set_shutdown(chan);
4242 l2cap_chan_hold(chan);
4243 l2cap_chan_del(chan, ECONNRESET);
4245 l2cap_chan_unlock(chan);
4247 chan->ops->close(chan);
4248 l2cap_chan_put(chan);
4250 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnection Response to our earlier request: the remote has
 * confirmed, so delete and close the channel (hold/put brackets the
 * unlocked close callback).
 */
4255 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4256 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4259 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4261 struct l2cap_chan *chan;
4263 if (cmd_len != sizeof(*rsp))
4266 scid = __le16_to_cpu(rsp->scid);
4267 dcid = __le16_to_cpu(rsp->dcid);
4269 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4271 mutex_lock(&conn->chan_lock);
4273 chan = __l2cap_get_chan_by_scid(conn, scid);
4275 mutex_unlock(&conn->chan_lock);
4279 l2cap_chan_lock(chan);
4281 l2cap_chan_hold(chan);
4282 l2cap_chan_del(chan, 0);
4284 l2cap_chan_unlock(chan);
4286 chan->ops->close(chan);
4287 l2cap_chan_put(chan);
4289 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request: answer with our feature mask or
 * fixed-channel map, or NOTSUPP for unknown types.  The A2MP fixed-channel
 * bit is toggled live based on high-speed availability.
 */
4294 static inline int l2cap_information_req(struct l2cap_conn *conn,
4295 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4298 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4301 if (cmd_len != sizeof(*req))
4304 type = __le16_to_cpu(req->type);
4306 BT_DBG("type 0x%4.4x", type);
4308 if (type == L2CAP_IT_FEAT_MASK) {
4310 u32 feat_mask = l2cap_feat_mask;
4311 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4312 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4313 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4315 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow/window only advertised with high speed on. */
4317 if (conn->hs_enabled)
4318 feat_mask |= L2CAP_FEAT_EXT_FLOW
4319 | L2CAP_FEAT_EXT_WINDOW;
4321 put_unaligned_le32(feat_mask, rsp->data);
4322 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4324 } else if (type == L2CAP_IT_FIXED_CHAN) {
4326 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4328 if (conn->hs_enabled)
4329 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4331 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4333 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4334 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4335 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4336 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4339 struct l2cap_info_rsp rsp;
4340 rsp.type = cpu_to_le16(type);
4341 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4342 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an L2CAP Information Response. Only accepted when it matches
 * the outstanding info request ident and the feature-mask exchange is
 * not already done; a feature-mask reply may chain a fixed-channel
 * request before connection setup continues.
 * NOTE(review): listing is elided (switch header, breaks and returns
 * are missing from view).
 */
4349 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4350 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4353 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4356 if (cmd_len < sizeof(*rsp))
4359 type = __le16_to_cpu(rsp->type);
4360 result = __le16_to_cpu(rsp->result);
4362 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4364 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4365 if (cmd->ident != conn->info_ident ||
4366 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4369 cancel_delayed_work(&conn->info_timer);
/* A failed query still completes the exchange: mark it done and let
 * pending channels proceed without the remote feature info. */
4371 if (result != L2CAP_IR_SUCCESS) {
4372 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4373 conn->info_ident = 0;
4375 l2cap_conn_start(conn);
4381 case L2CAP_IT_FEAT_MASK:
4382 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, follow up with a fixed-channel
 * query before declaring the exchange complete. */
4384 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4385 struct l2cap_info_req req;
4386 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4388 conn->info_ident = l2cap_get_ident(conn);
4390 l2cap_send_cmd(conn, conn->info_ident,
4391 L2CAP_INFO_REQ, sizeof(req), &req);
4393 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4394 conn->info_ident = 0;
4396 l2cap_conn_start(conn);
4400 case L2CAP_IT_FIXED_CHAN:
4401 conn->fixed_chan_mask = rsp->data[0];
4402 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4403 conn->info_ident = 0;
4405 l2cap_conn_start(conn);
/* Handle an L2CAP Create Channel Request (AMP). amp_id 0 falls back to
 * a plain BR/EDR connect; otherwise the AMP controller id is validated
 * and, when a high-speed link exists, the new channel is bound to it.
 * On any validation failure a CREATE_CHAN_RSP with L2CAP_CR_BAD_AMP is
 * sent.
 * NOTE(review): listing is elided (error labels, returns and some
 * closing braces are missing from view).
 */
4412 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4413 struct l2cap_cmd_hdr *cmd,
4414 u16 cmd_len, void *data)
4416 struct l2cap_create_chan_req *req = data;
4417 struct l2cap_create_chan_rsp rsp;
4418 struct l2cap_chan *chan;
4419 struct hci_dev *hdev;
4422 if (cmd_len != sizeof(*req))
/* Creating AMP channels requires high speed to be enabled. */
4425 if (!conn->hs_enabled)
4428 psm = le16_to_cpu(req->psm);
4429 scid = le16_to_cpu(req->scid);
4431 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4433 /* For controller id 0 make BR/EDR connection */
4434 if (req->amp_id == AMP_ID_BREDR) {
4435 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4440 /* Validate AMP controller id */
4441 hdev = hci_dev_get(req->amp_id);
4445 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4450 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4453 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4454 struct hci_conn *hs_hcon;
4456 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No high-speed link for this peer: reject with an invalid-CID
 * command reject rather than a create-channel response. */
4460 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4465 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4467 mgr->bredr_chan = chan;
4468 chan->hs_hcon = hs_hcon;
/* AMP links carry their own CRC; L2CAP FCS is disabled and the MTU is
 * bounded by the controller's block MTU. */
4469 chan->fcs = L2CAP_FCS_NONE;
4470 conn->mtu = hdev->block_mtu;
4479 rsp.scid = cpu_to_le16(scid);
4480 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4481 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4483 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request for @chan toward @dest_amp_id, stashing
 * the fresh signaling ident on the channel and arming the move timer.
 */
4489 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4491 struct l2cap_move_chan_req req;
4494 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4496 ident = l2cap_get_ident(chan->conn);
4497 chan->ident = ident;
4499 req.icid = cpu_to_le16(chan->scid);
4500 req.dest_amp_id = dest_amp_id;
4502 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
/* Guard the move with a timeout so a silent peer cannot stall it. */
4505 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with @result, echoing the ident saved
 * from the peer's request. The ICID is our dcid (the peer's view).
 */
4508 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4510 struct l2cap_move_chan_rsp rsp;
4512 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4514 rsp.icid = cpu_to_le16(chan->dcid);
4515 rsp.result = cpu_to_le16(result);
4517 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirm with @result under a new ident and arm
 * the move timer while waiting for the confirm response.
 */
4521 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4523 struct l2cap_move_chan_cfm cfm;
4525 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4527 chan->ident = l2cap_get_ident(chan->conn);
4529 cfm.icid = cpu_to_le16(chan->scid);
4530 cfm.result = cpu_to_le16(result);
4532 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4535 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm for a bare ICID — used when
 * the channel itself could not be located, so no timer is set.
 */
4538 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4540 struct l2cap_move_chan_cfm cfm;
4542 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4544 cfm.icid = cpu_to_le16(icid);
4545 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4547 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirm Response for @icid, echoing @ident from
 * the confirm being acknowledged.
 */
4551 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4554 struct l2cap_move_chan_cfm_rsp rsp;
4556 BT_DBG("icid 0x%4.4x", icid);
4558 rsp.icid = cpu_to_le16(icid);
4559 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Detach the channel from its high-speed logical link by clearing the
 * hchan/hcon pointers. Actual link teardown is still a placeholder.
 */
4562 static void __release_logical_link(struct l2cap_chan *chan)
4564 chan->hs_hchan = NULL;
4565 chan->hs_hcon = NULL;
4567 /* Placeholder - release the logical link */
/* React to a failed logical-link setup. A channel that never reached
 * BT_CONNECTED is torn down; an established channel aborts the pending
 * move according to its role.
 * NOTE(review): listing is elided (returns/breaks and a closing switch
 * brace are missing from view).
 */
4570 static void l2cap_logical_fail(struct l2cap_chan *chan)
4572 /* Logical link setup failed */
4573 if (chan->state != BT_CONNECTED) {
4574 /* Create channel failure, disconnect */
4575 l2cap_send_disconn_req(chan, ECONNRESET);
4579 switch (chan->move_role) {
4580 case L2CAP_MOVE_ROLE_RESPONDER:
4581 l2cap_move_done(chan);
/* Tell the initiator this controller cannot take the channel. */
4582 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4584 case L2CAP_MOVE_ROLE_INITIATOR:
4585 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4586 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4587 /* Remote has only sent pending or
4588 * success responses, clean up
4590 l2cap_move_done(chan);
4593 /* Other amp move states imply that the move
4594 * has already aborted
4596 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the high-speed logical link is up:
 * bind the hchan, send the deferred EFS config response, and if both
 * config directions are done initialize ERTM and mark the channel
 * ready.
 * NOTE(review): listing is elided (err declaration, braces and an
 * else path are missing from view).
 */
4601 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4602 struct hci_chan *hchan)
4604 struct l2cap_conf_rsp rsp;
4606 chan->hs_hchan = hchan;
4607 chan->hs_hcon->l2cap_data = chan->conn;
/* Config response was held back until the logical link came up. */
4609 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4611 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4614 set_default_fcs(chan);
4616 err = l2cap_ertm_init(chan);
4618 l2cap_send_disconn_req(chan, -err);
4620 l2cap_chan_ready(chan);
/* Advance a channel move after its logical link completes, driving the
 * move state machine according to the current wait state and role.
 * NOTE(review): listing is elided (breaks and the default label are
 * missing from view).
 */
4624 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4625 struct hci_chan *hchan)
4627 chan->hs_hcon = hchan->conn;
4628 chan->hs_hcon->l2cap_data = chan->conn;
4630 BT_DBG("move_state %d", chan->move_state);
4632 switch (chan->move_state) {
4633 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4634 /* Move confirm will be sent after a success
4635 * response is received
4637 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4639 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Local busy defers the move; otherwise confirm (initiator) or
 * respond success (responder) and wait for the peer's next PDU. */
4640 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4641 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4642 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4643 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4644 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4645 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4646 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4647 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4651 /* Move was not in expected state, free the channel */
4652 __release_logical_link(chan);
4654 chan->move_state = L2CAP_MOVE_STABLE;
4658 /* Call with chan locked */
/* Logical-link confirmation entry point: on failure tear everything
 * down; on success dispatch to the create path (channel not yet
 * connected) or the move path (established channel).
 * NOTE(review): listing is elided (status check/return lines are
 * missing from view).
 */
4659 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4662 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4665 l2cap_logical_fail(chan);
4666 __release_logical_link(chan);
4670 if (chan->state != BT_CONNECTED) {
4671 /* Ignore logical link if channel is on BR/EDR */
4672 if (chan->local_amp_id != AMP_ID_BREDR)
4673 l2cap_logical_finish_create(chan, hchan);
4675 l2cap_logical_finish_move(chan, hchan);
/* Begin moving a channel to the other controller type. From BR/EDR the
 * move is only started when policy prefers AMP (physical link setup is
 * still a placeholder); from AMP the move back to BR/EDR sends the
 * Move Channel Request immediately.
 * NOTE(review): listing is elided (a return and an else branch are
 * missing from view).
 */
4679 void l2cap_move_start(struct l2cap_chan *chan)
4681 BT_DBG("chan %p", chan);
4683 if (chan->local_amp_id == AMP_ID_BREDR) {
4684 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4686 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4687 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4688 /* Placeholder - start physical link setup */
4690 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4691 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4693 l2cap_move_setup(chan);
/* Destination 0 = move back to the BR/EDR controller. */
4694 l2cap_send_move_chan_req(chan, 0);
/* Continue AMP channel creation after physical-link setup. Outgoing
 * channels either proceed with a Create Channel Request or fall back
 * to a plain BR/EDR connect; incoming channels are answered with a
 * Create Channel Response and, on success, configuration starts.
 * NOTE(review): listing is elided (returns, a buf declaration and
 * closing braces are missing from view).
 */
4698 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4699 u8 local_amp_id, u8 remote_amp_id)
4701 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4702 local_amp_id, remote_amp_id);
/* AMP links provide CRC at a lower layer, so FCS is disabled. */
4704 chan->fcs = L2CAP_FCS_NONE;
4706 /* Outgoing channel on AMP */
4707 if (chan->state == BT_CONNECT) {
4708 if (result == L2CAP_CR_SUCCESS) {
4709 chan->local_amp_id = local_amp_id;
4710 l2cap_send_create_chan_req(chan, remote_amp_id);
4712 /* Revert to BR/EDR connect */
4713 l2cap_send_conn_req(chan);
4719 /* Incoming channel on AMP */
4720 if (__l2cap_no_conn_pending(chan)) {
4721 struct l2cap_conn_rsp rsp;
4723 rsp.scid = cpu_to_le16(chan->dcid);
4724 rsp.dcid = cpu_to_le16(chan->scid);
4726 if (result == L2CAP_CR_SUCCESS) {
4727 /* Send successful response */
4728 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4729 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4731 /* Send negative response */
4732 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4733 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4736 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4739 if (result == L2CAP_CR_SUCCESS) {
/* Accepted: move to CONFIG and kick off our config request. */
4740 l2cap_state_change(chan, BT_CONFIG);
4741 set_bit(CONF_REQ_SENT, &chan->conf_state);
4742 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4744 l2cap_build_conf_req(chan, buf), buf);
4745 chan->num_conf_req++;
/* Initiator side: after physical-link prep, record the target AMP id,
 * enter WAIT_RSP, and send the Move Channel Request to the peer.
 */
4750 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4753 l2cap_move_setup(chan);
4754 chan->move_id = local_amp_id;
4755 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4757 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer a move request once the logical link status
 * is known. If the link is already connected, bind it and respond
 * success; if pending, wait for logical confirm; otherwise refuse.
 * NOTE(review): listing is elided — hchan is only assigned by the
 * placeholder lookup, and the result-check branch lines are missing
 * from view.
 */
4760 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4762 struct hci_chan *hchan = NULL;
4764 /* Placeholder - get hci_chan for logical link */
4767 if (hchan->state == BT_CONNECTED) {
4768 /* Logical link is ready to go */
4769 chan->hs_hcon = hchan->conn;
4770 chan->hs_hcon->l2cap_data = chan->conn;
4771 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4772 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4774 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4776 /* Wait for logical link to be ready */
4777 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4780 /* Logical link not available */
4781 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort a channel move: as responder, reply with BAD_ID (for -EINVAL)
 * or NOT_ALLOWED; then reset role/state to stable and restart ERTM
 * transmission.
 * NOTE(review): listing is elided (rsp_result declaration and closing
 * brace are missing from view).
 */
4785 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4787 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4789 if (result == -EINVAL)
4790 rsp_result = L2CAP_MR_BAD_ID;
4792 rsp_result = L2CAP_MR_NOT_ALLOWED;
4794 l2cap_send_move_chan_rsp(chan, rsp_result);
4797 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4798 chan->move_state = L2CAP_MOVE_STABLE;
4800 /* Restart data transmission */
4801 l2cap_ertm_send(chan);
4804 /* Invoke with locked chan */
/* Physical-link confirmation: dispatch to create (channel still being
 * set up), cancel (failure on an established channel), or the
 * role-specific move continuation.
 * NOTE(review): listing is elided (returns and breaks are missing from
 * view).
 */
4805 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4807 u8 local_amp_id = chan->local_amp_id;
4808 u8 remote_amp_id = chan->remote_amp_id;
4810 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4811 chan, result, local_amp_id, remote_amp_id);
/* Channel is going away — nothing to do for the physical link. */
4813 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4814 l2cap_chan_unlock(chan);
4818 if (chan->state != BT_CONNECTED) {
4819 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4820 } else if (result != L2CAP_MR_SUCCESS) {
4821 l2cap_do_move_cancel(chan, result);
4823 switch (chan->move_role) {
4824 case L2CAP_MOVE_ROLE_INITIATOR:
4825 l2cap_do_move_initiate(chan, local_amp_id,
4828 case L2CAP_MOVE_ROLE_RESPONDER:
4829 l2cap_do_move_respond(chan, result);
4832 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request. Validates the channel
 * (dynamic CID, policy, ERTM/streaming mode), the destination AMP id,
 * and detects move collisions before accepting the responder role and
 * replying with the computed result.
 * NOTE(review): listing is elided (returns, hci_dev_put calls, labels
 * and braces are missing from view).
 */
4838 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4839 struct l2cap_cmd_hdr *cmd,
4840 u16 cmd_len, void *data)
4842 struct l2cap_move_chan_req *req = data;
4843 struct l2cap_move_chan_rsp rsp;
4844 struct l2cap_chan *chan;
4846 u16 result = L2CAP_MR_NOT_ALLOWED;
4848 if (cmd_len != sizeof(*req))
4851 icid = le16_to_cpu(req->icid);
4853 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4855 if (!conn->hs_enabled)
4858 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown ICID: refuse directly, no channel state to update. */
4860 rsp.icid = cpu_to_le16(icid);
4861 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4862 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4867 chan->ident = cmd->ident;
/* Only dynamic channels in ERTM/streaming mode, and not pinned to
 * BR/EDR by policy, may be moved. */
4869 if (chan->scid < L2CAP_CID_DYN_START ||
4870 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4871 (chan->mode != L2CAP_MODE_ERTM &&
4872 chan->mode != L2CAP_MODE_STREAMING)) {
4873 result = L2CAP_MR_NOT_ALLOWED;
4874 goto send_move_response;
4877 if (chan->local_amp_id == req->dest_amp_id) {
4878 result = L2CAP_MR_SAME_ID;
4879 goto send_move_response;
4882 if (req->dest_amp_id != AMP_ID_BREDR) {
4883 struct hci_dev *hdev;
4884 hdev = hci_dev_get(req->dest_amp_id);
4885 if (!hdev || hdev->dev_type != HCI_AMP ||
4886 !test_bit(HCI_UP, &hdev->flags)) {
4890 result = L2CAP_MR_BAD_ID;
4891 goto send_move_response;
4896 /* Detect a move collision. Only send a collision response
4897 * if this side has "lost", otherwise proceed with the move.
4898 * The winner has the larger bd_addr.
4900 if ((__chan_is_moving(chan) ||
4901 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4902 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4903 result = L2CAP_MR_COLLISION;
4904 goto send_move_response;
4907 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4908 l2cap_move_setup(chan);
4909 chan->move_id = req->dest_amp_id;
4912 if (req->dest_amp_id == AMP_ID_BREDR) {
4913 /* Moving to BR/EDR */
4914 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4915 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4916 result = L2CAP_MR_PEND;
4918 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4919 result = L2CAP_MR_SUCCESS;
4922 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4923 /* Placeholder - uncomment when amp functions are available */
4924 /*amp_accept_physical(chan, req->dest_amp_id);*/
4925 result = L2CAP_MR_PEND;
4929 l2cap_send_move_chan_rsp(chan, result);
4931 l2cap_chan_unlock(chan);
/* Continue a move after a SUCCESS or PEND Move Channel Response,
 * advancing the initiator's state machine. An unknown ICID is answered
 * with an UNCONFIRMED confirm; any unexpected state aborts the move.
 * NOTE(review): listing is elided (returns, breaks, the placeholder
 * hchan lookup result and some braces are missing from view).
 */
4936 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4938 struct l2cap_chan *chan;
4939 struct hci_chan *hchan = NULL;
4941 chan = l2cap_get_chan_by_scid(conn, icid);
4943 l2cap_send_move_chan_cfm_icid(conn, icid);
/* A pending response re-arms the extended (ERTX) move timer. */
4947 __clear_chan_timer(chan);
4948 if (result == L2CAP_MR_PEND)
4949 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4951 switch (chan->move_state) {
4952 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4953 /* Move confirm will be sent when logical link
4956 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4958 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4959 if (result == L2CAP_MR_PEND) {
4961 } else if (test_bit(CONN_LOCAL_BUSY,
4962 &chan->conn_state)) {
4963 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4965 /* Logical link is up or moving to BR/EDR,
4968 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4969 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4972 case L2CAP_MOVE_WAIT_RSP:
4974 if (result == L2CAP_MR_SUCCESS) {
4975 /* Remote is ready, send confirm immediately
4976 * after logical link is ready
4978 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4980 /* Both logical link and move success
4981 * are required to confirm
4983 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4986 /* Placeholder - get hci_chan for logical link */
4988 /* Logical link not available */
4989 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4993 /* If the logical link is not yet connected, do not
4994 * send confirmation.
4996 if (hchan->state != BT_CONNECTED)
4999 /* Logical link is already ready to go */
5001 chan->hs_hcon = hchan->conn;
5002 chan->hs_hcon->l2cap_data = chan->conn;
5004 if (result == L2CAP_MR_SUCCESS) {
5005 /* Can confirm now */
5006 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5008 /* Now only need move success
5011 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5014 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5017 /* Any other amp move state means the move failed. */
5018 chan->move_id = chan->local_amp_id;
5019 l2cap_move_done(chan);
5020 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5023 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response: on COLLISION the initiator
 * yields and becomes responder, otherwise the move is cancelled; in
 * all located-channel cases an UNCONFIRMED confirm is sent.
 * NOTE(review): listing is elided (a return after the icid fallback
 * and closing braces are missing from view).
 */
5026 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5029 struct l2cap_chan *chan;
5031 chan = l2cap_get_chan_by_ident(conn, ident);
5033 /* Could not locate channel, icid is best guess */
5034 l2cap_send_move_chan_cfm_icid(conn, icid);
5038 __clear_chan_timer(chan);
5040 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5041 if (result == L2CAP_MR_COLLISION) {
5042 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5044 /* Cleanup - cancel move */
5045 chan->move_id = chan->local_amp_id;
5046 l2cap_move_done(chan);
5050 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5052 l2cap_chan_unlock(chan);
/* Handle a Move Channel Response: SUCCESS/PEND continues the move,
 * any other result routes to the failure path.
 * NOTE(review): listing is elided (length-check return and final
 * return are missing from view).
 */
5055 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5056 struct l2cap_cmd_hdr *cmd,
5057 u16 cmd_len, void *data)
5059 struct l2cap_move_chan_rsp *rsp = data;
5062 if (cmd_len != sizeof(*rsp))
5065 icid = le16_to_cpu(rsp->icid);
5066 result = le16_to_cpu(rsp->result);
5068 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5070 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5071 l2cap_move_continue(conn, icid, result);
5073 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle a Move Channel Confirm: commit or roll back the controller
 * switch depending on the result, release the logical link when the
 * channel lands back on BR/EDR, and always acknowledge with a confirm
 * response (required by spec even for unknown ICIDs).
 * NOTE(review): listing is elided (returns and some braces are missing
 * from view).
 */
5078 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5079 struct l2cap_cmd_hdr *cmd,
5080 u16 cmd_len, void *data)
5082 struct l2cap_move_chan_cfm *cfm = data;
5083 struct l2cap_chan *chan;
5086 if (cmd_len != sizeof(*cfm))
5089 icid = le16_to_cpu(cfm->icid);
5090 result = le16_to_cpu(cfm->result);
5092 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5094 chan = l2cap_get_chan_by_dcid(conn, icid);
5096 /* Spec requires a response even if the icid was not found */
5097 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5101 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5102 if (result == L2CAP_MC_CONFIRMED) {
/* Confirmed: adopt the new controller id. */
5103 chan->local_amp_id = chan->move_id;
5104 if (chan->local_amp_id == AMP_ID_BREDR)
5105 __release_logical_link(chan);
5107 chan->move_id = chan->local_amp_id;
5110 l2cap_move_done(chan);
5113 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5115 l2cap_chan_unlock(chan);
/* Handle a Move Channel Confirm Response: the initiator's final step.
 * Commit the controller switch, drop the logical link if the channel
 * moved back to BR/EDR, and mark the move done.
 * NOTE(review): listing is elided (length-check and lookup-failure
 * returns are missing from view).
 */
5120 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5121 struct l2cap_cmd_hdr *cmd,
5122 u16 cmd_len, void *data)
5124 struct l2cap_move_chan_cfm_rsp *rsp = data;
5125 struct l2cap_chan *chan;
5128 if (cmd_len != sizeof(*rsp))
5131 icid = le16_to_cpu(rsp->icid);
5133 BT_DBG("icid 0x%4.4x", icid);
5135 chan = l2cap_get_chan_by_scid(conn, icid);
5139 __clear_chan_timer(chan);
5141 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5142 chan->local_amp_id = chan->move_id;
5144 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5145 __release_logical_link(chan);
5147 l2cap_move_done(chan);
5150 l2cap_chan_unlock(chan);
/* Validate LE connection parameters against the ranges in the
 * Bluetooth Core Spec (interval in 1.25 ms units, supervision timeout
 * in 10 ms units, latency bounded by the timeout/interval ratio).
 * NOTE(review): listing is elided — the return statements for the
 * failure and success paths are missing from view.
 */
5155 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5160 if (min > max || min < 6 || max > 3200)
5163 if (to_multiplier < 10 || to_multiplier > 3200)
/* The supervision timeout must exceed the maximum connection
 * interval; otherwise the link would drop before a missed event. */
5166 if (max >= to_multiplier * 8)
5169 max_latency = (to_multiplier * 8 / max) - 1;
5170 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master role only):
 * validate the proposed parameters, answer with accept/reject, and on
 * accept push the update to the controller.
 * NOTE(review): listing is elided (error returns and the accept-branch
 * condition are missing from view).
 */
5176 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5177 struct l2cap_cmd_hdr *cmd,
5178 u16 cmd_len, u8 *data)
5180 struct hci_conn *hcon = conn->hcon;
5181 struct l2cap_conn_param_update_req *req;
5182 struct l2cap_conn_param_update_rsp rsp;
5183 u16 min, max, latency, to_multiplier;
/* Only the connection master may grant a parameter update. */
5186 if (!(hcon->link_mode & HCI_LM_MASTER))
5189 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5192 req = (struct l2cap_conn_param_update_req *) data;
5193 min = __le16_to_cpu(req->min);
5194 max = __le16_to_cpu(req->max);
5195 latency = __le16_to_cpu(req->latency);
5196 to_multiplier = __le16_to_cpu(req->to_multiplier);
5198 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5199 min, max, latency, to_multiplier);
5201 memset(&rsp, 0, sizeof(rsp));
5203 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5205 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5207 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5209 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5213 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Handle an LE Credit Based Connection Response: match it to the
 * pending channel by ident, then either complete the channel with the
 * peer's MTU/MPS/credits or tear it down with ECONNREFUSED.
 * NOTE(review): listing is elided (error returns, switch header,
 * breaks and some assignments are missing from view). The literal 23
 * is the LE minimum MTU/MPS per spec — TODO confirm against the
 * header's L2CAP_LE_MIN_MTU constant.
 */
5218 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5219 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5222 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5223 u16 dcid, mtu, mps, credits, result;
5224 struct l2cap_chan *chan;
5227 if (cmd_len < sizeof(*rsp))
5230 dcid = __le16_to_cpu(rsp->dcid);
5231 mtu = __le16_to_cpu(rsp->mtu);
5232 mps = __le16_to_cpu(rsp->mps);
5233 credits = __le16_to_cpu(rsp->credits);
5234 result = __le16_to_cpu(rsp->result);
/* A "successful" response with sub-minimum MTU/MPS is invalid. */
5236 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5239 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5240 dcid, mtu, mps, credits, result);
5242 mutex_lock(&conn->chan_lock);
5244 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5252 l2cap_chan_lock(chan);
5255 case L2CAP_CR_SUCCESS:
5259 chan->remote_mps = mps;
5260 chan->tx_credits = credits;
5261 l2cap_chan_ready(chan);
5265 l2cap_chan_del(chan, ECONNREFUSED);
5269 l2cap_chan_unlock(chan);
5272 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signaling command to its handler. Echo requests
 * are answered inline; unknown opcodes log an error (and, per the
 * elided tail, presumably yield a command reject via the caller —
 * TODO confirm).
 * NOTE(review): listing is elided (breaks and the final return are
 * missing from view).
 */
5277 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5278 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5283 switch (cmd->code) {
5284 case L2CAP_COMMAND_REJ:
5285 l2cap_command_rej(conn, cmd, cmd_len, data);
5288 case L2CAP_CONN_REQ:
5289 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5292 case L2CAP_CONN_RSP:
5293 case L2CAP_CREATE_CHAN_RSP:
5294 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5297 case L2CAP_CONF_REQ:
5298 err = l2cap_config_req(conn, cmd, cmd_len, data);
5301 case L2CAP_CONF_RSP:
5302 l2cap_config_rsp(conn, cmd, cmd_len, data);
5305 case L2CAP_DISCONN_REQ:
5306 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5309 case L2CAP_DISCONN_RSP:
5310 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
/* Echo: reflect the request payload straight back. */
5313 case L2CAP_ECHO_REQ:
5314 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5317 case L2CAP_ECHO_RSP:
5320 case L2CAP_INFO_REQ:
5321 err = l2cap_information_req(conn, cmd, cmd_len, data);
5324 case L2CAP_INFO_RSP:
5325 l2cap_information_rsp(conn, cmd, cmd_len, data);
5328 case L2CAP_CREATE_CHAN_REQ:
5329 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5332 case L2CAP_MOVE_CHAN_REQ:
5333 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5336 case L2CAP_MOVE_CHAN_RSP:
5337 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5340 case L2CAP_MOVE_CHAN_CFM:
5341 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5344 case L2CAP_MOVE_CHAN_CFM_RSP:
5345 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5349 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request: find a listening
 * channel for the PSM, enforce security and minimum MTU/MPS, reject
 * duplicate source CIDs, then create and register the new channel and
 * answer with an LE Connect Response carrying our MTU/MPS/credits.
 * NOTE(review): listing is elided (psm/dcid assignments, goto labels,
 * returns and braces are missing from view).
 */
5357 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5358 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5361 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5362 struct l2cap_le_conn_rsp rsp;
5363 struct l2cap_chan *chan, *pchan;
5364 u16 dcid, scid, credits, mtu, mps;
5368 if (cmd_len != sizeof(*req))
5371 scid = __le16_to_cpu(req->scid);
5372 mtu = __le16_to_cpu(req->mtu);
5373 mps = __le16_to_cpu(req->mps);
/* 23 is the LE minimum MTU/MPS — presumably L2CAP_LE_MIN_MTU; verify
 * against the header. */
5378 if (mtu < 23 || mps < 23)
5381 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5384 /* Check if we have socket listening on psm */
5385 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5386 &conn->hcon->dst, LE_LINK);
5388 result = L2CAP_CR_BAD_PSM;
5393 mutex_lock(&conn->chan_lock);
5394 l2cap_chan_lock(pchan);
5396 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5397 result = L2CAP_CR_AUTHENTICATION;
5399 goto response_unlock;
5402 /* Check if we already have channel with that dcid */
5403 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5404 result = L2CAP_CR_NO_MEM;
5406 goto response_unlock;
5409 chan = pchan->ops->new_connection(pchan);
5411 result = L2CAP_CR_NO_MEM;
5412 goto response_unlock;
5415 l2cap_le_flowctl_init(chan);
5417 bacpy(&chan->src, &conn->hcon->src);
5418 bacpy(&chan->dst, &conn->hcon->dst);
5419 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5420 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5424 chan->remote_mps = mps;
5425 chan->tx_credits = __le16_to_cpu(req->credits);
5427 __l2cap_chan_add(conn, chan);
5429 credits = chan->rx_credits;
5431 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5433 chan->ident = cmd->ident;
/* With deferred setup the response waits for user-space accept;
 * otherwise the channel is ready immediately. */
5435 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5436 l2cap_state_change(chan, BT_CONNECT2);
5437 result = L2CAP_CR_PEND;
5438 chan->ops->defer(chan);
5440 l2cap_chan_ready(chan);
5441 result = L2CAP_CR_SUCCESS;
5445 l2cap_chan_unlock(pchan);
5446 mutex_unlock(&conn->chan_lock);
5448 if (result == L2CAP_CR_PEND)
5453 rsp.mtu = cpu_to_le16(chan->imtu);
5454 rsp.mps = cpu_to_le16(chan->mps);
5460 rsp.dcid = cpu_to_le16(dcid);
5461 rsp.credits = cpu_to_le16(credits);
5462 rsp.result = cpu_to_le16(result);
5464 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add credits to the channel,
 * guarding against overflowing LE_FLOWCTL_MAX_CREDITS (a violation
 * disconnects the channel), then flush queued outgoing SDUs and resume
 * the owner if credits remain.
 * NOTE(review): listing is elided (length-check/lookup returns and
 * the final return are missing from view).
 */
5469 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5470 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5473 struct l2cap_le_credits *pkt;
5474 struct l2cap_chan *chan;
5475 u16 cid, credits, max_credits;
5477 if (cmd_len != sizeof(*pkt))
5480 pkt = (struct l2cap_le_credits *) data;
5481 cid = __le16_to_cpu(pkt->cid);
5482 credits = __le16_to_cpu(pkt->credits);
5484 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5486 chan = l2cap_get_chan_by_dcid(conn, cid);
/* The peer must never push our credit count past the protocol max. */
5490 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5491 if (credits > max_credits) {
5492 BT_ERR("LE credits overflow");
5493 l2cap_send_disconn_req(chan, ECONNRESET);
5495 /* Return 0 so that we don't trigger an unnecessary
5496 * command reject packet.
5501 chan->tx_credits += credits;
5503 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5504 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5508 if (chan->tx_credits)
5509 chan->ops->resume(chan);
5511 l2cap_chan_unlock(chan);
/* Handle an LE Command Reject: if it matches a channel waiting on that
 * ident, fail the pending operation with ECONNREFUSED.
 * NOTE(review): listing is elided (lookup-failure path and returns are
 * missing from view).
 */
5516 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5517 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5520 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5521 struct l2cap_chan *chan;
5523 if (cmd_len < sizeof(*rej))
5526 mutex_lock(&conn->chan_lock);
5528 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5532 l2cap_chan_lock(chan);
5533 l2cap_chan_del(chan, ECONNREFUSED);
5534 l2cap_chan_unlock(chan);
5537 mutex_unlock(&conn->chan_lock);
/* Dispatch one LE signaling command to its handler; unknown opcodes
 * log an error (the elided tail presumably sets an error so the caller
 * sends a command reject — TODO confirm).
 * NOTE(review): listing is elided (breaks and the final return are
 * missing from view).
 */
5541 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5542 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5547 switch (cmd->code) {
5548 case L2CAP_COMMAND_REJ:
5549 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5552 case L2CAP_CONN_PARAM_UPDATE_REQ:
5553 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5556 case L2CAP_CONN_PARAM_UPDATE_RSP:
5559 case L2CAP_LE_CONN_RSP:
5560 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5563 case L2CAP_LE_CONN_REQ:
5564 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5567 case L2CAP_LE_CREDITS:
5568 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5571 case L2CAP_DISCONN_REQ:
5572 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5575 case L2CAP_DISCONN_RSP:
5576 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5580 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process a complete LE signaling PDU from skb: validate the link
 * type, header size and declared length, dispatch the single command,
 * and send a NOT_UNDERSTOOD command reject if the handler failed.
 * NOTE(review): listing is elided (drop/free paths and returns are
 * missing from view).
 */
5588 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5589 struct sk_buff *skb)
5591 struct hci_conn *hcon = conn->hcon;
5592 struct l2cap_cmd_hdr *cmd;
5596 if (hcon->type != LE_LINK)
5599 if (skb->len < L2CAP_CMD_HDR_SIZE)
5602 cmd = (void *) skb->data;
5603 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5605 len = le16_to_cpu(cmd->len);
5607 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* LE signaling carries exactly one command per PDU, and ident 0 is
 * reserved — anything else is corrupt. */
5609 if (len != skb->len || !cmd->ident) {
5610 BT_DBG("corrupted command");
5614 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5616 struct l2cap_cmd_rej_unk rej;
5618 BT_ERR("Wrong link type (%d)", err);
5620 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5621 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process a BR/EDR signaling PDU, which may contain multiple commands:
 * mirror raw data to raw sockets, then iterate header-by-header,
 * dispatching each command and rejecting failures with NOT_UNDERSTOOD.
 * NOTE(review): listing is elided (len initialization, drop paths and
 * loop-advance lines are missing from view).
 */
5629 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5630 struct sk_buff *skb)
5632 struct hci_conn *hcon = conn->hcon;
5633 u8 *data = skb->data;
5635 struct l2cap_cmd_hdr cmd;
5638 l2cap_raw_recv(conn, skb);
5640 if (hcon->type != ACL_LINK)
5643 while (len >= L2CAP_CMD_HDR_SIZE) {
5645 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5646 data += L2CAP_CMD_HDR_SIZE;
5647 len -= L2CAP_CMD_HDR_SIZE;
5649 cmd_len = le16_to_cpu(cmd.len);
5651 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Declared length must fit in the remaining buffer and ident 0 is
 * reserved; otherwise stop parsing this PDU. */
5654 if (cmd_len > len || !cmd.ident) {
5655 BT_DBG("corrupted command");
5659 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5661 struct l2cap_cmd_rej_unk rej;
5663 BT_ERR("Wrong link type (%d)", err);
5665 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5666 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 FCS on a received ERTM/streaming frame. The CRC is
 * computed over the L2CAP header (which sits just before skb->data)
 * plus the payload, and compared with the trailing FCS field that is
 * trimmed off the skb.
 * NOTE(review): listing is elided (return statements are missing from
 * view).
 */
5678 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5680 u16 our_fcs, rcv_fcs;
5683 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5684 hdr_size = L2CAP_EXT_HDR_SIZE;
5686 hdr_size = L2CAP_ENH_HDR_SIZE;
5688 if (chan->fcs == L2CAP_FCS_CRC16) {
/* After the trim, skb->data + skb->len points at the stripped FCS. */
5689 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5690 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5691 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5693 if (our_fcs != rcv_fcs)
/* Send the peer our current receive state with the F-bit set: RNR if
 * locally busy, otherwise try to piggyback on pending I-frames, and
 * fall back to an RR S-frame if no frame carried the F-bit.
 * NOTE(review): listing is elided (control.final assignment and some
 * braces are missing from view).
 */
5699 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5701 struct l2cap_ctrl control;
5703 BT_DBG("chan %p", chan);
5705 memset(&control, 0, sizeof(control));
5708 control.reqseq = chan->buffer_seq;
5709 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5711 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5712 control.super = L2CAP_SUPER_RNR;
5713 l2cap_send_sframe(chan, &control);
/* Peer just cleared its busy state: restart the retransmission timer
 * if we still have unacked frames outstanding. */
5716 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5717 chan->unacked_frames > 0)
5718 __set_retrans_timer(chan);
5720 /* Send pending iframes */
5721 l2cap_ertm_send(chan);
5723 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5724 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5725 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5728 control.super = L2CAP_SUPER_RR;
5729 l2cap_send_sframe(chan, &control);
/* Append @new_frag to @skb's frag_list, maintaining the caller's tail
 * pointer so repeated appends are O(1), and keep skb's aggregate
 * len/data_len/truesize accounting correct.
 * NOTE(review): listing is elided (the else branch pairing for the
 * frag_list head case is missing from view).
 */
5733 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5734 struct sk_buff **last_frag)
5736 /* skb->len reflects data in skb as well as all fragments
5737 * skb->data_len reflects only data in fragments
5739 if (!skb_has_frag_list(skb))
5740 skb_shinfo(skb)->frag_list = new_frag;
5742 new_frag->next = NULL;
5744 (*last_frag)->next = new_frag;
5745 *last_frag = new_frag;
5747 skb->len += new_frag->len;
5748 skb->data_len += new_frag->len;
5749 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM I-frames according to the SAR bits:
 * unsegmented frames go straight to the owner's recv callback; START
 * frames open a new SDU (length-prefixed, bounded by imtu); CONTINUE
 * and END frames append via frag_list, delivering on completion.
 * NOTE(review): listing is elided (error branches, the END case label,
 * the cleanup path condition and returns are missing from view).
 */
5752 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5753 struct l2cap_ctrl *control)
5757 switch (control->sar) {
5758 case L2CAP_SAR_UNSEGMENTED:
5762 err = chan->ops->recv(chan, skb);
5765 case L2CAP_SAR_START:
/* First fragment carries the total SDU length prefix. */
5769 chan->sdu_len = get_unaligned_le16(skb->data);
5770 skb_pull(skb, L2CAP_SDULEN_SIZE);
5772 if (chan->sdu_len > chan->imtu) {
5777 if (skb->len >= chan->sdu_len)
5781 chan->sdu_last_frag = skb;
5787 case L2CAP_SAR_CONTINUE:
5791 append_skb_frag(chan->sdu, skb,
5792 &chan->sdu_last_frag);
/* A continuation must not reach the declared SDU length; only the
 * END fragment may complete it. */
5795 if (chan->sdu->len >= chan->sdu_len)
5805 append_skb_frag(chan->sdu, skb,
5806 &chan->sdu_last_frag);
5809 if (chan->sdu->len != chan->sdu_len)
5812 err = chan->ops->recv(chan, chan->sdu);
5815 /* Reassembly complete */
5817 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and reset reassembly state. */
5825 kfree_skb(chan->sdu);
5827 chan->sdu_last_frag = NULL;
/* Re-segment queued data after a channel move changes the usable MTU.
 * NOTE(review): the function body is elided in this extract — only the
 * signature is visible; callers treat its int return as an error code.
 */
5834 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition (busy != 0 => detected, 0 => cleared) into the
 * ERTM TX state machine. Only meaningful for ERTM channels; other modes
 * return immediately.
 */
5840 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5844 if (chan->mode != L2CAP_MODE_ERTM)
5847 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5848 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver consecutively-numbered I-frames (starting at
 * buffer_seq) to reassembly until a sequence gap or local-busy stops us. When
 * the queue empties, fall back to the RECV state and ack the peer.
 */
5851 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5854 /* Pass sequential frames to l2cap_reassemble_sdu()
5855 * until a gap is encountered.
5858 BT_DBG("chan %p", chan);
5860 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5861 struct sk_buff *skb;
5862 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5863 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5865 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
/* Found the next in-order frame: consume it and advance buffer_seq. */
5870 skb_unlink(skb, &chan->srej_q);
5871 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5872 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* All out-of-order frames recovered: resume normal reception. */
5877 if (skb_queue_empty(&chan->srej_q)) {
5878 chan->rx_state = L2CAP_RX_STATE_RECV;
5879 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: validate reqseq, enforce the per-frame
 * retry limit, then retransmit the single requested frame. P/F-bit handling
 * tracks CONN_SREJ_ACT to suppress duplicate retransmissions when both a
 * poll-SREJ and a final-SREJ refer to the same sequence number.
 */
5885 static void l2cap_handle_srej(struct l2cap_chan *chan,
5886 struct l2cap_ctrl *control)
5888 struct sk_buff *skb;
5890 BT_DBG("chan %p, control %p", chan, control);
/* SREJ for a frame we never sent: protocol violation, disconnect. */
5892 if (control->reqseq == chan->next_tx_seq) {
5893 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5894 l2cap_send_disconn_req(chan, ECONNRESET);
5898 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5901 BT_DBG("Seq %d not available for retransmission",
/* Retry budget exhausted for this frame: give up on the channel. */
5906 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5907 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5908 l2cap_send_disconn_req(chan, ECONNRESET);
5912 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* P-bit set: answer with F-bit and retransmit the requested frame. */
5914 if (control->poll) {
5915 l2cap_pass_to_tx(chan, control);
5917 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5918 l2cap_retransmit(chan, control);
5919 l2cap_ertm_send(chan);
5921 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5922 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5923 chan->srej_save_reqseq = control->reqseq;
5926 l2cap_pass_to_tx_fbit(chan, control);
/* F-bit set: retransmit unless this SREJ was already acted upon
 * (srej_save_reqseq matches and CONN_SREJ_ACT was set).
 */
5928 if (control->final) {
5929 if (chan->srej_save_reqseq != control->reqseq ||
5930 !test_and_clear_bit(CONN_SREJ_ACT,
5932 l2cap_retransmit(chan, control);
5934 l2cap_retransmit(chan, control);
5935 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5936 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5937 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: validate reqseq, enforce the retry limit,
 * then retransmit all unacked frames from reqseq. CONN_REJ_ACT suppresses a
 * second retransmission when a final-REJ follows one already handled.
 */
5943 static void l2cap_handle_rej(struct l2cap_chan *chan,
5944 struct l2cap_ctrl *control)
5946 struct sk_buff *skb;
5948 BT_DBG("chan %p, control %p", chan, control);
/* REJ for a frame we never sent: protocol violation, disconnect. */
5950 if (control->reqseq == chan->next_tx_seq) {
5951 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5952 l2cap_send_disconn_req(chan, ECONNRESET);
5956 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5958 if (chan->max_tx && skb &&
5959 bt_cb(skb)->control.retries >= chan->max_tx) {
5960 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5961 l2cap_send_disconn_req(chan, ECONNRESET);
5965 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5967 l2cap_pass_to_tx(chan, control);
/* final-REJ: retransmit only if not already done (CONN_REJ_ACT clear);
 * otherwise retransmit everything and resume sending.
 */
5969 if (control->final) {
5970 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5971 l2cap_retransmit_all(chan, control);
5973 l2cap_retransmit_all(chan, control);
5974 l2cap_ertm_send(chan);
5975 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5976 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the RX window:
 * EXPECTED / EXPECTED_SREJ / DUPLICATE / DUPLICATE_SREJ / UNEXPECTED /
 * UNEXPECTED_SREJ / INVALID / INVALID_IGNORE. SREJ_SENT state gets its own
 * set of checks against the srej_q and srej_list. The INVALID_IGNORE cases
 * protect against the "double poll" wraparound described in the comment
 * below, which is only safe to ignore for tx windows <= half the seq space.
 */
5980 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5982 BT_DBG("chan %p, txseq %d", chan, txseq);
5984 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5985 chan->expected_tx_seq);
/* While SREJs are outstanding, out-of-window frames are judged against
 * last_acked_seq, and expected retransmissions against the SREJ lists.
 */
5987 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5988 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5990 /* See notes below regarding "double poll" and
5993 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5994 BT_DBG("Invalid/Ignore - after SREJ");
5995 return L2CAP_TXSEQ_INVALID_IGNORE;
5997 BT_DBG("Invalid - in window after SREJ sent");
5998 return L2CAP_TXSEQ_INVALID;
6002 if (chan->srej_list.head == txseq) {
6003 BT_DBG("Expected SREJ");
6004 return L2CAP_TXSEQ_EXPECTED_SREJ;
6007 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6008 BT_DBG("Duplicate SREJ - txseq already stored");
6009 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6012 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6013 BT_DBG("Unexpected SREJ - not requested");
6014 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
/* Normal RECV-state classification starts here. */
6018 if (chan->expected_tx_seq == txseq) {
6019 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6021 BT_DBG("Invalid - txseq outside tx window");
6022 return L2CAP_TXSEQ_INVALID;
6025 return L2CAP_TXSEQ_EXPECTED;
/* Behind expected_tx_seq but within the acked range: already seen. */
6029 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6030 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6031 BT_DBG("Duplicate - expected_tx_seq later than txseq")
6032 return L2CAP_TXSEQ_DUPLICATE;
6035 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6036 /* A source of invalid packets is a "double poll" condition,
6037 * where delays cause us to send multiple poll packets. If
6038 * the remote stack receives and processes both polls,
6039 * sequence numbers can wrap around in such a way that a
6040 * resent frame has a sequence number that looks like new data
6041 * with a sequence gap. This would trigger an erroneous SREJ
6044 * Fortunately, this is impossible with a tx window that's
6045 * less than half of the maximum sequence number, which allows
6046 * invalid frames to be safely ignored.
6048 * With tx window sizes greater than half of the tx window
6049 * maximum, the frame is invalid and cannot be ignored. This
6050 * causes a disconnect.
6053 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6054 BT_DBG("Invalid/Ignore - txseq outside tx window");
6055 return L2CAP_TXSEQ_INVALID_IGNORE;
6057 BT_DBG("Invalid - txseq outside tx window");
6058 return L2CAP_TXSEQ_INVALID;
/* In-window but ahead of expected: frames were lost in between. */
6061 BT_DBG("Unexpected - txseq indicates missing frames");
6062 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine, RECV state. Dispatches on the event: I-frames are
 * classified by txseq (expected frames are reassembled; unexpected frames
 * start SREJ recovery and move to SREJ_SENT; invalid frames disconnect),
 * while RR/RNR/REJ/SREJ S-frames drive the TX side. skb_in_use marks skbs
 * that were queued/consumed; anything else is freed at the end.
 * NOTE(review): break statements and some skb_in_use assignments are elided
 * in this extract — fall-through between cases cannot be confirmed here.
 */
6066 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6067 struct l2cap_ctrl *control,
6068 struct sk_buff *skb, u8 event)
6071 bool skb_in_use = false;
6073 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6077 case L2CAP_EV_RECV_IFRAME:
6078 switch (l2cap_classify_txseq(chan, control->txseq)) {
6079 case L2CAP_TXSEQ_EXPECTED:
6080 l2cap_pass_to_tx(chan, control);
/* While locally busy, expected frames are intentionally dropped;
 * they will show up as missing once busy clears.
 */
6082 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6083 BT_DBG("Busy, discarding expected seq %d",
6088 chan->expected_tx_seq = __next_seq(chan,
6091 chan->buffer_seq = chan->expected_tx_seq;
6094 err = l2cap_reassemble_sdu(chan, skb, control);
/* Peer's F-bit answers our outstanding poll: retransmit if the
 * REJ wasn't already acted on, then resume sending.
 */
6098 if (control->final) {
6099 if (!test_and_clear_bit(CONN_REJ_ACT,
6100 &chan->conn_state)) {
6102 l2cap_retransmit_all(chan, control);
6103 l2cap_ertm_send(chan);
6107 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6108 l2cap_send_ack(chan);
6110 case L2CAP_TXSEQ_UNEXPECTED:
6111 l2cap_pass_to_tx(chan, control);
6113 /* Can't issue SREJ frames in the local busy state.
6114 * Drop this frame, it will be seen as missing
6115 * when local busy is exited.
6117 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6118 BT_DBG("Busy, discarding unexpected seq %d",
6123 /* There was a gap in the sequence, so an SREJ
6124 * must be sent for each missing frame. The
6125 * current frame is stored for later use.
6127 skb_queue_tail(&chan->srej_q, skb);
6129 BT_DBG("Queued %p (queue len %d)", skb,
6130 skb_queue_len(&chan->srej_q));
6132 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6133 l2cap_seq_list_clear(&chan->srej_list);
6134 l2cap_send_srej(chan, control->txseq);
6136 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6138 case L2CAP_TXSEQ_DUPLICATE:
6139 l2cap_pass_to_tx(chan, control);
6141 case L2CAP_TXSEQ_INVALID_IGNORE:
6143 case L2CAP_TXSEQ_INVALID:
6145 l2cap_send_disconn_req(chan, ECONNRESET);
6149 case L2CAP_EV_RECV_RR:
6150 l2cap_pass_to_tx(chan, control);
6151 if (control->final) {
6152 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6154 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6155 !__chan_is_moving(chan)) {
6157 l2cap_retransmit_all(chan, control);
6160 l2cap_ertm_send(chan);
/* RR with P-bit: answer with an F-bit frame of our own. */
6161 } else if (control->poll) {
6162 l2cap_send_i_or_rr_or_rnr(chan);
6164 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6165 &chan->conn_state) &&
6166 chan->unacked_frames)
6167 __set_retrans_timer(chan);
6169 l2cap_ertm_send(chan);
6172 case L2CAP_EV_RECV_RNR:
/* Peer is busy: stop retransmitting until it recovers. */
6173 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6174 l2cap_pass_to_tx(chan, control);
6175 if (control && control->poll) {
6176 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6177 l2cap_send_rr_or_rnr(chan, 0);
6179 __clear_retrans_timer(chan);
6180 l2cap_seq_list_clear(&chan->retrans_list);
6182 case L2CAP_EV_RECV_REJ:
6183 l2cap_handle_rej(chan, control);
6185 case L2CAP_EV_RECV_SREJ:
6186 l2cap_handle_srej(chan, control);
/* Any skb not queued or consumed above is freed here. */
6192 if (skb && !skb_in_use) {
6193 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine, SREJ_SENT state (selective-reject recovery is in
 * progress). Incoming I-frames are queued in srej_q; expected-SREJ frames
 * pop the srej_list and may complete recovery via l2cap_rx_queued_iframes();
 * further gaps generate more SREJs. S-frames are handled much like in RECV,
 * but polls are answered with l2cap_send_srej_tail().
 * NOTE(review): break statements and skb_in_use assignments are elided in
 * this extract — exact case boundaries cannot be confirmed here.
 */
6200 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6201 struct l2cap_ctrl *control,
6202 struct sk_buff *skb, u8 event)
6205 u16 txseq = control->txseq;
6206 bool skb_in_use = false;
6208 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6212 case L2CAP_EV_RECV_IFRAME:
6213 switch (l2cap_classify_txseq(chan, txseq)) {
6214 case L2CAP_TXSEQ_EXPECTED:
6215 /* Keep frame for reassembly later */
6216 l2cap_pass_to_tx(chan, control);
6217 skb_queue_tail(&chan->srej_q, skb);
6219 BT_DBG("Queued %p (queue len %d)", skb,
6220 skb_queue_len(&chan->srej_q));
6222 chan->expected_tx_seq = __next_seq(chan, txseq);
/* A frame we explicitly SREJ'd for has arrived. */
6224 case L2CAP_TXSEQ_EXPECTED_SREJ:
6225 l2cap_seq_list_pop(&chan->srej_list);
6227 l2cap_pass_to_tx(chan, control);
6228 skb_queue_tail(&chan->srej_q, skb);
6230 BT_DBG("Queued %p (queue len %d)", skb,
6231 skb_queue_len(&chan->srej_q));
/* Try to deliver everything now in order. */
6233 err = l2cap_rx_queued_iframes(chan);
6238 case L2CAP_TXSEQ_UNEXPECTED:
6239 /* Got a frame that can't be reassembled yet.
6240 * Save it for later, and send SREJs to cover
6241 * the missing frames.
6243 skb_queue_tail(&chan->srej_q, skb);
6245 BT_DBG("Queued %p (queue len %d)", skb,
6246 skb_queue_len(&chan->srej_q));
6248 l2cap_pass_to_tx(chan, control);
6249 l2cap_send_srej(chan, control->txseq);
6251 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6252 /* This frame was requested with an SREJ, but
6253 * some expected retransmitted frames are
6254 * missing. Request retransmission of missing
6257 skb_queue_tail(&chan->srej_q, skb);
6259 BT_DBG("Queued %p (queue len %d)", skb,
6260 skb_queue_len(&chan->srej_q));
6262 l2cap_pass_to_tx(chan, control);
6263 l2cap_send_srej_list(chan, control->txseq);
6265 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6266 /* We've already queued this frame. Drop this copy. */
6267 l2cap_pass_to_tx(chan, control);
6269 case L2CAP_TXSEQ_DUPLICATE:
6270 /* Expecting a later sequence number, so this frame
6271 * was already received. Ignore it completely.
6274 case L2CAP_TXSEQ_INVALID_IGNORE:
6276 case L2CAP_TXSEQ_INVALID:
6278 l2cap_send_disconn_req(chan, ECONNRESET);
6282 case L2CAP_EV_RECV_RR:
6283 l2cap_pass_to_tx(chan, control);
6284 if (control->final) {
6285 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6287 if (!test_and_clear_bit(CONN_REJ_ACT,
6288 &chan->conn_state)) {
6290 l2cap_retransmit_all(chan, control);
6293 l2cap_ertm_send(chan);
/* RR with P-bit during recovery: repeat the outstanding SREJ. */
6294 } else if (control->poll) {
6295 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6296 &chan->conn_state) &&
6297 chan->unacked_frames) {
6298 __set_retrans_timer(chan);
6301 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6302 l2cap_send_srej_tail(chan);
6304 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6305 &chan->conn_state) &&
6306 chan->unacked_frames)
6307 __set_retrans_timer(chan);
6309 l2cap_send_ack(chan);
6312 case L2CAP_EV_RECV_RNR:
6313 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6314 l2cap_pass_to_tx(chan, control);
6315 if (control->poll) {
6316 l2cap_send_srej_tail(chan);
/* No P-bit: just ack with a plain RR S-frame. */
6318 struct l2cap_ctrl rr_control;
6319 memset(&rr_control, 0, sizeof(rr_control));
6320 rr_control.sframe = 1;
6321 rr_control.super = L2CAP_SUPER_RR;
6322 rr_control.reqseq = chan->buffer_seq;
6323 l2cap_send_sframe(chan, &rr_control);
6327 case L2CAP_EV_RECV_REJ:
6328 l2cap_handle_rej(chan, control);
6330 case L2CAP_EV_RECV_SREJ:
6331 l2cap_handle_srej(chan, control);
/* Any skb not queued above is freed here. */
6335 if (skb && !skb_in_use) {
6336 BT_DBG("Freeing %p", skb);
/* Finalize an AMP channel move: return to the RECV state, pick the MTU of
 * the now-active link (high-speed block MTU vs. ACL MTU — the selecting
 * condition is elided in this extract), and re-segment queued data for it.
 */
6343 static int l2cap_finish_move(struct l2cap_chan *chan)
6345 BT_DBG("chan %p", chan);
6347 chan->rx_state = L2CAP_RX_STATE_RECV;
6350 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6352 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6354 return l2cap_resegment(chan);
/* RX state machine, WAIT_P state (after a channel move, waiting for a frame
 * with the P-bit). On a poll: sync the TX queue to the peer's reqseq, rewind
 * next_tx_seq, finish the move, and reply with an F-bit-carrying frame. The
 * event itself is then reprocessed via the RECV-state handler.
 * NOTE(review): the P-bit guard condition at the top is elided here.
 */
6357 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6358 struct l2cap_ctrl *control,
6359 struct sk_buff *skb, u8 event)
6363 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6369 l2cap_process_reqseq(chan, control->reqseq);
6371 if (!skb_queue_empty(&chan->tx_q))
6372 chan->tx_send_head = skb_peek(&chan->tx_q);
6374 chan->tx_send_head = NULL;
6376 /* Rewind next_tx_seq to the point expected
6379 chan->next_tx_seq = control->reqseq;
6380 chan->unacked_frames = 0;
6382 err = l2cap_finish_move(chan);
6386 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6387 l2cap_send_i_or_rr_or_rnr(chan);
/* I-frames still carry data: re-run them through the RECV handler. */
6389 if (event == L2CAP_EV_RECV_IFRAME)
6392 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX state machine, WAIT_F state (after a channel move, waiting for the
 * peer's F-bit). Ignores frames without the F-bit; on the F-bit: return to
 * RECV, sync the TX queue to reqseq, rewind next_tx_seq, adopt the MTU of
 * the active link, re-segment, then process the frame via the RECV handler.
 */
6395 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6396 struct l2cap_ctrl *control,
6397 struct sk_buff *skb, u8 event)
6401 if (!control->final)
6404 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6406 chan->rx_state = L2CAP_RX_STATE_RECV;
6407 l2cap_process_reqseq(chan, control->reqseq);
6409 if (!skb_queue_empty(&chan->tx_q))
6410 chan->tx_send_head = skb_peek(&chan->tx_q);
6412 chan->tx_send_head = NULL;
6414 /* Rewind next_tx_seq to the point expected
6417 chan->next_tx_seq = control->reqseq;
6418 chan->unacked_frames = 0;
/* Pick the MTU of the link now in use (selector elided in extract). */
6421 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6423 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6425 err = l2cap_resegment(chan);
6428 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true iff @reqseq acknowledges a frame that has been sent but not
 * yet acked, i.e. it lies in [expected_ack_seq, next_tx_seq] (modulo the
 * sequence space, computed with __seq_offset()).
 */
6433 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6435 /* Make sure reqseq is for a packet that has been sent but not acked */
6438 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6439 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM RX dispatch: validate the frame's reqseq, then route the
 * event to the handler for the current rx_state (RECV / SREJ_SENT / WAIT_P /
 * WAIT_F). An out-of-range reqseq is a protocol violation and disconnects.
 */
6442 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6443 struct sk_buff *skb, u8 event)
6447 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6448 control, skb, event, chan->rx_state);
6450 if (__valid_reqseq(chan, control->reqseq)) {
6451 switch (chan->rx_state) {
6452 case L2CAP_RX_STATE_RECV:
6453 err = l2cap_rx_state_recv(chan, control, skb, event);
6455 case L2CAP_RX_STATE_SREJ_SENT:
6456 err = l2cap_rx_state_srej_sent(chan, control, skb,
6459 case L2CAP_RX_STATE_WAIT_P:
6460 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6462 case L2CAP_RX_STATE_WAIT_F:
6463 err = l2cap_rx_state_wait_f(chan, control, skb, event);
/* reqseq acks a frame we never sent: drop the connection. */
6470 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6471 control->reqseq, chan->next_tx_seq,
6472 chan->expected_ack_seq);
6473 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode RX: only the exactly-expected txseq is reassembled; any
 * other classification discards the frame and any partial SDU (streaming has
 * no retransmission). last_acked_seq/expected_tx_seq always advance to
 * follow the received txseq.
 */
6479 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6480 struct sk_buff *skb)
6484 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6487 if (l2cap_classify_txseq(chan, control->txseq) ==
6488 L2CAP_TXSEQ_EXPECTED) {
6489 l2cap_pass_to_tx(chan, control);
6491 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6492 __next_seq(chan, chan->buffer_seq));
6494 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6496 l2cap_reassemble_sdu(chan, skb, control);
/* Not the expected frame: abandon any partial SDU and drop it. */
6499 kfree_skb(chan->sdu);
6502 chan->sdu_last_frag = NULL;
6506 BT_DBG("Freeing %p", skb);
/* Track the peer's sequence numbers regardless of outcome. */
6511 chan->last_acked_seq = control->txseq;
6512 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames: unpack the control field,
 * verify the FCS (corrupt frames are silently dropped — ERTM recovery will
 * re-request them), validate the payload length against MPS and the F/P bit
 * rules, then route I-frames and S-frames into l2cap_rx()/l2cap_stream_rx().
 * NOTE(review): the `len` initialization and several drop/return lines are
 * elided in this extract.
 */
6517 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6519 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6523 __unpack_control(chan, skb);
6528 * We can just drop the corrupted I-frame here.
6529 * Receiver will miss it and start proper recovery
6530 * procedures and ask for retransmission.
6532 if (l2cap_check_fcs(chan, skb))
/* Payload length excludes the SDU-length prefix and the FCS. */
6535 if (!control->sframe && control->sar == L2CAP_SAR_START)
6536 len -= L2CAP_SDULEN_SIZE;
6538 if (chan->fcs == L2CAP_FCS_CRC16)
6539 len -= L2CAP_FCS_SIZE;
6541 if (len > chan->mps) {
6542 l2cap_send_disconn_req(chan, ECONNRESET);
6546 if (!control->sframe) {
6549 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6550 control->sar, control->reqseq, control->final,
6553 /* Validate F-bit - F=0 always valid, F=1 only
6554 * valid in TX WAIT_F
6556 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6559 if (chan->mode != L2CAP_MODE_STREAMING) {
6560 event = L2CAP_EV_RECV_IFRAME;
6561 err = l2cap_rx(chan, control, skb, event);
6563 err = l2cap_stream_rx(chan, control, skb);
6567 l2cap_send_disconn_req(chan, ECONNRESET);
/* S-frame path: map the super field to an RX event. */
6569 const u8 rx_func_to_event[4] = {
6570 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6571 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6574 /* Only I-frames are expected in streaming mode */
6575 if (chan->mode == L2CAP_MODE_STREAMING)
6578 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6579 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; anything left is a protocol error. */
6583 BT_ERR("Trailing bytes: %d in sframe", len);
6584 l2cap_send_disconn_req(chan, ECONNRESET);
6588 /* Validate F and P bits */
6589 if (control->final && (control->poll ||
6590 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6593 event = rx_func_to_event[control->super];
6594 if (l2cap_rx(chan, control, skb, event))
6595 l2cap_send_disconn_req(chan, ECONNRESET);
/* LE flow control: once our rx credit count drops below half of
 * le_max_credits, top the sender back up to le_max_credits by sending an
 * LE Flow Control Credit packet for the difference.
 */
6605 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6607 struct l2cap_conn *conn = chan->conn;
6608 struct l2cap_le_credits pkt;
6611 /* We return more credits to the sender only after the amount of
6612 * credits falls below half of the initial amount.
6614 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6617 return_credits = le_max_credits - chan->rx_credits;
6619 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
/* Account the returned credits locally before telling the peer. */
6621 chan->rx_credits += return_credits;
6623 pkt.cid = cpu_to_le16(chan->scid);
6624 pkt.credits = cpu_to_le16(return_credits);
6626 chan->ident = l2cap_get_ident(conn);
6628 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* LE CoC (credit-based flow control) data RX. Each PDU consumes one of our
 * rx credits (no credits left => protocol violation, disconnect). The first
 * PDU of an SDU carries a 16-bit SDU length; fragments are accumulated in
 * chan->sdu via append_skb_frag() until sdu_len is reached, then delivered.
 * On internal errors the skb is freed here, so 0 is returned to prevent the
 * caller from double-freeing (see comment at the end).
 * NOTE(review): the rx_credits decrement and the chan->sdu branch selector
 * lines are elided in this extract.
 */
6631 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6635 if (!chan->rx_credits) {
6636 BT_ERR("No credits to receive LE L2CAP data");
6637 l2cap_send_disconn_req(chan, ECONNRESET);
6641 if (chan->imtu < skb->len) {
6642 BT_ERR("Too big LE L2CAP PDU");
6647 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
/* Replenish the sender if our credit pool has run low. */
6649 l2cap_chan_le_send_credits(chan);
/* First PDU of a new SDU: read and validate the SDU length prefix. */
6656 sdu_len = get_unaligned_le16(skb->data);
6657 skb_pull(skb, L2CAP_SDULEN_SIZE);
6659 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6660 sdu_len, skb->len, chan->imtu);
6662 if (sdu_len > chan->imtu) {
6663 BT_ERR("Too big LE L2CAP SDU length received");
6668 if (skb->len > sdu_len) {
6669 BT_ERR("Too much LE L2CAP data received");
/* Whole SDU in one PDU: deliver immediately. */
6674 if (skb->len == sdu_len)
6675 return chan->ops->recv(chan, skb);
6678 chan->sdu_len = sdu_len;
6679 chan->sdu_last_frag = skb;
/* Continuation PDU: append and deliver once sdu_len is reached. */
6684 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6685 chan->sdu->len, skb->len, chan->sdu_len);
6687 if (chan->sdu->len + skb->len > chan->sdu_len) {
6688 BT_ERR("Too much LE L2CAP data received");
6693 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6696 if (chan->sdu->len == chan->sdu_len) {
6697 err = chan->ops->recv(chan, chan->sdu);
6700 chan->sdu_last_frag = NULL;
/* Error path: discard any partially assembled SDU. */
6708 kfree_skb(chan->sdu);
6710 chan->sdu_last_frag = NULL;
6714 /* We can't return an error here since we took care of the skb
6715 * freeing internally. An error return would cause the caller to
6716 * do a double-free of the skb.
/* Route an incoming data packet to the channel registered for @cid, then
 * dispatch on the channel mode (LE flow control, basic, ERTM/streaming).
 * Unknown CIDs are dropped, except L2CAP_CID_A2MP which may create a channel
 * on demand. The channel lock is held around delivery.
 * NOTE(review): drop/goto lines and the unlock/return structure are partly
 * elided in this extract.
 */
6721 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6722 struct sk_buff *skb)
6724 struct l2cap_chan *chan;
6726 chan = l2cap_get_chan_by_scid(conn, cid);
/* No channel yet: A2MP may create one for its fixed CID. */
6728 if (cid == L2CAP_CID_A2MP) {
6729 chan = a2mp_channel_create(conn, skb);
6735 l2cap_chan_lock(chan);
6737 BT_DBG("unknown cid 0x%4.4x", cid);
6738 /* Drop packet and return */
6744 BT_DBG("chan %p, len %d", chan, skb->len);
6746 if (chan->state != BT_CONNECTED)
6749 switch (chan->mode) {
6750 case L2CAP_MODE_LE_FLOWCTL:
6751 if (l2cap_le_data_rcv(chan, skb) < 0)
6756 case L2CAP_MODE_BASIC:
6757 /* If socket recv buffers overflows we drop data here
6758 * which is *bad* because L2CAP has to be reliable.
6759 * But we don't have any other choice. L2CAP doesn't
6760 * provide flow control mechanism. */
6762 if (chan->imtu < skb->len)
6765 if (!chan->ops->recv(chan, skb))
6769 case L2CAP_MODE_ERTM:
6770 case L2CAP_MODE_STREAMING:
6771 l2cap_data_rcv(chan, skb);
6775 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6783 l2cap_chan_unlock(chan);
/* Deliver a connectionless (G-frame) packet to the global channel listening
 * on @psm over BR/EDR. The sender's BD_ADDR and the PSM are stashed in the
 * skb control block so the socket layer can fill in msg_name.
 */
6786 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6787 struct sk_buff *skb)
6789 struct hci_conn *hcon = conn->hcon;
6790 struct l2cap_chan *chan;
/* Connectionless traffic is BR/EDR (ACL) only. */
6792 if (hcon->type != ACL_LINK)
6795 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6800 BT_DBG("chan %p, len %d", chan, skb->len);
6802 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6805 if (chan->imtu < skb->len)
6808 /* Store remote BD_ADDR and PSM for msg_name */
6809 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6810 bt_cb(skb)->psm = psm;
6812 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT fixed-channel packet (LE links only) to the global ATT
 * channel matching this address pair, dropping traffic from blacklisted
 * remote devices and packets exceeding the channel's incoming MTU.
 */
6819 static void l2cap_att_channel(struct l2cap_conn *conn,
6820 struct sk_buff *skb)
6822 struct hci_conn *hcon = conn->hcon;
6823 struct l2cap_chan *chan;
6825 if (hcon->type != LE_LINK)
6828 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6829 &hcon->src, &hcon->dst);
6833 BT_DBG("chan %p, len %d", chan, skb->len);
/* Ignore ATT traffic from devices on the blacklist. */
6835 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
6838 if (chan->imtu < skb->len)
6841 if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete L2CAP frame by CID: signaling, connectionless,
 * ATT, LE signaling, SMP, 6LoWPAN, or a regular data channel. Frames that
 * arrive before the HCI connection reaches BT_CONNECTED are parked on
 * conn->pending_rx for later processing (see process_pending_rx()).
 * NOTE(review): the length-mismatch drop path and some case labels (e.g.
 * the ATT and SMP CIDs) are elided in this extract.
 */
6848 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6850 struct l2cap_hdr *lh = (void *) skb->data;
6851 struct hci_conn *hcon = conn->hcon;
6855 if (hcon->state != BT_CONNECTED) {
6856 BT_DBG("queueing pending rx skb");
6857 skb_queue_tail(&conn->pending_rx, skb);
/* Strip the basic header and validate the advertised length. */
6861 skb_pull(skb, L2CAP_HDR_SIZE);
6862 cid = __le16_to_cpu(lh->cid);
6863 len = __le16_to_cpu(lh->len);
6865 if (len != skb->len) {
6870 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6873 case L2CAP_CID_SIGNALING:
6874 l2cap_sig_channel(conn, skb);
6877 case L2CAP_CID_CONN_LESS:
6878 psm = get_unaligned((__le16 *) skb->data);
6879 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6880 l2cap_conless_channel(conn, psm, skb);
6884 l2cap_att_channel(conn, skb);
6887 case L2CAP_CID_LE_SIGNALING:
6888 l2cap_le_sig_channel(conn, skb);
/* SMP: a failure here tears down the whole connection. */
6892 if (smp_sig_channel(conn, skb))
6893 l2cap_conn_del(conn->hcon, EACCES);
6896 case L2CAP_FC_6LOWPAN:
6897 bt_6lowpan_recv(conn, skb);
/* Default: dynamic CID, normal data path. */
6901 l2cap_data_channel(conn, cid, skb);
/* Workqueue handler: replay frames that were queued on conn->pending_rx
 * while the HCI connection was still coming up, in arrival order.
 */
6906 static void process_pending_rx(struct work_struct *work)
6908 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6910 struct sk_buff *skb;
6914 while ((skb = skb_dequeue(&conn->pending_rx)))
6915 l2cap_recv_frame(conn, skb);
/* Create and attach an l2cap_conn to @hcon (allocating an HCI channel for
 * it), choose the connection MTU by link type, and initialize the locks,
 * channel list, timers, and pending-rx machinery. Returns the new conn;
 * NULL on allocation failure (the hchan is released in that case).
 * NOTE(review): the early-return for an already-existing conn and the LE/ACL
 * switch labels are elided in this extract.
 */
6918 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6920 struct l2cap_conn *conn = hcon->l2cap_data;
6921 struct hci_chan *hchan;
6926 hchan = hci_chan_create(hcon);
6930 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: release the HCI channel we just created. */
6932 hci_chan_del(hchan);
6936 kref_init(&conn->ref);
6937 hcon->l2cap_data = conn;
/* conn holds a reference on its hcon for its lifetime. */
6939 hci_conn_get(conn->hcon);
6940 conn->hchan = hchan;
6942 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* MTU: LE uses the controller's LE MTU if set; otherwise ACL MTU. */
6944 switch (hcon->type) {
6946 if (hcon->hdev->le_mtu) {
6947 conn->mtu = hcon->hdev->le_mtu;
6952 conn->mtu = hcon->hdev->acl_mtu;
6956 conn->feat_mask = 0;
6958 if (hcon->type == ACL_LINK)
6959 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
6960 &hcon->hdev->dev_flags);
6962 spin_lock_init(&conn->lock);
6963 mutex_init(&conn->chan_lock);
6965 INIT_LIST_HEAD(&conn->chan_l);
6966 INIT_LIST_HEAD(&conn->users);
/* LE links use the SMP security timer; BR/EDR the info-req timer. */
6968 if (hcon->type == LE_LINK)
6969 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
6971 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
6973 skb_queue_head_init(&conn->pending_rx);
6974 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
6976 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for an outgoing connection: LE destinations allow any
 * value <= 0x00ff; BR/EDR PSMs must be odd with bit 0 of the upper byte
 * clear, per the L2CAP spec's PSM encoding rules.
 */
6981 static bool is_valid_psm(u16 psm, u8 dst_type) {
6985 if (bdaddr_type_is_le(dst_type))
6986 return (psm <= 0x00ff);
6988 /* PSM must be odd and lsb of upper byte must be 0 */
6989 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP connection on @chan to @dst: validate PSM/CID
 * and channel mode, create the HCI connection (LE or ACL by address type),
 * attach the channel to the l2cap_conn, and either start the L2CAP-level
 * connect immediately (if the link is already up) or leave it to the
 * connect-complete callback. Returns 0 or a negative errno.
 * NOTE(review): several error-path labels/returns and state-switch cases
 * are elided in this extract.
 */
6992 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
6993 bdaddr_t *dst, u8 dst_type)
6995 struct l2cap_conn *conn;
6996 struct hci_conn *hcon;
6997 struct hci_dev *hdev;
7001 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7002 dst_type, __le16_to_cpu(psm));
7004 hdev = hci_get_route(dst, &chan->src);
7006 return -EHOSTUNREACH;
7010 l2cap_chan_lock(chan);
/* Parameter sanity: PSM encoding, PSM/CID presence per channel type. */
7012 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7013 chan->chan_type != L2CAP_CHAN_RAW) {
7018 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7023 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7028 switch (chan->mode) {
7029 case L2CAP_MODE_BASIC:
7031 case L2CAP_MODE_LE_FLOWCTL:
7032 l2cap_le_flowctl_init(chan);
7034 case L2CAP_MODE_ERTM:
7035 case L2CAP_MODE_STREAMING:
7044 switch (chan->state) {
7048 /* Already connecting */
7053 /* Already connected */
7067 /* Set destination address and psm */
7068 bacpy(&chan->dst, dst);
7069 chan->dst_type = dst_type;
7074 auth_type = l2cap_get_auth_type(chan);
/* Create the underlying link: LE vs. BR/EDR by address type. */
7076 if (bdaddr_type_is_le(dst_type))
7077 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
7078 chan->sec_level, auth_type);
7080 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
7081 chan->sec_level, auth_type);
7084 err = PTR_ERR(hcon);
7088 conn = l2cap_conn_add(hcon);
7090 hci_conn_drop(hcon);
/* Refuse a fixed CID that is already in use on this connection. */
7095 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7096 hci_conn_drop(hcon);
7101 /* Update source addr of the socket */
7102 bacpy(&chan->src, &hcon->src);
7103 chan->src_type = bdaddr_type(hcon, hcon->src_type);
/* l2cap_chan_add needs the chan unlocked (it takes conn->chan_lock). */
7105 l2cap_chan_unlock(chan);
7106 l2cap_chan_add(conn, chan);
7107 l2cap_chan_lock(chan);
7109 /* l2cap_chan_add takes its own ref so we can drop this one */
7110 hci_conn_drop(hcon);
7112 l2cap_state_change(chan, BT_CONNECT);
7113 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7115 /* Release chan->sport so that it can be reused by other
7116 * sockets (as it's only used for listening sockets).
7118 write_lock(&chan_list_lock);
7120 write_unlock(&chan_list_lock);
/* Link already up: run the L2CAP-level connect now. */
7122 if (hcon->state == BT_CONNECTED) {
7123 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7124 __clear_chan_timer(chan);
7125 if (l2cap_chan_check_security(chan))
7126 l2cap_state_change(chan, BT_CONNECTED);
7128 l2cap_do_start(chan);
7134 l2cap_chan_unlock(chan);
7135 hci_dev_unlock(hdev);
7140 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI incoming-connection policy hook: scan listening channels and build a
 * link-mode mask. Channels bound to this adapter's own address ("exact"
 * matches, lm1) take precedence over wildcard BDADDR_ANY listeners (lm2).
 */
7142 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7144 int exact = 0, lm1 = 0, lm2 = 0;
7145 struct l2cap_chan *c;
7147 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7149 /* Find listening sockets and check their link_mode */
7150 read_lock(&chan_list_lock);
7151 list_for_each_entry(c, &chan_list, global_l) {
7152 if (c->state != BT_LISTEN)
/* Bound to our own adapter address: exact match. */
7155 if (!bacmp(&c->src, &hdev->bdaddr)) {
7156 lm1 |= HCI_LM_ACCEPT;
7157 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7158 lm1 |= HCI_LM_MASTER;
/* Wildcard listener: used only if no exact match exists. */
7160 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7161 lm2 |= HCI_LM_ACCEPT;
7162 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7163 lm2 |= HCI_LM_MASTER;
7166 read_unlock(&chan_list_lock);
7168 return exact ? lm1 : lm2;
/* HCI connect-complete callback: on success, create/fetch the l2cap_conn
 * and kick off channel setup; on failure, tear the connection down with the
 * translated error code.
 */
7171 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7173 struct l2cap_conn *conn;
7175 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7178 conn = l2cap_conn_add(hcon);
7180 l2cap_conn_ready(conn);
7182 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI disconnect-indication hook: report the disconnect reason recorded on
 * the l2cap_conn, defaulting to "remote user terminated" when no conn
 * exists for this link.
 */
7186 int l2cap_disconn_ind(struct hci_conn *hcon)
7188 struct l2cap_conn *conn = hcon->l2cap_data;
7190 BT_DBG("hcon %p", hcon);
7193 return HCI_ERROR_REMOTE_USER_TERM;
7194 return conn->disc_reason;
/* HCI disconnect-complete callback: notify the 6LoWPAN layer, then destroy
 * the l2cap_conn with the translated HCI reason code.
 */
7197 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7199 BT_DBG("hcon %p reason %d", hcon, reason);
7201 bt_6lowpan_del_conn(hcon->l2cap_data);
7203 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to a link-encryption change on a connection-oriented channel:
 * losing encryption arms a disconnect timer for MEDIUM security and closes
 * HIGH/FIPS channels outright; regaining it clears the MEDIUM timer.
 */
7206 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7208 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7211 if (encrypt == 0x00) {
7212 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7213 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7214 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7215 chan->sec_level == BT_SECURITY_FIPS)
7216 l2cap_chan_close(chan, ECONNREFUSED);
7218 if (chan->sec_level == BT_SECURITY_MEDIUM)
7219 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) change callback. For LE links,
 * trigger SMP key distribution and cancel the security timer. For each
 * channel on the connection: skip A2MP; ready ATT channels once encrypted;
 * resume established channels; advance channels blocked in BT_CONNECT /
 * BT_CONNECT2 (sending the deferred connect response, possibly followed by
 * the first configuration request).
 * NOTE(review): success/failure branch selectors around several blocks are
 * elided in this extract.
 */
7223 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7225 struct l2cap_conn *conn = hcon->l2cap_data;
7226 struct l2cap_chan *chan;
7231 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7233 if (hcon->type == LE_LINK) {
7234 if (!status && encrypt)
7235 smp_distribute_keys(conn, 0);
7236 cancel_delayed_work(&conn->security_timer);
7239 mutex_lock(&conn->chan_lock);
7241 list_for_each_entry(chan, &conn->chan_l, list) {
7242 l2cap_chan_lock(chan);
7244 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7245 state_to_string(chan->state));
/* A2MP has its own security handling; skip it here. */
7247 if (chan->scid == L2CAP_CID_A2MP) {
7248 l2cap_chan_unlock(chan);
7252 if (chan->scid == L2CAP_CID_ATT) {
7253 if (!status && encrypt) {
7254 chan->sec_level = hcon->sec_level;
7255 l2cap_chan_ready(chan);
7258 l2cap_chan_unlock(chan);
/* Channels without a pending connect just get resumed/checked. */
7262 if (!__l2cap_no_conn_pending(chan)) {
7263 l2cap_chan_unlock(chan);
7267 if (!status && (chan->state == BT_CONNECTED ||
7268 chan->state == BT_CONFIG)) {
7269 chan->ops->resume(chan);
7270 l2cap_check_encryption(chan, encrypt);
7271 l2cap_chan_unlock(chan);
/* Outgoing connect was waiting on security: start or give up. */
7275 if (chan->state == BT_CONNECT) {
7277 l2cap_start_connection(chan);
7279 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7280 } else if (chan->state == BT_CONNECT2) {
7281 struct l2cap_conn_rsp rsp;
/* Security done for a deferred incoming connect: answer it. */
7285 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7286 res = L2CAP_CR_PEND;
7287 stat = L2CAP_CS_AUTHOR_PEND;
7288 chan->ops->defer(chan);
7290 l2cap_state_change(chan, BT_CONFIG);
7291 res = L2CAP_CR_SUCCESS;
7292 stat = L2CAP_CS_NO_INFO;
/* Security failed: reject and schedule disconnect. */
7295 l2cap_state_change(chan, BT_DISCONN);
7296 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7297 res = L2CAP_CR_SEC_BLOCK;
7298 stat = L2CAP_CS_NO_INFO;
7301 rsp.scid = cpu_to_le16(chan->dcid);
7302 rsp.dcid = cpu_to_le16(chan->scid);
7303 rsp.result = cpu_to_le16(res);
7304 rsp.status = cpu_to_le16(stat);
7305 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Accepted: immediately follow up with our config request. */
7308 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7309 res == L2CAP_CR_SUCCESS) {
7311 set_bit(CONF_REQ_SENT, &chan->conf_state);
7312 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7314 l2cap_build_conf_req(chan, buf),
7316 chan->num_conf_req++;
7320 l2cap_chan_unlock(chan);
7323 mutex_unlock(&conn->chan_lock);
/* HCI ACL data entry point: reassemble L2CAP frames from ACL fragments.
 * START fragments carry the basic L2CAP header giving the total length; a
 * complete frame is passed straight to l2cap_recv_frame(), otherwise an
 * rx_skb is allocated and continuation fragments are copied in until
 * rx_len reaches zero. Malformed sequences mark the conn unreliable.
 * NOTE(review): some switch labels, drop paths, and the final kfree_skb /
 * return are elided in this extract.
 */
7328 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7330 struct l2cap_conn *conn = hcon->l2cap_data;
7331 struct l2cap_hdr *hdr;
7334 /* For AMP controller do not create l2cap conn */
7335 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7339 conn = l2cap_conn_add(hcon);
7344 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7348 case ACL_START_NO_FLUSH:
/* A new start while a reassembly is pending: drop the old one. */
7351 BT_ERR("Unexpected start frame (len %d)", skb->len);
7352 kfree_skb(conn->rx_skb);
7353 conn->rx_skb = NULL;
7355 l2cap_conn_unreliable(conn, ECOMM);
7358 /* Start fragment always begin with Basic L2CAP header */
7359 if (skb->len < L2CAP_HDR_SIZE) {
7360 BT_ERR("Frame is too short (len %d)", skb->len);
7361 l2cap_conn_unreliable(conn, ECOMM);
7365 hdr = (struct l2cap_hdr *) skb->data;
7366 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7368 if (len == skb->len) {
7369 /* Complete frame received */
7370 l2cap_recv_frame(conn, skb);
7374 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7376 if (skb->len > len) {
7377 BT_ERR("Frame is too long (len %d, expected len %d)",
7379 l2cap_conn_unreliable(conn, ECOMM);
7383 /* Allocate skb for the complete frame (with header) */
7384 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7388 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7390 conn->rx_len = len - skb->len;
/* Continuation fragment path (ACL_CONT; label elided in extract). */
7394 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7396 if (!conn->rx_len) {
7397 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7398 l2cap_conn_unreliable(conn, ECOMM);
7402 if (skb->len > conn->rx_len) {
7403 BT_ERR("Fragment is too long (len %d, expected %d)",
7404 skb->len, conn->rx_len);
7405 kfree_skb(conn->rx_skb);
7406 conn->rx_skb = NULL;
7408 l2cap_conn_unreliable(conn, ECOMM);
7412 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7414 conn->rx_len -= skb->len;
7416 if (!conn->rx_len) {
7417 /* Complete frame received. l2cap_recv_frame
7418 * takes ownership of the skb so set the global
7419 * rx_skb pointer to NULL first.
7421 struct sk_buff *rx_skb = conn->rx_skb;
7422 conn->rx_skb = NULL;
7423 l2cap_recv_frame(conn, rx_skb);
7433 static int l2cap_debugfs_show(struct seq_file *f, void *p)
/* seq_file "show" callback for the debugfs "l2cap" file: prints one line
 * per registered L2CAP channel while holding the global channel-list
 * read lock. */
7435 struct l2cap_chan *c;
7437 read_lock(&chan_list_lock);
7439 list_for_each_entry(c, &chan_list, global_l) {
/* Per-channel line: two %pMR Bluetooth addresses, state, PSM (LE on the
 * wire, hence the __le16_to_cpu), source/dest CIDs, in/out MTUs, security
 * level and channel mode.
 * NOTE(review): the argument line supplying the two address pointers
 * (presumably &c->src, &c->dst) is elided from this chunk. */
7440 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7442 c->state, __le16_to_cpu(c->psm),
7443 c->scid, c->dcid, c->imtu, c->omtu,
7444 c->sec_level, c->mode);
7447 read_unlock(&chan_list_lock);
7452 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
/* debugfs open callback: bind the single-record seq_file show routine;
 * inode->i_private is passed through as the show callback's private data. */
7454 return single_open(file, l2cap_debugfs_show, inode->i_private);
7457 static const struct file_operations l2cap_debugfs_fops = {
/* File operations for the debugfs "l2cap" channel-list file, wired to the
 * single_open()/seq_file helpers above.
 * NOTE(review): the customary ".read = seq_read," initializer is not
 * visible in this elided chunk — confirm it exists in the full source. */
7458 .open = l2cap_debugfs_open,
7460 .llseek = seq_lseek,
7461 .release = single_release,
/* Dentry of the debugfs "l2cap" file; created in l2cap_init() and removed
 * in l2cap_exit(). */
7464 static struct dentry *l2cap_debugfs;
7466 int __init l2cap_init(void)
7470 err = l2cap_init_sockets();
7474 if (IS_ERR_OR_NULL(bt_debugfs))
7477 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7478 NULL, &l2cap_debugfs_fops);
7480 debugfs_create_u16("l2cap_le_max_credits", 0466, bt_debugfs,
7482 debugfs_create_u16("l2cap_le_default_mps", 0466, bt_debugfs,
7490 void l2cap_exit(void)
/* Module teardown: tear down the 6LoWPAN hook, remove the debugfs entry,
 * then unregister the L2CAP socket family — the reverse of the visible
 * setup order in l2cap_init(). */
7492 bt_6lowpan_cleanup();
7493 debugfs_remove(l2cap_debugfs);
7494 l2cap_cleanup_sockets();
/* Module parameter: allow disabling Enhanced Retransmission Mode at load
 * time or later via sysfs (0644: writable by root, readable by all). */
7497 module_param(disable_ertm, bool, 0644);
7498 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");