/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

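/* As a hedged illustration only: from userspace, DUT mode is toggled by
 * writing a boolean string (per strtobool) to the debugfs entry backed
 * by dut_mode_fops above. The path assumes debugfs is mounted at the
 * usual location and that the adapter is hci0; neither is guaranteed.
 */
#if 0	/* userspace sketch, not part of the kernel build */
#include <fcntl.h>
#include <unistd.h>

static int example_enable_dut_mode(void)
{
	int fd = open("/sys/kernel/debug/bluetooth/hci0/dut_mode", O_WRONLY);

	if (fd < 0)
		return -1;

	/* dut_mode_write() accepts "1", "y" or "Y" for enable */
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}
#endif
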
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

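/* A minimal usage sketch (not code from this file): a caller holding the
 * request lock issues one synchronous command with __hci_cmd_sync() and
 * owns the returned skb. The helper name is hypothetical; the response
 * layout follows struct hci_rp_read_local_version from hci.h.
 */
static inline int example_read_local_version(struct hci_dev *hdev)
{
	struct hci_rp_read_local_version *ver;
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	if (skb->len < sizeof(*ver)) {
		kfree_skb(skb);
		return -EILSEQ;
	}

	ver = (struct hci_rp_read_local_version *)skb->data;
	BT_DBG("%s HCI version %u revision %u", hdev->name,
	       ver->hci_ver, __le16_to_cpu(ver->hci_rev));

	kfree_skb(skb);
	return 0;
}
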
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

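/* The request pattern used throughout this file, shown as a hypothetical
 * example: a builder callback queues one or more commands, and
 * hci_req_sync() runs them serialized against all other requests. This
 * mirrors the real builders such as hci_scan_req() further down.
 */
static void example_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static inline int example_set_scan(struct hci_dev *hdev, __u8 scan)
{
	/* Sleeps until the command completes or HCI_CMD_TIMEOUT expires */
	return hci_req_sync(hdev, example_scan_req, scan, HCI_CMD_TIMEOUT);
}
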
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * batch.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init1(req);
		break;
	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

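/* For reference, each flag above is bit (n % 8) of byte (n / 8) of the
 * 64-bit event mask, where n is the event's bit position in the Core
 * spec (Vol 2, Part E, Sec. 7.3.1). A hypothetical helper that sets an
 * event by its spec-defined bit position:
 */
static inline void example_set_event_bit(u8 events[8], unsigned int bit)
{
	/* e.g. bit 61 (LE Meta-Event) yields events[7] |= 0x20, as above */
	events[bit / 8] |= 1 << (bit % 8);
}
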
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available. However, some controllers
		 * list the max_page as 0 as long as SSP has not been
		 * enabled. To achieve proper debugging output, force
		 * the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;

		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	return hdev;
}

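/* Typical use of the reference returned above, as a sketch: every
 * successful hci_dev_get() must be balanced by hci_dev_put().
 */
static inline int example_with_hdev(int index)
{
	struct hci_dev *hdev = hci_dev_get(index);

	if (!hdev)
		return -ENODEV;

	BT_DBG("%s is %s", hdev->name,
	       test_bit(HCI_UP, &hdev->flags) ? "up" : "down");

	hci_dev_put(hdev);
	return 0;
}
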
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

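/* Userspace view of the ioctl above, as a hedged sketch: the caller
 * passes a struct hci_inquiry_req immediately followed by buffer space
 * for the responses (libbluetooth wraps this as hci_inquiry()).
 */
#if 0	/* userspace sketch, not part of the kernel build */
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int example_inquiry(int dev_id)
{
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[8];
	} req = {
		.ir = {
			.dev_id  = dev_id,
			.flags   = IREQ_CACHE_FLUSH,
			.lap     = { 0x33, 0x8b, 0x9e },	/* GIAC */
			.length  = 8,	/* 8 * 1.28 s */
			.num_rsp = 8,
		},
	};
	int ret, ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (ctl < 0)
		return -1;

	ret = ioctl(ctl, HCIINQUIRY, &req);
	close(ctl);

	return ret < 0 ? -1 : req.ir.num_rsp;	/* entries in req.info[] */
}
#endif
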
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

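/* The legacy ioctl entry point for this function, sketched from the
 * userspace side (roughly what "hciconfig hci0 up" ends up doing):
 */
#if 0	/* userspace sketch, not part of the kernel build */
#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int example_dev_up(int dev_id)
{
	int ret, ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (ctl < 0)
		return -1;

	ret = ioctl(ctl, HCIDEVUP, dev_id);
	close(ctl);

	/* EALREADY simply means the device was already up */
	return (ret < 0 && errno != EALREADY) ? -1 : 0;
}
#endif
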
/* This function requires that the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		if (hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev, 0);
	}

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
	    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured, it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

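/* Matching userspace call, as an illustrative sketch: allocate the
 * header plus room for dev_num entries and let the kernel fill it in.
 */
#if 0	/* userspace sketch, not part of the kernel build */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int example_list_devices(void)
{
	struct hci_dev_list_req *dl;
	int i, ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (ctl < 0)
		return -1;

	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
	if (!dl) {
		close(ctl);
		return -1;
	}
	dl->dev_num = HCI_MAX_DEV;

	if (ioctl(ctl, HCIGETDEVLIST, dl) == 0)
		for (i = 0; i < dl->dev_num; i++)
			printf("hci%u flags 0x%x\n", dl->dev_req[i].dev_id,
			       (unsigned int)dl->dev_req[i].dev_opt);

	free(dl);
	close(ctl);
	return 0;
}
#endif
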
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured, it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);
	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

static void hci_adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, adv_instance_expire.work);

	BT_DBG("%s", hdev->name);

	mgmt_adv_timeout_expired(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as a requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as a requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as a requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key) {
		key = old_key;
	} else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

2629 /* This function requires the caller holds hdev->lock */
2630 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2632 struct adv_info *adv_instance;
2634 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2635 if (adv_instance->instance == instance)
2636 return adv_instance;
2642 /* This function requires the caller holds hdev->lock */
2643 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2644 struct adv_info *cur_instance;
2646 cur_instance = hci_find_adv_instance(hdev, instance);
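/* Instances rotate circularly: after the last entry in the list,
 * iteration wraps back around to the first one.
 */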
2650 if (cur_instance == list_last_entry(&hdev->adv_instances,
2651 struct adv_info, list))
2652 return list_first_entry(&hdev->adv_instances,
2653 struct adv_info, list);
2655 return list_next_entry(cur_instance, list);
2658 /* This function requires the caller holds hdev->lock */
2659 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2661 struct adv_info *adv_instance;
2663 adv_instance = hci_find_adv_instance(hdev, instance);
2667 BT_DBG("%s removing instance %d", hdev->name, instance);
2669 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2670 cancel_delayed_work(&hdev->adv_instance_expire);
2671 hdev->adv_instance_timeout = 0;
2674 list_del(&adv_instance->list);
2675 kfree(adv_instance);
2677 hdev->adv_instance_cnt--;
2682 /* This function requires the caller holds hdev->lock */
2683 void hci_adv_instances_clear(struct hci_dev *hdev)
2685 struct adv_info *adv_instance, *n;
2687 if (hdev->adv_instance_timeout) {
2688 cancel_delayed_work(&hdev->adv_instance_expire);
2689 hdev->adv_instance_timeout = 0;
2692 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2693 list_del(&adv_instance->list);
2694 kfree(adv_instance);
2697 hdev->adv_instance_cnt = 0;
2700 /* This function requires the caller holds hdev->lock */
2701 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2702 u16 adv_data_len, u8 *adv_data,
2703 u16 scan_rsp_len, u8 *scan_rsp_data,
2704 u16 timeout, u16 duration)
2706 struct adv_info *adv_instance;
2708 adv_instance = hci_find_adv_instance(hdev, instance);
2710 memset(adv_instance->adv_data, 0,
2711 sizeof(adv_instance->adv_data));
2712 memset(adv_instance->scan_rsp_data, 0,
2713 sizeof(adv_instance->scan_rsp_data));
2715 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2716 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2719 adv_instance = kmalloc(sizeof(*adv_instance), GFP_KERNEL);
2723 memset(adv_instance, 0, sizeof(*adv_instance));
2724 adv_instance->pending = true;
2725 adv_instance->instance = instance;
2726 list_add(&adv_instance->list, &hdev->adv_instances);
2727 hdev->adv_instance_cnt++;
2730 adv_instance->flags = flags;
2731 adv_instance->adv_data_len = adv_data_len;
2732 adv_instance->scan_rsp_len = scan_rsp_len;
2735 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2738 memcpy(adv_instance->scan_rsp_data,
2739 scan_rsp_data, scan_rsp_len);
2741 adv_instance->timeout = timeout;
2742 adv_instance->remaining_time = timeout;
2745 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2747 adv_instance->duration = duration;
2749 BT_DBG("%s for instance %d", hdev->name, instance);
2754 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2755 bdaddr_t *bdaddr, u8 type)
2757 struct bdaddr_list *b;
2759 list_for_each_entry(b, bdaddr_list, list) {
2760 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2767 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2769 struct list_head *p, *n;
2771 list_for_each_safe(p, n, bdaddr_list) {
2772 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2779 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2781 struct bdaddr_list *entry;
2783 if (!bacmp(bdaddr, BDADDR_ANY))
2786 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2789 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2793 bacpy(&entry->bdaddr, bdaddr);
2794 entry->bdaddr_type = type;
2796 list_add(&entry->list, list);
2801 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2803 struct bdaddr_list *entry;
2805 if (!bacmp(bdaddr, BDADDR_ANY)) {
2806 hci_bdaddr_list_clear(list);
2810 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2814 list_del(&entry->list);
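/* Illustrative usage of the bdaddr list helpers: add an entry to a
 * device's whitelist and later remove it again:
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *	if (!err)
 *		hci_bdaddr_list_del(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *
 * Passing BDADDR_ANY to hci_bdaddr_list_del() clears the whole list.
 */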
2820 /* This function requires the caller holds hdev->lock */
2821 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2822 bdaddr_t *addr, u8 addr_type)
2824 struct hci_conn_params *params;
2826 /* The conn params list only contains identity addresses */
2827 if (!hci_is_identity_address(addr, addr_type))
2830 list_for_each_entry(params, &hdev->le_conn_params, list) {
2831 if (bacmp(&params->addr, addr) == 0 &&
2832 params->addr_type == addr_type) {
2840 /* This function requires the caller holds hdev->lock */
2841 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2842 bdaddr_t *addr, u8 addr_type)
2844 struct hci_conn_params *param;
2846 /* The list only contains identity addresses */
2847 if (!hci_is_identity_address(addr, addr_type))
2850 list_for_each_entry(param, list, action) {
2851 if (bacmp(&param->addr, addr) == 0 &&
2852 param->addr_type == addr_type)
2859 /* This function requires the caller holds hdev->lock */
2860 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2861 bdaddr_t *addr, u8 addr_type)
2863 struct hci_conn_params *params;
2865 if (!hci_is_identity_address(addr, addr_type))
2868 params = hci_conn_params_lookup(hdev, addr, addr_type);
2872 params = kzalloc(sizeof(*params), GFP_KERNEL);
2874 BT_ERR("Out of memory");
2878 bacpy(&params->addr, addr);
2879 params->addr_type = addr_type;
2881 list_add(&params->list, &hdev->le_conn_params);
2882 INIT_LIST_HEAD(&params->action);
2884 params->conn_min_interval = hdev->le_conn_min_interval;
2885 params->conn_max_interval = hdev->le_conn_max_interval;
2886 params->conn_latency = hdev->le_conn_latency;
2887 params->supervision_timeout = hdev->le_supv_timeout;
2888 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2890 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2895 static void hci_conn_params_free(struct hci_conn_params *params)
2898 hci_conn_drop(params->conn);
2899 hci_conn_put(params->conn);
2902 list_del(&params->action);
2903 list_del(&params->list);
2907 /* This function requires the caller holds hdev->lock */
2908 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2910 struct hci_conn_params *params;
2912 params = hci_conn_params_lookup(hdev, addr, addr_type);
2916 hci_conn_params_free(params);
2918 hci_update_background_scan(hdev);
2920 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2923 /* This function requires the caller holds hdev->lock */
2924 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2926 struct hci_conn_params *params, *tmp;
2928 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2929 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2931 list_del(&params->list);
2935 BT_DBG("All LE disabled connection parameters were removed");
2938 /* This function requires the caller holds hdev->lock */
2939 void hci_conn_params_clear_all(struct hci_dev *hdev)
2941 struct hci_conn_params *params, *tmp;
2943 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2944 hci_conn_params_free(params);
2946 hci_update_background_scan(hdev);
2948 BT_DBG("All LE connection parameters were removed");
2951 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2954 BT_ERR("Failed to start inquiry: status %d", status);
2957 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2958 hci_dev_unlock(hdev);
2963 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2966 /* General inquiry access code (GIAC) */
2967 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2968 struct hci_cp_inquiry cp;
2972 BT_ERR("Failed to disable LE scanning: status %d", status);
2976 hdev->discovery.scan_start = 0;
2978 switch (hdev->discovery.type) {
2979 case DISCOV_TYPE_LE:
2981 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2982 hci_dev_unlock(hdev);
2985 case DISCOV_TYPE_INTERLEAVED:
2988 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2990 /* If we were running LE only scan, change discovery
2991 * state. If we were running both LE and BR/EDR inquiry
2992 * simultaneously, and BR/EDR inquiry is already
2993 * finished, stop discovery, otherwise BR/EDR inquiry
2994 * will stop discovery when finished. If we are about to
2995 * resolve a remote device name, do not change the discovery state.
2997 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2998 hdev->discovery.state != DISCOVERY_RESOLVING)
2999 hci_discovery_set_state(hdev,
3002 struct hci_request req;
3004 hci_inquiry_cache_flush(hdev);
3006 hci_req_init(&req, hdev);
3008 memset(&cp, 0, sizeof(cp));
3009 memcpy(&cp.lap, lap, sizeof(cp.lap));
3010 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3011 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3013 err = hci_req_run(&req, inquiry_complete);
3015 BT_ERR("Inquiry request failed: err %d", err);
3016 hci_discovery_set_state(hdev,
3021 hci_dev_unlock(hdev);
3026 static void le_scan_disable_work(struct work_struct *work)
3028 struct hci_dev *hdev = container_of(work, struct hci_dev,
3029 le_scan_disable.work);
3030 struct hci_request req;
3033 BT_DBG("%s", hdev->name);
3035 cancel_delayed_work_sync(&hdev->le_scan_restart);
3037 hci_req_init(&req, hdev);
3039 hci_req_add_le_scan_disable(&req);
3041 err = hci_req_run(&req, le_scan_disable_work_complete);
3043 BT_ERR("Disable LE scanning request failed: err %d", err);
3046 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3049 unsigned long timeout, duration, scan_start, now;
3051 BT_DBG("%s", hdev->name);
3054 BT_ERR("Failed to restart LE scan: status %d", status);
3058 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3059 !hdev->discovery.scan_start)
3062 /* When the scan was started, hdev->le_scan_disable was queued to
3063 * run "duration" after scan_start. The scan restart canceled that
3064 * job, so we need to queue it again with the proper remaining
3065 * timeout, to make sure that the scan does not run indefinitely.
3067 duration = hdev->discovery.scan_duration;
3068 scan_start = hdev->discovery.scan_start;
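/* jiffies may have wrapped since scan_start, so compute the elapsed
 * time in a wrap-safe way before deriving the remaining timeout.
 */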
3070 if (now - scan_start <= duration) {
3073 if (now >= scan_start)
3074 elapsed = now - scan_start;
3076 elapsed = ULONG_MAX - scan_start + now;
3078 timeout = duration - elapsed;
3082 queue_delayed_work(hdev->workqueue,
3083 &hdev->le_scan_disable, timeout);
3086 static void le_scan_restart_work(struct work_struct *work)
3088 struct hci_dev *hdev = container_of(work, struct hci_dev,
3089 le_scan_restart.work);
3090 struct hci_request req;
3091 struct hci_cp_le_set_scan_enable cp;
3094 BT_DBG("%s", hdev->name);
3096 /* If controller is not scanning we are done. */
3097 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3100 hci_req_init(&req, hdev);
3102 hci_req_add_le_scan_disable(&req);
3104 memset(&cp, 0, sizeof(cp));
3105 cp.enable = LE_SCAN_ENABLE;
3106 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3107 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3109 err = hci_req_run(&req, le_scan_restart_work_complete);
3111 BT_ERR("Restart LE scan request failed: err %d", err);
3114 /* Copy the Identity Address of the controller.
3116 * If the controller has a public BD_ADDR, then by default use that one.
3117 * If this is an LE-only controller without a public address, default to
3118 * the static random address.
3120 * For debugging purposes it is possible to force controllers with a
3121 * public address to use the static random address instead.
3123 * In case BR/EDR has been disabled on a dual-mode controller and
3124 * userspace has configured a static address, then that address
3125 * becomes the identity address instead of the public BR/EDR address.
3127 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3130 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3131 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3132 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3133 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3134 bacpy(bdaddr, &hdev->static_addr);
3135 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3137 bacpy(bdaddr, &hdev->bdaddr);
3138 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3142 /* Alloc HCI device */
3143 struct hci_dev *hci_alloc_dev(void)
3145 struct hci_dev *hdev;
3147 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3151 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3152 hdev->esco_type = (ESCO_HV1);
3153 hdev->link_mode = (HCI_LM_ACCEPT);
3154 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3155 hdev->io_capability = 0x03; /* No Input No Output */
3156 hdev->manufacturer = 0xffff; /* Default to internal use */
3157 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3158 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3159 hdev->adv_instance_cnt = 0;
3160 hdev->cur_adv_instance = 0x00;
3161 hdev->adv_instance_timeout = 0;
3163 hdev->sniff_max_interval = 800;
3164 hdev->sniff_min_interval = 80;
3166 hdev->le_adv_channel_map = 0x07;
3167 hdev->le_adv_min_interval = 0x0800;
3168 hdev->le_adv_max_interval = 0x0800;
3169 hdev->le_scan_interval = 0x0060;
3170 hdev->le_scan_window = 0x0030;
3171 hdev->le_conn_min_interval = 0x0028;
3172 hdev->le_conn_max_interval = 0x0038;
3173 hdev->le_conn_latency = 0x0000;
3174 hdev->le_supv_timeout = 0x002a;
3175 hdev->le_def_tx_len = 0x001b;
3176 hdev->le_def_tx_time = 0x0148;
3177 hdev->le_max_tx_len = 0x001b;
3178 hdev->le_max_tx_time = 0x0148;
3179 hdev->le_max_rx_len = 0x001b;
3180 hdev->le_max_rx_time = 0x0148;
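/* The LE defaults above use Bluetooth spec units: scan interval and
 * window in 0.625 ms steps (0x0060 = 60 ms, 0x0030 = 30 ms), connection
 * interval in 1.25 ms steps (0x0028 = 50 ms, 0x0038 = 70 ms),
 * supervision timeout in 10 ms steps (0x002a = 420 ms), data length in
 * octets (0x001b = 27) and transmit/receive time in microseconds
 * (0x0148 = 328 us).
 */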
3182 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3183 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3184 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3185 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3187 mutex_init(&hdev->lock);
3188 mutex_init(&hdev->req_lock);
3190 INIT_LIST_HEAD(&hdev->mgmt_pending);
3191 INIT_LIST_HEAD(&hdev->blacklist);
3192 INIT_LIST_HEAD(&hdev->whitelist);
3193 INIT_LIST_HEAD(&hdev->uuids);
3194 INIT_LIST_HEAD(&hdev->link_keys);
3195 INIT_LIST_HEAD(&hdev->long_term_keys);
3196 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3197 INIT_LIST_HEAD(&hdev->remote_oob_data);
3198 INIT_LIST_HEAD(&hdev->le_white_list);
3199 INIT_LIST_HEAD(&hdev->le_conn_params);
3200 INIT_LIST_HEAD(&hdev->pend_le_conns);
3201 INIT_LIST_HEAD(&hdev->pend_le_reports);
3202 INIT_LIST_HEAD(&hdev->conn_hash.list);
3203 INIT_LIST_HEAD(&hdev->adv_instances);
3205 INIT_WORK(&hdev->rx_work, hci_rx_work);
3206 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3207 INIT_WORK(&hdev->tx_work, hci_tx_work);
3208 INIT_WORK(&hdev->power_on, hci_power_on);
3209 INIT_WORK(&hdev->error_reset, hci_error_reset);
3211 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3212 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3213 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3214 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3215 INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
3217 skb_queue_head_init(&hdev->rx_q);
3218 skb_queue_head_init(&hdev->cmd_q);
3219 skb_queue_head_init(&hdev->raw_q);
3221 init_waitqueue_head(&hdev->req_wait_q);
3223 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3225 hci_init_sysfs(hdev);
3226 discovery_init(hdev);
3227 adv_info_init(hdev);
3231 EXPORT_SYMBOL(hci_alloc_dev);
3233 /* Free HCI device */
3234 void hci_free_dev(struct hci_dev *hdev)
3236 /* Will be freed via the device release callback */
3237 put_device(&hdev->dev);
3239 EXPORT_SYMBOL(hci_free_dev);
3241 /* Register HCI device */
3242 int hci_register_dev(struct hci_dev *hdev)
3246 if (!hdev->open || !hdev->close || !hdev->send)
3249 /* Do not allow HCI_AMP devices to register at index 0,
3250 * so the index can be used as the AMP controller ID.
3252 switch (hdev->dev_type) {
3254 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3257 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3266 sprintf(hdev->name, "hci%d", id);
3269 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3271 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3272 WQ_MEM_RECLAIM, 1, hdev->name);
3273 if (!hdev->workqueue) {
3278 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3279 WQ_MEM_RECLAIM, 1, hdev->name);
3280 if (!hdev->req_workqueue) {
3281 destroy_workqueue(hdev->workqueue);
3286 if (!IS_ERR_OR_NULL(bt_debugfs))
3287 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3289 dev_set_name(&hdev->dev, "%s", hdev->name);
3291 error = device_add(&hdev->dev);
3295 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3296 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3299 if (rfkill_register(hdev->rfkill) < 0) {
3300 rfkill_destroy(hdev->rfkill);
3301 hdev->rfkill = NULL;
3305 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3306 hci_dev_set_flag(hdev, HCI_RFKILLED);
3308 hci_dev_set_flag(hdev, HCI_SETUP);
3309 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3311 if (hdev->dev_type == HCI_BREDR) {
3312 /* Assume BR/EDR support until proven otherwise (such as
3313 * through reading supported features during init).
3315 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3318 write_lock(&hci_dev_list_lock);
3319 list_add(&hdev->list, &hci_dev_list);
3320 write_unlock(&hci_dev_list_lock);
3322 /* Devices that are marked for raw-only usage are unconfigured
3323 * and should not be included in normal operation.
3325 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3326 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3328 hci_notify(hdev, HCI_DEV_REG);
3331 queue_work(hdev->req_workqueue, &hdev->power_on);
3336 destroy_workqueue(hdev->workqueue);
3337 destroy_workqueue(hdev->req_workqueue);
3339 ida_simple_remove(&hci_index_ida, hdev->id);
3343 EXPORT_SYMBOL(hci_register_dev);
3345 /* Unregister HCI device */
3346 void hci_unregister_dev(struct hci_dev *hdev)
3350 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3352 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3356 write_lock(&hci_dev_list_lock);
3357 list_del(&hdev->list);
3358 write_unlock(&hci_dev_list_lock);
3360 hci_dev_do_close(hdev);
3362 cancel_work_sync(&hdev->power_on);
3364 if (!test_bit(HCI_INIT, &hdev->flags) &&
3365 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3366 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3368 mgmt_index_removed(hdev);
3369 hci_dev_unlock(hdev);
3372 /* mgmt_index_removed should take care of emptying the mgmt_pending list */
3374 BUG_ON(!list_empty(&hdev->mgmt_pending));
3376 hci_notify(hdev, HCI_DEV_UNREG);
3379 rfkill_unregister(hdev->rfkill);
3380 rfkill_destroy(hdev->rfkill);
3383 device_del(&hdev->dev);
3385 debugfs_remove_recursive(hdev->debugfs);
3387 destroy_workqueue(hdev->workqueue);
3388 destroy_workqueue(hdev->req_workqueue);
3391 hci_bdaddr_list_clear(&hdev->blacklist);
3392 hci_bdaddr_list_clear(&hdev->whitelist);
3393 hci_uuids_clear(hdev);
3394 hci_link_keys_clear(hdev);
3395 hci_smp_ltks_clear(hdev);
3396 hci_smp_irks_clear(hdev);
3397 hci_remote_oob_data_clear(hdev);
3398 hci_adv_instances_clear(hdev);
3399 hci_bdaddr_list_clear(&hdev->le_white_list);
3400 hci_conn_params_clear_all(hdev);
3401 hci_discovery_filter_clear(hdev);
3402 hci_dev_unlock(hdev);
3406 ida_simple_remove(&hci_index_ida, id);
3408 EXPORT_SYMBOL(hci_unregister_dev);
3410 /* Suspend HCI device */
3411 int hci_suspend_dev(struct hci_dev *hdev)
3413 hci_notify(hdev, HCI_DEV_SUSPEND);
3416 EXPORT_SYMBOL(hci_suspend_dev);
3418 /* Resume HCI device */
3419 int hci_resume_dev(struct hci_dev *hdev)
3421 hci_notify(hdev, HCI_DEV_RESUME);
3424 EXPORT_SYMBOL(hci_resume_dev);
3426 /* Reset HCI device */
3427 int hci_reset_dev(struct hci_dev *hdev)
3429 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
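/* Synthesized event: the HCI_EV_HARDWARE_ERROR event code, a parameter
 * length of one byte, and a hardware error code of 0x00.
 */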
3430 struct sk_buff *skb;
3432 skb = bt_skb_alloc(3, GFP_ATOMIC);
3436 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3437 memcpy(skb_put(skb, 3), hw_err, 3);
3439 /* Send Hardware Error to upper stack */
3440 return hci_recv_frame(hdev, skb);
3442 EXPORT_SYMBOL(hci_reset_dev);
3444 /* Receive frame from HCI drivers */
3445 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3447 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3448 && !test_bit(HCI_INIT, &hdev->flags))) {
3454 bt_cb(skb)->incoming = 1;
3457 __net_timestamp(skb);
3459 skb_queue_tail(&hdev->rx_q, skb);
3460 queue_work(hdev->workqueue, &hdev->rx_work);
3464 EXPORT_SYMBOL(hci_recv_frame);
3466 /* ---- Interface to upper protocols ---- */
3468 int hci_register_cb(struct hci_cb *cb)
3470 BT_DBG("%p name %s", cb, cb->name);
3472 mutex_lock(&hci_cb_list_lock);
3473 list_add_tail(&cb->list, &hci_cb_list);
3474 mutex_unlock(&hci_cb_list_lock);
3478 EXPORT_SYMBOL(hci_register_cb);
3480 int hci_unregister_cb(struct hci_cb *cb)
3482 BT_DBG("%p name %s", cb, cb->name);
3484 mutex_lock(&hci_cb_list_lock);
3485 list_del(&cb->list);
3486 mutex_unlock(&hci_cb_list_lock);
3490 EXPORT_SYMBOL(hci_unregister_cb);
3492 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3496 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3499 __net_timestamp(skb);
3501 /* Send copy to monitor */
3502 hci_send_to_monitor(hdev, skb);
3504 if (atomic_read(&hdev->promisc)) {
3505 /* Send copy to the sockets */
3506 hci_send_to_sock(hdev, skb);
3509 /* Get rid of the skb owner before sending to the driver. */
3512 err = hdev->send(hdev, skb);
3514 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3519 /* Send HCI command */
3520 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3523 struct sk_buff *skb;
3525 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3527 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3529 BT_ERR("%s no memory for command", hdev->name);
3533 /* Stand-alone HCI commands must be flagged as
3534 * single-command requests.
3536 bt_cb(skb)->req.start = true;
3538 skb_queue_tail(&hdev->cmd_q, skb);
3539 queue_work(hdev->workqueue, &hdev->cmd_work);
3544 /* Get data from the previously sent command */
3545 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3547 struct hci_command_hdr *hdr;
3549 if (!hdev->sent_cmd)
3552 hdr = (void *) hdev->sent_cmd->data;
3554 if (hdr->opcode != cpu_to_le16(opcode))
3557 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3559 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3563 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3565 struct hci_acl_hdr *hdr;
3568 skb_push(skb, HCI_ACL_HDR_SIZE);
3569 skb_reset_transport_header(skb);
3570 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
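/* hci_handle_pack() folds the 2-bit PB and BC flags into the bits
 * above the 12-bit connection handle.
 */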
3571 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3572 hdr->dlen = cpu_to_le16(len);
3575 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3576 struct sk_buff *skb, __u16 flags)
3578 struct hci_conn *conn = chan->conn;
3579 struct hci_dev *hdev = conn->hdev;
3580 struct sk_buff *list;
3582 skb->len = skb_headlen(skb);
3585 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3587 switch (hdev->dev_type) {
3589 hci_add_acl_hdr(skb, conn->handle, flags);
3592 hci_add_acl_hdr(skb, chan->handle, flags);
3595 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3599 list = skb_shinfo(skb)->frag_list;
3601 /* Non-fragmented */
3602 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3604 skb_queue_tail(queue, skb);
3607 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3609 skb_shinfo(skb)->frag_list = NULL;
3611 /* Queue all fragments atomically. We need to use spin_lock_bh
3612 * here because with 6LoWPAN links this function is called from
3613 * softirq context, where taking a normal spin lock could deadlock. */
3616 spin_lock_bh(&queue->lock);
3618 __skb_queue_tail(queue, skb);
3620 flags &= ~ACL_START;
3623 skb = list; list = list->next;
3625 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3626 hci_add_acl_hdr(skb, conn->handle, flags);
3628 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3630 __skb_queue_tail(queue, skb);
3633 spin_unlock_bh(&queue->lock);
3637 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3639 struct hci_dev *hdev = chan->conn->hdev;
3641 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3643 hci_queue_acl(chan, &chan->data_q, skb, flags);
3645 queue_work(hdev->workqueue, &hdev->tx_work);
3649 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3651 struct hci_dev *hdev = conn->hdev;
3652 struct hci_sco_hdr hdr;
3654 BT_DBG("%s len %d", hdev->name, skb->len);
3656 hdr.handle = cpu_to_le16(conn->handle);
3657 hdr.dlen = skb->len;
3659 skb_push(skb, HCI_SCO_HDR_SIZE);
3660 skb_reset_transport_header(skb);
3661 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3663 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3665 skb_queue_tail(&conn->data_q, skb);
3666 queue_work(hdev->workqueue, &hdev->tx_work);
3669 /* ---- HCI TX task (outgoing data) ---- */
3671 /* HCI Connection scheduler */
3672 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3675 struct hci_conn_hash *h = &hdev->conn_hash;
3676 struct hci_conn *conn = NULL, *c;
3677 unsigned int num = 0, min = ~0;
3679 /* We don't have to lock the device here. Connections are always
3680 * added and removed with the TX task disabled. */
3684 list_for_each_entry_rcu(c, &h->list, list) {
3685 if (c->type != type || skb_queue_empty(&c->data_q))
3688 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3693 if (c->sent < min) {
3698 if (hci_conn_num(hdev, type) == num)
3707 switch (conn->type) {
3709 cnt = hdev->acl_cnt;
3713 cnt = hdev->sco_cnt;
3716 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3720 BT_ERR("Unknown link type");
3728 BT_DBG("conn %p quote %d", conn, *quote);
3732 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3734 struct hci_conn_hash *h = &hdev->conn_hash;
3737 BT_ERR("%s link tx timeout", hdev->name);
3741 /* Kill stalled connections */
3742 list_for_each_entry_rcu(c, &h->list, list) {
3743 if (c->type == type && c->sent) {
3744 BT_ERR("%s killing stalled connection %pMR",
3745 hdev->name, &c->dst);
3746 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3753 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3756 struct hci_conn_hash *h = &hdev->conn_hash;
3757 struct hci_chan *chan = NULL;
3758 unsigned int num = 0, min = ~0, cur_prio = 0;
3759 struct hci_conn *conn;
3760 int cnt, q, conn_num = 0;
3762 BT_DBG("%s", hdev->name);
3766 list_for_each_entry_rcu(conn, &h->list, list) {
3767 struct hci_chan *tmp;
3769 if (conn->type != type)
3772 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3777 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3778 struct sk_buff *skb;
3780 if (skb_queue_empty(&tmp->data_q))
3783 skb = skb_peek(&tmp->data_q);
3784 if (skb->priority < cur_prio)
3787 if (skb->priority > cur_prio) {
3790 cur_prio = skb->priority;
3795 if (conn->sent < min) {
3801 if (hci_conn_num(hdev, type) == conn_num)
3810 switch (chan->conn->type) {
3812 cnt = hdev->acl_cnt;
3815 cnt = hdev->block_cnt;
3819 cnt = hdev->sco_cnt;
3822 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3826 BT_ERR("Unknown link type");
3831 BT_DBG("chan %p quote %d", chan, *quote);
3835 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3837 struct hci_conn_hash *h = &hdev->conn_hash;
3838 struct hci_conn *conn;
3841 BT_DBG("%s", hdev->name);
3845 list_for_each_entry_rcu(conn, &h->list, list) {
3846 struct hci_chan *chan;
3848 if (conn->type != type)
3851 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3856 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3857 struct sk_buff *skb;
3864 if (skb_queue_empty(&chan->data_q))
3867 skb = skb_peek(&chan->data_q);
3868 if (skb->priority >= HCI_PRIO_MAX - 1)
3871 skb->priority = HCI_PRIO_MAX - 1;
3873 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3877 if (hci_conn_num(hdev, type) == num)
3885 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3887 /* Calculate count of blocks used by this packet */
3888 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
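/* For example, with the 4-byte ACL header stripped, a 260-byte skb on
 * a controller using 128-byte blocks takes DIV_ROUND_UP(256, 128) = 2
 * blocks.
 */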
3891 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3893 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3894 /* ACL tx timeout must be longer than maximum
3895 * link supervision timeout (40.9 seconds) */
3896 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3897 HCI_ACL_TX_TIMEOUT))
3898 hci_link_tx_to(hdev, ACL_LINK);
3902 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3904 unsigned int cnt = hdev->acl_cnt;
3905 struct hci_chan *chan;
3906 struct sk_buff *skb;
3909 __check_timeout(hdev, cnt);
3911 while (hdev->acl_cnt &&
3912 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3913 u32 priority = (skb_peek(&chan->data_q))->priority;
3914 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3915 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3916 skb->len, skb->priority);
3918 /* Stop if priority has changed */
3919 if (skb->priority < priority)
3922 skb = skb_dequeue(&chan->data_q);
3924 hci_conn_enter_active_mode(chan->conn,
3925 bt_cb(skb)->force_active);
3927 hci_send_frame(hdev, skb);
3928 hdev->acl_last_tx = jiffies;
3936 if (cnt != hdev->acl_cnt)
3937 hci_prio_recalculate(hdev, ACL_LINK);
3940 static void hci_sched_acl_blk(struct hci_dev *hdev)
3942 unsigned int cnt = hdev->block_cnt;
3943 struct hci_chan *chan;
3944 struct sk_buff *skb;
3948 __check_timeout(hdev, cnt);
3950 BT_DBG("%s", hdev->name);
3952 if (hdev->dev_type == HCI_AMP)
3957 while (hdev->block_cnt > 0 &&
3958 (chan = hci_chan_sent(hdev, type, &quote))) {
3959 u32 priority = (skb_peek(&chan->data_q))->priority;
3960 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3963 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3964 skb->len, skb->priority);
3966 /* Stop if priority has changed */
3967 if (skb->priority < priority)
3970 skb = skb_dequeue(&chan->data_q);
3972 blocks = __get_blocks(hdev, skb);
3973 if (blocks > hdev->block_cnt)
3976 hci_conn_enter_active_mode(chan->conn,
3977 bt_cb(skb)->force_active);
3979 hci_send_frame(hdev, skb);
3980 hdev->acl_last_tx = jiffies;
3982 hdev->block_cnt -= blocks;
3985 chan->sent += blocks;
3986 chan->conn->sent += blocks;
3990 if (cnt != hdev->block_cnt)
3991 hci_prio_recalculate(hdev, type);
3994 static void hci_sched_acl(struct hci_dev *hdev)
3996 BT_DBG("%s", hdev->name);
3998 /* No ACL links to schedule on a BR/EDR controller */
3999 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4002 /* No AMP links to schedule on an AMP controller */
4003 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4006 switch (hdev->flow_ctl_mode) {
4007 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4008 hci_sched_acl_pkt(hdev);
4011 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4012 hci_sched_acl_blk(hdev);
4018 static void hci_sched_sco(struct hci_dev *hdev)
4020 struct hci_conn *conn;
4021 struct sk_buff *skb;
4024 BT_DBG("%s", hdev->name);
4026 if (!hci_conn_num(hdev, SCO_LINK))
4029 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4030 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4031 BT_DBG("skb %p len %d", skb, skb->len);
4032 hci_send_frame(hdev, skb);
4035 if (conn->sent == ~0)
4041 static void hci_sched_esco(struct hci_dev *hdev)
4043 struct hci_conn *conn;
4044 struct sk_buff *skb;
4047 BT_DBG("%s", hdev->name);
4049 if (!hci_conn_num(hdev, ESCO_LINK))
4052 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4054 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4055 BT_DBG("skb %p len %d", skb, skb->len);
4056 hci_send_frame(hdev, skb);
4059 if (conn->sent == ~0)
4065 static void hci_sched_le(struct hci_dev *hdev)
4067 struct hci_chan *chan;
4068 struct sk_buff *skb;
4069 int quote, cnt, tmp;
4071 BT_DBG("%s", hdev->name);
4073 if (!hci_conn_num(hdev, LE_LINK))
4076 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4077 /* LE tx timeout must be longer than maximum
4078 * link supervision timeout (40.9 seconds) */
4079 if (!hdev->le_cnt && hdev->le_pkts &&
4080 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4081 hci_link_tx_to(hdev, LE_LINK);
4084 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4086 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4087 u32 priority = (skb_peek(&chan->data_q))->priority;
4088 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4089 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4090 skb->len, skb->priority);
4092 /* Stop if priority has changed */
4093 if (skb->priority < priority)
4096 skb = skb_dequeue(&chan->data_q);
4098 hci_send_frame(hdev, skb);
4099 hdev->le_last_tx = jiffies;
4110 hdev->acl_cnt = cnt;
4113 hci_prio_recalculate(hdev, LE_LINK);
4116 static void hci_tx_work(struct work_struct *work)
4118 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4119 struct sk_buff *skb;
4121 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4122 hdev->sco_cnt, hdev->le_cnt);
4124 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4125 /* Schedule queues and send stuff to HCI driver */
4126 hci_sched_acl(hdev);
4127 hci_sched_sco(hdev);
4128 hci_sched_esco(hdev);
4132 /* Send next queued raw (unknown type) packet */
4133 while ((skb = skb_dequeue(&hdev->raw_q)))
4134 hci_send_frame(hdev, skb);
4137 /* ----- HCI RX task (incoming data processing) ----- */
4139 /* ACL data packet */
4140 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4142 struct hci_acl_hdr *hdr = (void *) skb->data;
4143 struct hci_conn *conn;
4144 __u16 handle, flags;
4146 skb_pull(skb, HCI_ACL_HDR_SIZE);
4148 handle = __le16_to_cpu(hdr->handle);
4149 flags = hci_flags(handle);
4150 handle = hci_handle(handle);
4152 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4155 hdev->stat.acl_rx++;
4158 conn = hci_conn_hash_lookup_handle(hdev, handle);
4159 hci_dev_unlock(hdev);
4162 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4164 /* Send to upper protocol */
4165 l2cap_recv_acldata(conn, skb, flags);
4168 BT_ERR("%s ACL packet for unknown connection handle %d",
4169 hdev->name, handle);
4175 /* SCO data packet */
4176 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4178 struct hci_sco_hdr *hdr = (void *) skb->data;
4179 struct hci_conn *conn;
4182 skb_pull(skb, HCI_SCO_HDR_SIZE);
4184 handle = __le16_to_cpu(hdr->handle);
4186 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4188 hdev->stat.sco_rx++;
4191 conn = hci_conn_hash_lookup_handle(hdev, handle);
4192 hci_dev_unlock(hdev);
4195 /* Send to upper protocol */
4196 sco_recv_scodata(conn, skb);
4199 BT_ERR("%s SCO packet for unknown connection handle %d",
4200 hdev->name, handle);
4206 static bool hci_req_is_complete(struct hci_dev *hdev)
4208 struct sk_buff *skb;
4210 skb = skb_peek(&hdev->cmd_q);
4214 return bt_cb(skb)->req.start;
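/* If the command at the head of the queue is flagged as the start of a
 * new request, every command of the previous request has been sent and
 * that request is complete.
 */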
4217 static void hci_resend_last(struct hci_dev *hdev)
4219 struct hci_command_hdr *sent;
4220 struct sk_buff *skb;
4223 if (!hdev->sent_cmd)
4226 sent = (void *) hdev->sent_cmd->data;
4227 opcode = __le16_to_cpu(sent->opcode);
4228 if (opcode == HCI_OP_RESET)
4231 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4235 skb_queue_head(&hdev->cmd_q, skb);
4236 queue_work(hdev->workqueue, &hdev->cmd_work);
4239 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4240 hci_req_complete_t *req_complete,
4241 hci_req_complete_skb_t *req_complete_skb)
4243 struct sk_buff *skb;
4244 unsigned long flags;
4246 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4248 /* If the completed command doesn't match the last one that was
4249 * sent, we need to do special handling of it.
4251 if (!hci_sent_cmd_data(hdev, opcode)) {
4252 /* Some CSR-based controllers generate a spontaneous
4253 * reset complete event during init and any pending
4254 * command will never be completed. In such a case we
4255 * need to resend whatever was the last sent
4258 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4259 hci_resend_last(hdev);
4264 /* If the command succeeded and there are still more commands in
4265 * this request, the request is not yet complete.
4267 if (!status && !hci_req_is_complete(hdev))
4270 /* If this was the last command in a request the complete
4271 * callback would be found in hdev->sent_cmd instead of the
4272 * command queue (hdev->cmd_q).
4274 if (bt_cb(hdev->sent_cmd)->req.complete) {
4275 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4279 if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4280 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4284 /* Remove all pending commands belonging to this request */
4285 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4286 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4287 if (bt_cb(skb)->req.start) {
4288 __skb_queue_head(&hdev->cmd_q, skb);
4292 *req_complete = bt_cb(skb)->req.complete;
4293 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4296 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4299 static void hci_rx_work(struct work_struct *work)
4301 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4302 struct sk_buff *skb;
4304 BT_DBG("%s", hdev->name);
4306 while ((skb = skb_dequeue(&hdev->rx_q))) {
4307 /* Send copy to monitor */
4308 hci_send_to_monitor(hdev, skb);
4310 if (atomic_read(&hdev->promisc)) {
4311 /* Send copy to the sockets */
4312 hci_send_to_sock(hdev, skb);
4315 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4320 if (test_bit(HCI_INIT, &hdev->flags)) {
4321 /* Don't process data packets in these states. */
4322 switch (bt_cb(skb)->pkt_type) {
4323 case HCI_ACLDATA_PKT:
4324 case HCI_SCODATA_PKT:
4331 switch (bt_cb(skb)->pkt_type) {
4333 BT_DBG("%s Event packet", hdev->name);
4334 hci_event_packet(hdev, skb);
4337 case HCI_ACLDATA_PKT:
4338 BT_DBG("%s ACL data packet", hdev->name);
4339 hci_acldata_packet(hdev, skb);
4342 case HCI_SCODATA_PKT:
4343 BT_DBG("%s SCO data packet", hdev->name);
4344 hci_scodata_packet(hdev, skb);
4354 static void hci_cmd_work(struct work_struct *work)
4356 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4357 struct sk_buff *skb;
4359 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4360 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4362 /* Send queued commands */
4363 if (atomic_read(&hdev->cmd_cnt)) {
4364 skb = skb_dequeue(&hdev->cmd_q);
4368 kfree_skb(hdev->sent_cmd);
4370 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4371 if (hdev->sent_cmd) {
4372 atomic_dec(&hdev->cmd_cnt);
4373 hci_send_frame(hdev, skb);
4374 if (test_bit(HCI_RESET, &hdev->flags))
4375 cancel_delayed_work(&hdev->cmd_timer);
4377 schedule_delayed_work(&hdev->cmd_timer,
4380 skb_queue_head(&hdev->cmd_q, skb);
4381 queue_work(hdev->workqueue, &hdev->cmd_work);