/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
/* Work handlers, defined later in this file */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */

/* Forward a device event (register, up, down, ...) to the HCI socket layer */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
62 /* ---- HCI debugfs entries ---- */
64 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
65 size_t count, loff_t *ppos)
67 struct hci_dev *hdev = file->private_data;
70 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
73 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
76 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
77 size_t count, loff_t *ppos)
79 struct hci_dev *hdev = file->private_data;
82 size_t buf_size = min(count, (sizeof(buf)-1));
86 if (!test_bit(HCI_UP, &hdev->flags))
89 if (copy_from_user(buf, user_buf, buf_size))
93 if (strtobool(buf, &enable))
96 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
101 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
104 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
106 hci_req_unlock(hdev);
111 err = -bt_to_errno(skb->data[0]);
117 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
122 static const struct file_operations dut_mode_fops = {
124 .read = dut_mode_read,
125 .write = dut_mode_write,
126 .llseek = default_llseek,
129 static int features_show(struct seq_file *f, void *ptr)
131 struct hci_dev *hdev = f->private;
135 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
136 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
137 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
138 hdev->features[p][0], hdev->features[p][1],
139 hdev->features[p][2], hdev->features[p][3],
140 hdev->features[p][4], hdev->features[p][5],
141 hdev->features[p][6], hdev->features[p][7]);
143 if (lmp_le_capable(hdev))
144 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
145 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
146 hdev->le_features[0], hdev->le_features[1],
147 hdev->le_features[2], hdev->le_features[3],
148 hdev->le_features[4], hdev->le_features[5],
149 hdev->le_features[6], hdev->le_features[7]);
150 hci_dev_unlock(hdev);
155 static int features_open(struct inode *inode, struct file *file)
157 return single_open(file, features_show, inode->i_private);
160 static const struct file_operations features_fops = {
161 .open = features_open,
164 .release = single_release,
167 static int blacklist_show(struct seq_file *f, void *p)
169 struct hci_dev *hdev = f->private;
170 struct bdaddr_list *b;
173 list_for_each_entry(b, &hdev->blacklist, list)
174 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
175 hci_dev_unlock(hdev);
180 static int blacklist_open(struct inode *inode, struct file *file)
182 return single_open(file, blacklist_show, inode->i_private);
185 static const struct file_operations blacklist_fops = {
186 .open = blacklist_open,
189 .release = single_release,
192 static int uuids_show(struct seq_file *f, void *p)
194 struct hci_dev *hdev = f->private;
195 struct bt_uuid *uuid;
198 list_for_each_entry(uuid, &hdev->uuids, list) {
201 /* The Bluetooth UUID values are stored in big endian,
202 * but with reversed byte order. So convert them into
203 * the right order for the %pUb modifier.
205 for (i = 0; i < 16; i++)
206 val[i] = uuid->uuid[15 - i];
208 seq_printf(f, "%pUb\n", val);
210 hci_dev_unlock(hdev);
215 static int uuids_open(struct inode *inode, struct file *file)
217 return single_open(file, uuids_show, inode->i_private);
220 static const struct file_operations uuids_fops = {
224 .release = single_release,
227 static int inquiry_cache_show(struct seq_file *f, void *p)
229 struct hci_dev *hdev = f->private;
230 struct discovery_state *cache = &hdev->discovery;
231 struct inquiry_entry *e;
235 list_for_each_entry(e, &cache->all, all) {
236 struct inquiry_data *data = &e->data;
237 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
239 data->pscan_rep_mode, data->pscan_period_mode,
240 data->pscan_mode, data->dev_class[2],
241 data->dev_class[1], data->dev_class[0],
242 __le16_to_cpu(data->clock_offset),
243 data->rssi, data->ssp_mode, e->timestamp);
246 hci_dev_unlock(hdev);
251 static int inquiry_cache_open(struct inode *inode, struct file *file)
253 return single_open(file, inquiry_cache_show, inode->i_private);
256 static const struct file_operations inquiry_cache_fops = {
257 .open = inquiry_cache_open,
260 .release = single_release,
263 static int link_keys_show(struct seq_file *f, void *ptr)
265 struct hci_dev *hdev = f->private;
266 struct list_head *p, *n;
269 list_for_each_safe(p, n, &hdev->link_keys) {
270 struct link_key *key = list_entry(p, struct link_key, list);
271 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
272 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
274 hci_dev_unlock(hdev);
279 static int link_keys_open(struct inode *inode, struct file *file)
281 return single_open(file, link_keys_show, inode->i_private);
284 static const struct file_operations link_keys_fops = {
285 .open = link_keys_open,
288 .release = single_release,
291 static int dev_class_show(struct seq_file *f, void *ptr)
293 struct hci_dev *hdev = f->private;
296 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
297 hdev->dev_class[1], hdev->dev_class[0]);
298 hci_dev_unlock(hdev);
303 static int dev_class_open(struct inode *inode, struct file *file)
305 return single_open(file, dev_class_show, inode->i_private);
308 static const struct file_operations dev_class_fops = {
309 .open = dev_class_open,
312 .release = single_release,
315 static int voice_setting_get(void *data, u64 *val)
317 struct hci_dev *hdev = data;
320 *val = hdev->voice_setting;
321 hci_dev_unlock(hdev);
326 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
327 NULL, "0x%4.4llx\n");
329 static int auto_accept_delay_set(void *data, u64 val)
331 struct hci_dev *hdev = data;
334 hdev->auto_accept_delay = val;
335 hci_dev_unlock(hdev);
340 static int auto_accept_delay_get(void *data, u64 *val)
342 struct hci_dev *hdev = data;
345 *val = hdev->auto_accept_delay;
346 hci_dev_unlock(hdev);
351 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
352 auto_accept_delay_set, "%llu\n");
354 static int ssp_debug_mode_set(void *data, u64 val)
356 struct hci_dev *hdev = data;
361 if (val != 0 && val != 1)
364 if (!test_bit(HCI_UP, &hdev->flags))
369 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
370 &mode, HCI_CMD_TIMEOUT);
371 hci_req_unlock(hdev);
376 err = -bt_to_errno(skb->data[0]);
383 hdev->ssp_debug_mode = val;
384 hci_dev_unlock(hdev);
389 static int ssp_debug_mode_get(void *data, u64 *val)
391 struct hci_dev *hdev = data;
394 *val = hdev->ssp_debug_mode;
395 hci_dev_unlock(hdev);
400 DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
401 ssp_debug_mode_set, "%llu\n");
403 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
404 size_t count, loff_t *ppos)
406 struct hci_dev *hdev = file->private_data;
409 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
412 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
415 static ssize_t force_sc_support_write(struct file *file,
416 const char __user *user_buf,
417 size_t count, loff_t *ppos)
419 struct hci_dev *hdev = file->private_data;
421 size_t buf_size = min(count, (sizeof(buf)-1));
424 if (test_bit(HCI_UP, &hdev->flags))
427 if (copy_from_user(buf, user_buf, buf_size))
430 buf[buf_size] = '\0';
431 if (strtobool(buf, &enable))
434 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
437 change_bit(HCI_FORCE_SC, &hdev->dev_flags);
442 static const struct file_operations force_sc_support_fops = {
444 .read = force_sc_support_read,
445 .write = force_sc_support_write,
446 .llseek = default_llseek,
449 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
450 size_t count, loff_t *ppos)
452 struct hci_dev *hdev = file->private_data;
455 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
458 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
461 static const struct file_operations sc_only_mode_fops = {
463 .read = sc_only_mode_read,
464 .llseek = default_llseek,
467 static int idle_timeout_set(void *data, u64 val)
469 struct hci_dev *hdev = data;
471 if (val != 0 && (val < 500 || val > 3600000))
475 hdev->idle_timeout = val;
476 hci_dev_unlock(hdev);
481 static int idle_timeout_get(void *data, u64 *val)
483 struct hci_dev *hdev = data;
486 *val = hdev->idle_timeout;
487 hci_dev_unlock(hdev);
492 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
493 idle_timeout_set, "%llu\n");
495 static int rpa_timeout_set(void *data, u64 val)
497 struct hci_dev *hdev = data;
499 /* Require the RPA timeout to be at least 30 seconds and at most
502 if (val < 30 || val > (60 * 60 * 24))
506 hdev->rpa_timeout = val;
507 hci_dev_unlock(hdev);
512 static int rpa_timeout_get(void *data, u64 *val)
514 struct hci_dev *hdev = data;
517 *val = hdev->rpa_timeout;
518 hci_dev_unlock(hdev);
523 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
524 rpa_timeout_set, "%llu\n");
526 static int sniff_min_interval_set(void *data, u64 val)
528 struct hci_dev *hdev = data;
530 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
534 hdev->sniff_min_interval = val;
535 hci_dev_unlock(hdev);
540 static int sniff_min_interval_get(void *data, u64 *val)
542 struct hci_dev *hdev = data;
545 *val = hdev->sniff_min_interval;
546 hci_dev_unlock(hdev);
551 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
552 sniff_min_interval_set, "%llu\n");
554 static int sniff_max_interval_set(void *data, u64 val)
556 struct hci_dev *hdev = data;
558 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
562 hdev->sniff_max_interval = val;
563 hci_dev_unlock(hdev);
568 static int sniff_max_interval_get(void *data, u64 *val)
570 struct hci_dev *hdev = data;
573 *val = hdev->sniff_max_interval;
574 hci_dev_unlock(hdev);
579 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
580 sniff_max_interval_set, "%llu\n");
582 static int identity_show(struct seq_file *f, void *p)
584 struct hci_dev *hdev = f->private;
590 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
591 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
592 addr = &hdev->static_addr;
593 addr_type = ADDR_LE_DEV_RANDOM;
595 addr = &hdev->bdaddr;
596 addr_type = ADDR_LE_DEV_PUBLIC;
599 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", addr, addr_type,
600 16, hdev->irk, &hdev->rpa);
602 hci_dev_unlock(hdev);
607 static int identity_open(struct inode *inode, struct file *file)
609 return single_open(file, identity_show, inode->i_private);
612 static const struct file_operations identity_fops = {
613 .open = identity_open,
616 .release = single_release,
619 static int random_address_show(struct seq_file *f, void *p)
621 struct hci_dev *hdev = f->private;
624 seq_printf(f, "%pMR\n", &hdev->random_addr);
625 hci_dev_unlock(hdev);
630 static int random_address_open(struct inode *inode, struct file *file)
632 return single_open(file, random_address_show, inode->i_private);
635 static const struct file_operations random_address_fops = {
636 .open = random_address_open,
639 .release = single_release,
642 static int static_address_show(struct seq_file *f, void *p)
644 struct hci_dev *hdev = f->private;
647 seq_printf(f, "%pMR\n", &hdev->static_addr);
648 hci_dev_unlock(hdev);
653 static int static_address_open(struct inode *inode, struct file *file)
655 return single_open(file, static_address_show, inode->i_private);
658 static const struct file_operations static_address_fops = {
659 .open = static_address_open,
662 .release = single_release,
665 static ssize_t force_static_address_read(struct file *file,
666 char __user *user_buf,
667 size_t count, loff_t *ppos)
669 struct hci_dev *hdev = file->private_data;
672 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
675 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
678 static ssize_t force_static_address_write(struct file *file,
679 const char __user *user_buf,
680 size_t count, loff_t *ppos)
682 struct hci_dev *hdev = file->private_data;
684 size_t buf_size = min(count, (sizeof(buf)-1));
687 if (test_bit(HCI_UP, &hdev->flags))
690 if (copy_from_user(buf, user_buf, buf_size))
693 buf[buf_size] = '\0';
694 if (strtobool(buf, &enable))
697 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
700 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);
705 static const struct file_operations force_static_address_fops = {
707 .read = force_static_address_read,
708 .write = force_static_address_write,
709 .llseek = default_llseek,
712 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
714 struct hci_dev *hdev = f->private;
715 struct list_head *p, *n;
718 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
719 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
720 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
721 &irk->bdaddr, irk->addr_type,
722 16, irk->val, &irk->rpa);
724 hci_dev_unlock(hdev);
729 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
731 return single_open(file, identity_resolving_keys_show,
735 static const struct file_operations identity_resolving_keys_fops = {
736 .open = identity_resolving_keys_open,
739 .release = single_release,
742 static int long_term_keys_show(struct seq_file *f, void *ptr)
744 struct hci_dev *hdev = f->private;
745 struct list_head *p, *n;
748 list_for_each_safe(p, n, &hdev->long_term_keys) {
749 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
750 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
751 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
752 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
753 8, ltk->rand, 16, ltk->val);
755 hci_dev_unlock(hdev);
760 static int long_term_keys_open(struct inode *inode, struct file *file)
762 return single_open(file, long_term_keys_show, inode->i_private);
765 static const struct file_operations long_term_keys_fops = {
766 .open = long_term_keys_open,
769 .release = single_release,
772 static int conn_min_interval_set(void *data, u64 val)
774 struct hci_dev *hdev = data;
776 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
780 hdev->le_conn_min_interval = val;
781 hci_dev_unlock(hdev);
786 static int conn_min_interval_get(void *data, u64 *val)
788 struct hci_dev *hdev = data;
791 *val = hdev->le_conn_min_interval;
792 hci_dev_unlock(hdev);
797 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
798 conn_min_interval_set, "%llu\n");
800 static int conn_max_interval_set(void *data, u64 val)
802 struct hci_dev *hdev = data;
804 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
808 hdev->le_conn_max_interval = val;
809 hci_dev_unlock(hdev);
814 static int conn_max_interval_get(void *data, u64 *val)
816 struct hci_dev *hdev = data;
819 *val = hdev->le_conn_max_interval;
820 hci_dev_unlock(hdev);
825 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
826 conn_max_interval_set, "%llu\n");
828 static int adv_channel_map_set(void *data, u64 val)
830 struct hci_dev *hdev = data;
832 if (val < 0x01 || val > 0x07)
836 hdev->le_adv_channel_map = val;
837 hci_dev_unlock(hdev);
842 static int adv_channel_map_get(void *data, u64 *val)
844 struct hci_dev *hdev = data;
847 *val = hdev->le_adv_channel_map;
848 hci_dev_unlock(hdev);
853 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
854 adv_channel_map_set, "%llu\n");
856 static ssize_t lowpan_read(struct file *file, char __user *user_buf,
857 size_t count, loff_t *ppos)
859 struct hci_dev *hdev = file->private_data;
862 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
865 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
868 static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
869 size_t count, loff_t *position)
871 struct hci_dev *hdev = fp->private_data;
874 size_t buf_size = min(count, (sizeof(buf)-1));
876 if (copy_from_user(buf, user_buffer, buf_size))
879 buf[buf_size] = '\0';
881 if (strtobool(buf, &enable) < 0)
884 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
887 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
892 static const struct file_operations lowpan_debugfs_fops = {
895 .write = lowpan_write,
896 .llseek = default_llseek,
899 /* ---- HCI requests ---- */
901 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
903 BT_DBG("%s result 0x%2.2x", hdev->name, result);
905 if (hdev->req_status == HCI_REQ_PEND) {
906 hdev->req_result = result;
907 hdev->req_status = HCI_REQ_DONE;
908 wake_up_interruptible(&hdev->req_wait_q);
912 static void hci_req_cancel(struct hci_dev *hdev, int err)
914 BT_DBG("%s err 0x%2.2x", hdev->name, err);
916 if (hdev->req_status == HCI_REQ_PEND) {
917 hdev->req_result = err;
918 hdev->req_status = HCI_REQ_CANCELED;
919 wake_up_interruptible(&hdev->req_wait_q);
923 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
926 struct hci_ev_cmd_complete *ev;
927 struct hci_event_hdr *hdr;
932 skb = hdev->recv_evt;
933 hdev->recv_evt = NULL;
935 hci_dev_unlock(hdev);
938 return ERR_PTR(-ENODATA);
940 if (skb->len < sizeof(*hdr)) {
941 BT_ERR("Too short HCI event");
945 hdr = (void *) skb->data;
946 skb_pull(skb, HCI_EVENT_HDR_SIZE);
949 if (hdr->evt != event)
954 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
955 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
959 if (skb->len < sizeof(*ev)) {
960 BT_ERR("Too short cmd_complete event");
964 ev = (void *) skb->data;
965 skb_pull(skb, sizeof(*ev));
967 if (opcode == __le16_to_cpu(ev->opcode))
970 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
971 __le16_to_cpu(ev->opcode));
975 return ERR_PTR(-ENODATA);
978 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
979 const void *param, u8 event, u32 timeout)
981 DECLARE_WAITQUEUE(wait, current);
982 struct hci_request req;
985 BT_DBG("%s", hdev->name);
987 hci_req_init(&req, hdev);
989 hci_req_add_ev(&req, opcode, plen, param, event);
991 hdev->req_status = HCI_REQ_PEND;
993 err = hci_req_run(&req, hci_req_sync_complete);
997 add_wait_queue(&hdev->req_wait_q, &wait);
998 set_current_state(TASK_INTERRUPTIBLE);
1000 schedule_timeout(timeout);
1002 remove_wait_queue(&hdev->req_wait_q, &wait);
1004 if (signal_pending(current))
1005 return ERR_PTR(-EINTR);
1007 switch (hdev->req_status) {
1009 err = -bt_to_errno(hdev->req_result);
1012 case HCI_REQ_CANCELED:
1013 err = -hdev->req_result;
1021 hdev->req_status = hdev->req_result = 0;
1023 BT_DBG("%s end: err %d", hdev->name, err);
1026 return ERR_PTR(err);
1028 return hci_get_cmd_complete(hdev, opcode, event);
1030 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1032 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1033 const void *param, u32 timeout)
1035 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1037 EXPORT_SYMBOL(__hci_cmd_sync);
1039 /* Execute request and wait for completion. */
1040 static int __hci_req_sync(struct hci_dev *hdev,
1041 void (*func)(struct hci_request *req,
1043 unsigned long opt, __u32 timeout)
1045 struct hci_request req;
1046 DECLARE_WAITQUEUE(wait, current);
1049 BT_DBG("%s start", hdev->name);
1051 hci_req_init(&req, hdev);
1053 hdev->req_status = HCI_REQ_PEND;
1057 err = hci_req_run(&req, hci_req_sync_complete);
1059 hdev->req_status = 0;
1061 /* ENODATA means the HCI request command queue is empty.
1062 * This can happen when a request with conditionals doesn't
1063 * trigger any commands to be sent. This is normal behavior
1064 * and should not trigger an error return.
1066 if (err == -ENODATA)
1072 add_wait_queue(&hdev->req_wait_q, &wait);
1073 set_current_state(TASK_INTERRUPTIBLE);
1075 schedule_timeout(timeout);
1077 remove_wait_queue(&hdev->req_wait_q, &wait);
1079 if (signal_pending(current))
1082 switch (hdev->req_status) {
1084 err = -bt_to_errno(hdev->req_result);
1087 case HCI_REQ_CANCELED:
1088 err = -hdev->req_result;
1096 hdev->req_status = hdev->req_result = 0;
1098 BT_DBG("%s end: err %d", hdev->name, err);
1103 static int hci_req_sync(struct hci_dev *hdev,
1104 void (*req)(struct hci_request *req,
1106 unsigned long opt, __u32 timeout)
1110 if (!test_bit(HCI_UP, &hdev->flags))
1113 /* Serialize all requests */
1115 ret = __hci_req_sync(hdev, req, opt, timeout);
1116 hci_req_unlock(hdev);
1121 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1123 BT_DBG("%s %ld", req->hdev->name, opt);
1126 set_bit(HCI_RESET, &req->hdev->flags);
1127 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1130 static void bredr_init(struct hci_request *req)
1132 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1134 /* Read Local Supported Features */
1135 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1137 /* Read Local Version */
1138 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1140 /* Read BD Address */
1141 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1144 static void amp_init(struct hci_request *req)
1146 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1148 /* Read Local Version */
1149 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1151 /* Read Local Supported Commands */
1152 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1154 /* Read Local Supported Features */
1155 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1157 /* Read Local AMP Info */
1158 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1160 /* Read Data Blk size */
1161 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1163 /* Read Flow Control Mode */
1164 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1166 /* Read Location Data */
1167 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1170 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1172 struct hci_dev *hdev = req->hdev;
1174 BT_DBG("%s %ld", hdev->name, opt);
1177 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1178 hci_reset_req(req, 0);
1180 switch (hdev->dev_type) {
1190 BT_ERR("Unknown device type %d", hdev->dev_type);
1195 static void bredr_setup(struct hci_request *req)
1197 struct hci_dev *hdev = req->hdev;
1202 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1203 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1205 /* Read Class of Device */
1206 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1208 /* Read Local Name */
1209 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1211 /* Read Voice Setting */
1212 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1214 /* Read Number of Supported IAC */
1215 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1217 /* Read Current IAC LAP */
1218 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1220 /* Clear Event Filters */
1221 flt_type = HCI_FLT_CLEAR_ALL;
1222 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1224 /* Connection accept timeout ~20 secs */
1225 param = __constant_cpu_to_le16(0x7d00);
1226 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1228 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1229 * but it does not support page scan related HCI commands.
1231 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1232 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1233 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1237 static void le_setup(struct hci_request *req)
1239 struct hci_dev *hdev = req->hdev;
1241 /* Read LE Buffer Size */
1242 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1244 /* Read LE Local Supported Features */
1245 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1247 /* Read LE Advertising Channel TX Power */
1248 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1250 /* Read LE White List Size */
1251 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1253 /* Read LE Supported States */
1254 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1256 /* LE-only controllers have LE implicitly enabled */
1257 if (!lmp_bredr_capable(hdev))
1258 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1261 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1263 if (lmp_ext_inq_capable(hdev))
1266 if (lmp_inq_rssi_capable(hdev))
1269 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1270 hdev->lmp_subver == 0x0757)
1273 if (hdev->manufacturer == 15) {
1274 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1276 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1278 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1282 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1283 hdev->lmp_subver == 0x1805)
1289 static void hci_setup_inquiry_mode(struct hci_request *req)
1293 mode = hci_get_inquiry_mode(req->hdev);
1295 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1298 static void hci_setup_event_mask(struct hci_request *req)
1300 struct hci_dev *hdev = req->hdev;
1302 /* The second byte is 0xff instead of 0x9f (two reserved bits
1303 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1304 * command otherwise.
1306 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1308 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1309 * any event mask for pre 1.2 devices.
1311 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1314 if (lmp_bredr_capable(hdev)) {
1315 events[4] |= 0x01; /* Flow Specification Complete */
1316 events[4] |= 0x02; /* Inquiry Result with RSSI */
1317 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1318 events[5] |= 0x08; /* Synchronous Connection Complete */
1319 events[5] |= 0x10; /* Synchronous Connection Changed */
1321 /* Use a different default for LE-only devices */
1322 memset(events, 0, sizeof(events));
1323 events[0] |= 0x10; /* Disconnection Complete */
1324 events[0] |= 0x80; /* Encryption Change */
1325 events[1] |= 0x08; /* Read Remote Version Information Complete */
1326 events[1] |= 0x20; /* Command Complete */
1327 events[1] |= 0x40; /* Command Status */
1328 events[1] |= 0x80; /* Hardware Error */
1329 events[2] |= 0x04; /* Number of Completed Packets */
1330 events[3] |= 0x02; /* Data Buffer Overflow */
1331 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1334 if (lmp_inq_rssi_capable(hdev))
1335 events[4] |= 0x02; /* Inquiry Result with RSSI */
1337 if (lmp_sniffsubr_capable(hdev))
1338 events[5] |= 0x20; /* Sniff Subrating */
1340 if (lmp_pause_enc_capable(hdev))
1341 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1343 if (lmp_ext_inq_capable(hdev))
1344 events[5] |= 0x40; /* Extended Inquiry Result */
1346 if (lmp_no_flush_capable(hdev))
1347 events[7] |= 0x01; /* Enhanced Flush Complete */
1349 if (lmp_lsto_capable(hdev))
1350 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1352 if (lmp_ssp_capable(hdev)) {
1353 events[6] |= 0x01; /* IO Capability Request */
1354 events[6] |= 0x02; /* IO Capability Response */
1355 events[6] |= 0x04; /* User Confirmation Request */
1356 events[6] |= 0x08; /* User Passkey Request */
1357 events[6] |= 0x10; /* Remote OOB Data Request */
1358 events[6] |= 0x20; /* Simple Pairing Complete */
1359 events[7] |= 0x04; /* User Passkey Notification */
1360 events[7] |= 0x08; /* Keypress Notification */
1361 events[7] |= 0x10; /* Remote Host Supported
1362 * Features Notification
1366 if (lmp_le_capable(hdev))
1367 events[7] |= 0x20; /* LE Meta-Event */
1369 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1371 if (lmp_le_capable(hdev)) {
1372 memset(events, 0, sizeof(events));
1374 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1375 sizeof(events), events);
1379 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1381 struct hci_dev *hdev = req->hdev;
1383 if (lmp_bredr_capable(hdev))
1386 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1388 if (lmp_le_capable(hdev))
1391 hci_setup_event_mask(req);
1393 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1394 * local supported commands HCI command.
1396 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1397 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1399 if (lmp_ssp_capable(hdev)) {
1400 /* When SSP is available, then the host features page
1401 * should also be available as well. However some
1402 * controllers list the max_page as 0 as long as SSP
1403 * has not been enabled. To achieve proper debugging
1404 * output, force the minimum max_page to 1 at least.
1406 hdev->max_page = 0x01;
1408 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1410 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1411 sizeof(mode), &mode);
1413 struct hci_cp_write_eir cp;
1415 memset(hdev->eir, 0, sizeof(hdev->eir));
1416 memset(&cp, 0, sizeof(cp));
1418 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1422 if (lmp_inq_rssi_capable(hdev))
1423 hci_setup_inquiry_mode(req);
1425 if (lmp_inq_tx_pwr_capable(hdev))
1426 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1428 if (lmp_ext_feat_capable(hdev)) {
1429 struct hci_cp_read_local_ext_features cp;
1432 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1436 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1438 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1443 static void hci_setup_link_policy(struct hci_request *req)
1445 struct hci_dev *hdev = req->hdev;
1446 struct hci_cp_write_def_link_policy cp;
1447 u16 link_policy = 0;
1449 if (lmp_rswitch_capable(hdev))
1450 link_policy |= HCI_LP_RSWITCH;
1451 if (lmp_hold_capable(hdev))
1452 link_policy |= HCI_LP_HOLD;
1453 if (lmp_sniff_capable(hdev))
1454 link_policy |= HCI_LP_SNIFF;
1455 if (lmp_park_capable(hdev))
1456 link_policy |= HCI_LP_PARK;
1458 cp.policy = cpu_to_le16(link_policy);
1459 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1462 static void hci_set_le_support(struct hci_request *req)
1464 struct hci_dev *hdev = req->hdev;
1465 struct hci_cp_write_le_host_supported cp;
1467 /* LE-only devices do not support explicit enablement */
1468 if (!lmp_bredr_capable(hdev))
1471 memset(&cp, 0, sizeof(cp));
1473 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1475 cp.simul = lmp_le_br_capable(hdev);
1478 if (cp.le != lmp_host_le_capable(hdev))
1479 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Compose and queue the second page of the HCI event mask, enabling the
 * Connectionless Slave Broadcast (master and slave role) events and the
 * Authenticated Payload Timeout Expired event when the corresponding LMP
 * features are supported. */
1483 static void hci_set_event_mask_page_2(struct hci_request *req)
1485 struct hci_dev *hdev = req->hdev;
1486 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1488 /* If Connectionless Slave Broadcast master role is supported
1489 * enable all necessary events for it.
1491 if (lmp_csb_master_capable(hdev)) {
1492 events[1] |= 0x40; /* Triggered Clock Capture */
1493 events[1] |= 0x80; /* Synchronization Train Complete */
1494 events[2] |= 0x10; /* Slave Page Response Timeout */
1495 events[2] |= 0x20; /* CSB Channel Map Change */
1498 /* If Connectionless Slave Broadcast slave role is supported
1499 * enable all necessary events for it.
1501 if (lmp_csb_slave_capable(hdev)) {
1502 events[2] |= 0x01; /* Synchronization Train Received */
1503 events[2] |= 0x02; /* CSB Receive */
1504 events[2] |= 0x04; /* CSB Timeout */
1505 events[2] |= 0x08; /* Truncated Page Complete */
1508 /* Enable Authenticated Payload Timeout Expired event if supported */
1509 if (lmp_ping_capable(hdev))
1512 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Third stage of controller initialization: delete stored link keys when
 * safe, configure the default link policy, enable LE host support, and
 * read all extended feature pages beyond page 1. */
1515 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1517 struct hci_dev *hdev = req->hdev;
1520 /* Some Broadcom based Bluetooth controllers do not support the
1521 * Delete Stored Link Key command. They are clearly indicating its
1522 * absence in the bit mask of supported commands.
1524 * Check the supported commands and only if the the command is marked
1525 * as supported send it. If not supported assume that the controller
1526 * does not have actual support for stored link keys which makes this
1527 * command redundant anyway.
1529 * Some controllers indicate that they support handling deleting
1530 * stored link keys, but they don't. The quirk lets a driver
1531 * just disable this command.
/* commands[6] bit 7 == Delete Stored Link Key supported. */
1533 if (hdev->commands[6] & 0x80 &&
1534 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1535 struct hci_cp_delete_stored_link_key cp;
1537 bacpy(&cp.bdaddr, BDADDR_ANY);
1538 cp.delete_all = 0x01;
1539 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* commands[5] bit 4 == Write Default Link Policy supported. */
1543 if (hdev->commands[5] & 0x10)
1544 hci_setup_link_policy(req);
1546 if (lmp_le_capable(hdev))
1547 hci_set_le_support(req);
1549 /* Read features beyond page 1 if available */
1550 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1551 struct hci_cp_read_local_ext_features cp;
1554 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Fourth stage of controller initialization: set event mask page 2,
 * read synchronization train parameters, and enable Secure Connections
 * when both supported (or forced) and configured. */
1559 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1561 struct hci_dev *hdev = req->hdev;
1563 /* Set event mask page 2 if the HCI command for it is supported */
1564 if (hdev->commands[22] & 0x04)
1565 hci_set_event_mask_page_2(req);
1567 /* Check for Synchronization Train support */
1568 if (lmp_sync_train_capable(hdev))
1569 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1571 /* Enable Secure Connections if supported and configured */
/* HCI_FORCE_SC is a debugfs override that pretends SC capability. */
1572 if ((lmp_sc_capable(hdev) ||
1573 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
1574 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1576 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1577 sizeof(support), &support);
/* Run the full synchronous HCI init sequence (stages 1-4) and, during the
 * initial HCI_SETUP phase only, create the per-controller debugfs entries.
 * AMP controllers stop after stage 1; BR/EDR/LE controllers run all four.
 * Returns 0 on success or the first failing stage's error code. */
1581 static int __hci_init(struct hci_dev *hdev)
1585 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1589 /* The Device Under Test (DUT) mode is special and available for
1590 * all controller types. So just create it early on.
1592 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1593 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1597 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1598 * BR/EDR/LE type controllers. AMP controllers only need the
/* AMP controllers bail out here after stage 1. */
1601 if (hdev->dev_type != HCI_BREDR)
1604 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1608 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1612 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1616 /* Only create debugfs entries during the initial setup
1617 * phase and not every time the controller gets powered on.
1619 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
/* Generic entries available on every controller type. */
1622 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1624 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1625 &hdev->manufacturer);
1626 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1627 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1628 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1630 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
/* BR/EDR-only entries. */
1632 if (lmp_bredr_capable(hdev)) {
1633 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1634 hdev, &inquiry_cache_fops);
1635 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1636 hdev, &link_keys_fops);
1637 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1638 hdev, &dev_class_fops);
1639 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1640 hdev, &voice_setting_fops);
/* Secure Simple Pairing entries. */
1643 if (lmp_ssp_capable(hdev)) {
1644 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1645 hdev, &auto_accept_delay_fops);
1646 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1647 hdev, &ssp_debug_mode_fops);
1648 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1649 hdev, &force_sc_support_fops);
1650 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1651 hdev, &sc_only_mode_fops);
/* Sniff-mode tuning entries. */
1654 if (lmp_sniff_capable(hdev)) {
1655 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1656 hdev, &idle_timeout_fops);
1657 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1658 hdev, &sniff_min_interval_fops);
1659 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1660 hdev, &sniff_max_interval_fops);
/* Low Energy entries. */
1663 if (lmp_le_capable(hdev)) {
1664 debugfs_create_file("identity", 0400, hdev->debugfs,
1665 hdev, &identity_fops);
1666 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1667 hdev, &rpa_timeout_fops);
1668 debugfs_create_file("random_address", 0444, hdev->debugfs,
1669 hdev, &random_address_fops);
1670 debugfs_create_file("static_address", 0444, hdev->debugfs,
1671 hdev, &static_address_fops);
1673 /* For controllers with a public address, provide a debug
1674 * option to force the usage of the configured static
1675 * address. By default the public address is used.
1677 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1678 debugfs_create_file("force_static_address", 0644,
1679 hdev->debugfs, hdev,
1680 &force_static_address_fops);
1682 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1683 &hdev->le_white_list_size);
1684 debugfs_create_file("identity_resolving_keys", 0400,
1685 hdev->debugfs, hdev,
1686 &identity_resolving_keys_fops);
1687 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1688 hdev, &long_term_keys_fops);
1689 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1690 hdev, &conn_min_interval_fops);
1691 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1692 hdev, &conn_max_interval_fops);
1693 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1694 hdev, &adv_channel_map_fops);
1695 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1696 &lowpan_debugfs_fops);
/* Request builder for HCISETSCAN: queue a Write Scan Enable command with
 * the scan mode passed in 'opt' (derived from it above this view). */
1702 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1706 BT_DBG("%s %x", req->hdev->name, scan);
1708 /* Inquiry and Page scans */
1709 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request builder for HCISETAUTH: queue a Write Authentication Enable
 * command with the value passed in 'opt'. */
1712 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1716 BT_DBG("%s %x", req->hdev->name, auth);
1718 /* Authentication */
1719 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request builder for HCISETENCRYPT: queue a Write Encryption Mode
 * command with the value passed in 'opt'. */
1722 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1726 BT_DBG("%s %x", req->hdev->name, encrypt);
1729 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request builder for HCISETLINKPOL: queue a Write Default Link Policy
 * command with the 16-bit policy passed in 'opt' (converted to LE). */
1732 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1734 __le16 policy = cpu_to_le16(opt);
1736 BT_DBG("%s %x", req->hdev->name, policy);
1738 /* Default link policy */
1739 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1742 /* Get HCI device by index.
1743 * Device is held on return. */
/* Walks hci_dev_list under the read lock; returns the matching hci_dev
 * with its reference count raised, or NULL if no device has that index. */
1744 struct hci_dev *hci_dev_get(int index)
1746 struct hci_dev *hdev = NULL, *d;
1748 BT_DBG("%d", index);
1753 read_lock(&hci_dev_list_lock);
1754 list_for_each_entry(d, &hci_dev_list, list) {
1755 if (d->id == index) {
/* Take a reference while still holding the list lock. */
1756 hdev = hci_dev_hold(d);
1760 read_unlock(&hci_dev_list_lock);
1764 /* ---- Inquiry support ---- */
/* Return true while a discovery procedure is in progress, i.e. the
 * discovery state machine is in FINDING or RESOLVING. */
1766 bool hci_discovery_active(struct hci_dev *hdev)
1768 struct discovery_state *discov = &hdev->discovery;
1770 switch (discov->state) {
1771 case DISCOVERY_FINDING:
1772 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit the matching mgmt
 * "discovering" events; a no-op when the state is unchanged. */
1780 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1782 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1784 if (hdev->discovery.state == state)
1788 case DISCOVERY_STOPPED:
/* Discovery finished: reconsider passive background scanning. */
1789 hci_update_background_scan(hdev);
/* STARTING -> STOPPED means discovery never ran; don't report it. */
1791 if (hdev->discovery.state != DISCOVERY_STARTING)
1792 mgmt_discovering(hdev, 0);
1794 case DISCOVERY_STARTING:
1796 case DISCOVERY_FINDING:
1797 mgmt_discovering(hdev, 1);
1799 case DISCOVERY_RESOLVING:
1801 case DISCOVERY_STOPPING:
1805 hdev->discovery.state = state;
/* Drop every entry from the inquiry cache and reset its unknown/resolve
 * sub-lists to empty. */
1808 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1810 struct discovery_state *cache = &hdev->discovery;
1811 struct inquiry_entry *p, *n;
/* _safe variant: entries are freed while walking (freeing lines not
 * visible in this view). */
1813 list_for_each_entry_safe(p, n, &cache->all, all) {
1818 INIT_LIST_HEAD(&cache->unknown);
1819 INIT_LIST_HEAD(&cache->resolve);
/* Look up an inquiry cache entry by Bluetooth address on the 'all' list;
 * returns the entry or NULL (return paths outside this view). */
1822 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1825 struct discovery_state *cache = &hdev->discovery;
1826 struct inquiry_entry *e;
1828 BT_DBG("cache %p, %pMR", cache, bdaddr);
1830 list_for_each_entry(e, &cache->all, all) {
1831 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Look up an entry by address on the 'unknown' list (devices whose
 * remote name has not yet been resolved). */
1838 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1841 struct discovery_state *cache = &hdev->discovery;
1842 struct inquiry_entry *e;
1844 BT_DBG("cache %p, %pMR", cache, bdaddr);
1846 list_for_each_entry(e, &cache->unknown, list) {
1847 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Look up an entry on the 'resolve' list. BDADDR_ANY matches the first
 * entry in the given name_state; otherwise match by exact address. */
1854 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1858 struct discovery_state *cache = &hdev->discovery;
1859 struct inquiry_entry *e;
1861 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1863 list_for_each_entry(e, &cache->resolve, list) {
1864 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1866 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert 'ie' into the resolve list, keeping that list ordered so
 * that stronger-RSSI, not-yet-pending entries are resolved first. */
1873 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1874 struct inquiry_entry *ie)
1876 struct discovery_state *cache = &hdev->discovery;
1877 struct list_head *pos = &cache->resolve;
1878 struct inquiry_entry *p;
1880 list_del(&ie->list);
/* Find the first entry ie should precede (RSSI is negative; smaller
 * abs() means stronger signal). */
1882 list_for_each_entry(p, &cache->resolve, list) {
1883 if (p->name_state != NAME_PENDING &&
1884 abs(p->data.rssi) >= abs(ie->data.rssi))
1889 list_add(&ie->list, pos);
/* Insert or refresh an inquiry result in the cache. Updates RSSI and
 * name-state bookkeeping for existing entries, allocates a new entry
 * otherwise, and reports the peer's SSP mode through *ssp. The boolean
 * return value's exact meaning depends on lines outside this view. */
1892 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1893 bool name_known, bool *ssp)
1895 struct discovery_state *cache = &hdev->discovery;
1896 struct inquiry_entry *ie;
1898 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* A fresh inquiry result invalidates any stored OOB data for the peer. */
1900 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1903 *ssp = data->ssp_mode;
1905 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1907 if (ie->data.ssp_mode && ssp)
/* RSSI changed while a name resolution is still needed: re-sort the
 * resolve list so the strongest device is named first. */
1910 if (ie->name_state == NAME_NEEDED &&
1911 data->rssi != ie->data.rssi) {
1912 ie->data.rssi = data->rssi;
1913 hci_inquiry_cache_update_resolve(hdev, ie);
1919 /* Entry not in the cache. Add new one. */
/* GFP_ATOMIC: may run from event-processing context. */
1920 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1924 list_add(&ie->all, &cache->all);
1927 ie->name_state = NAME_KNOWN;
1929 ie->name_state = NAME_NOT_KNOWN;
1930 list_add(&ie->list, &cache->unknown);
/* Name just became known: drop the entry from unknown/resolve lists. */
1934 if (name_known && ie->name_state != NAME_KNOWN &&
1935 ie->name_state != NAME_PENDING) {
1936 ie->name_state = NAME_KNOWN;
1937 list_del(&ie->list);
1940 memcpy(&ie->data, data, sizeof(*data));
1941 ie->timestamp = jiffies;
1942 cache->timestamp = jiffies;
1944 if (ie->name_state == NAME_NOT_KNOWN)
/* Serialize up to 'num' cached inquiry entries into 'buf' as an array of
 * struct inquiry_info, for copying to user space by hci_inquiry(). */
1950 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1952 struct discovery_state *cache = &hdev->discovery;
1953 struct inquiry_info *info = (struct inquiry_info *) buf;
1954 struct inquiry_entry *e;
1957 list_for_each_entry(e, &cache->all, all) {
1958 struct inquiry_data *data = &e->data;
1963 bacpy(&info->bdaddr, &data->bdaddr);
1964 info->pscan_rep_mode = data->pscan_rep_mode;
1965 info->pscan_period_mode = data->pscan_period_mode;
1966 info->pscan_mode = data->pscan_mode;
1967 memcpy(info->dev_class, data->dev_class, 3);
1968 info->clock_offset = data->clock_offset;
1974 BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder for hci_inquiry(): queue an Inquiry command built from
 * the user-supplied hci_inquiry_req, unless an inquiry is already active. */
1978 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1980 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1981 struct hci_dev *hdev = req->hdev;
1982 struct hci_cp_inquiry cp;
1984 BT_DBG("%s", hdev->name);
1986 if (test_bit(HCI_INQUIRY, &hdev->flags))
1990 memcpy(&cp.lap, &ir->lap, 3);
1991 cp.length = ir->length;
1992 cp.num_rsp = ir->num_rsp;
1993 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit() action: report a pending signal so the wait in
 * hci_inquiry() can be interrupted. */
1996 static int wait_inquiry(void *word)
1999 return signal_pending(current);
/* HCIINQUIRY ioctl handler: validate the device, optionally flush a stale
 * cache and run a fresh inquiry, then copy the cached results back to
 * user space. Returns 0 or a negative errno. */
2002 int hci_inquiry(void __user *arg)
2004 __u8 __user *ptr = arg;
2005 struct hci_inquiry_req ir;
2006 struct hci_dev *hdev;
2007 int err = 0, do_inquiry = 0, max_rsp;
2011 if (copy_from_user(&ir, ptr, sizeof(ir)))
2014 hdev = hci_dev_get(ir.dev_id);
/* Raw user-channel devices are owned by a single process; refuse. */
2018 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2023 if (hdev->dev_type != HCI_BREDR) {
2028 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Re-run the inquiry when the cache is too old, empty, or the caller
 * explicitly asked for a flush. */
2034 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2035 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2036 hci_inquiry_cache_flush(hdev);
2039 hci_dev_unlock(hdev);
/* ir.length is in 1.28s units; 2000ms gives generous headroom. */
2041 timeo = ir.length * msecs_to_jiffies(2000);
2044 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2049 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2050 * cleared). If it is interrupted by a signal, return -EINTR.
2052 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2053 TASK_INTERRUPTIBLE))
2057 /* for unlimited number of responses we will use buffer with
2060 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2062 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2063 * copy it to the user space.
2065 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2072 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2073 hci_dev_unlock(hdev);
2075 BT_DBG("num_rsp %d", ir.num_rsp);
/* Copy the header first, then the variable-length result array. */
2077 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2079 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Core power-on path: validate preconditions (not unregistering, not
 * rfkilled, has a usable address), open the transport, run the driver
 * setup and the HCI init sequence, then mark the device HCI_UP. On init
 * failure the work queues and pending buffers are torn down again.
 * Returns 0 or a negative errno. */
2092 static int hci_dev_do_open(struct hci_dev *hdev)
2096 BT_DBG("%s %p", hdev->name, hdev);
2100 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2105 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2106 /* Check for rfkill but allow the HCI setup stage to
2107 * proceed (which in itself doesn't cause any RF activity).
2109 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2114 /* Check for valid public address or a configured static
2115 * random adddress, but let the HCI setup proceed to
2116 * be able to determine if there is a public address
2119 * In case of user channel usage, it is not important
2120 * if a public address or static random address is
2123 * This check is only valid for BR/EDR controllers
2124 * since AMP controllers do not have an address.
2126 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2127 hdev->dev_type == HCI_BREDR &&
2128 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2129 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2130 ret = -EADDRNOTAVAIL;
/* Already up: nothing to do (success path handled outside view). */
2135 if (test_bit(HCI_UP, &hdev->flags)) {
2140 if (hdev->open(hdev)) {
/* One free command credit until the controller reports its limits. */
2145 atomic_set(&hdev->cmd_cnt, 1);
2146 set_bit(HCI_INIT, &hdev->flags);
/* Driver-specific setup runs only once, during the SETUP phase. */
2148 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2149 ret = hdev->setup(hdev);
2152 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2153 set_bit(HCI_RAW, &hdev->flags);
/* Full HCI init is skipped for raw and user-channel devices. */
2155 if (!test_bit(HCI_RAW, &hdev->flags) &&
2156 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2157 ret = __hci_init(hdev);
2160 clear_bit(HCI_INIT, &hdev->flags);
2164 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2165 set_bit(HCI_UP, &hdev->flags);
2166 hci_notify(hdev, HCI_DEV_UP);
2167 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2168 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2169 hdev->dev_type == HCI_BREDR) {
2171 mgmt_powered(hdev, 1);
2172 hci_dev_unlock(hdev);
2175 /* Init failed, cleanup */
2176 flush_work(&hdev->tx_work);
2177 flush_work(&hdev->cmd_work);
2178 flush_work(&hdev->rx_work);
2180 skb_queue_purge(&hdev->cmd_q);
2181 skb_queue_purge(&hdev->rx_q);
2186 if (hdev->sent_cmd) {
2187 kfree_skb(hdev->sent_cmd);
2188 hdev->sent_cmd = NULL;
2196 hci_req_unlock(hdev);
2200 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl handler: resolve the device index, cancel a pending
 * auto power-off, wait for any in-flight setup work to finish, then run
 * the actual open via hci_dev_do_open(). */
2202 int hci_dev_open(__u16 dev)
2204 struct hci_dev *hdev;
2207 hdev = hci_dev_get(dev);
2211 /* We need to ensure that no other power on/off work is pending
2212 * before proceeding to call hci_dev_do_open. This is
2213 * particularly important if the setup procedure has not yet
2216 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2217 cancel_delayed_work(&hdev->power_off);
2219 /* After this call it is guaranteed that the setup procedure
2220 * has finished. This means that error conditions like RFKILL
2221 * or no valid public or static random address apply.
2223 flush_workqueue(hdev->req_workqueue);
2225 err = hci_dev_do_open(hdev);
/* Core power-off path: cancel timers and delayed work, flush the work
 * queues, drain the inquiry cache and connection hash, optionally reset
 * the controller (HCI_QUIRK_RESET_ON_CLOSE), drop pending buffers, and
 * clear all non-persistent device flags. */
2232 static int hci_dev_do_close(struct hci_dev *hdev)
2234 BT_DBG("%s %p", hdev->name, hdev);
2236 cancel_delayed_work(&hdev->power_off);
2238 hci_req_cancel(hdev, ENODEV);
/* Not up: only the command timer needs stopping. */
2241 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2242 del_timer_sync(&hdev->cmd_timer);
2243 hci_req_unlock(hdev);
2247 /* Flush RX and TX works */
2248 flush_work(&hdev->tx_work);
2249 flush_work(&hdev->rx_work);
/* An active discoverable timeout dies with the power-off. */
2251 if (hdev->discov_timeout > 0) {
2252 cancel_delayed_work(&hdev->discov_off);
2253 hdev->discov_timeout = 0;
2254 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2255 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2258 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2259 cancel_delayed_work(&hdev->service_cache);
2261 cancel_delayed_work_sync(&hdev->le_scan_disable);
2263 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2264 cancel_delayed_work_sync(&hdev->rpa_expired);
2267 hci_inquiry_cache_flush(hdev);
2268 hci_conn_hash_flush(hdev);
2269 hci_pend_le_conns_clear(hdev);
2270 hci_dev_unlock(hdev);
2272 hci_notify(hdev, HCI_DEV_DOWN);
2278 skb_queue_purge(&hdev->cmd_q);
2279 atomic_set(&hdev->cmd_cnt, 1);
/* Issue an HCI Reset on close when the driver requests it and we are
 * doing a real (not auto-off, not raw) shutdown. */
2280 if (!test_bit(HCI_RAW, &hdev->flags) &&
2281 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2282 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2283 set_bit(HCI_INIT, &hdev->flags);
2284 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2285 clear_bit(HCI_INIT, &hdev->flags);
2288 /* flush cmd work */
2289 flush_work(&hdev->cmd_work);
2292 skb_queue_purge(&hdev->rx_q);
2293 skb_queue_purge(&hdev->cmd_q);
2294 skb_queue_purge(&hdev->raw_q);
2296 /* Drop last sent command */
2297 if (hdev->sent_cmd) {
2298 del_timer_sync(&hdev->cmd_timer);
2299 kfree_skb(hdev->sent_cmd);
2300 hdev->sent_cmd = NULL;
2303 kfree_skb(hdev->recv_evt);
2304 hdev->recv_evt = NULL;
2306 /* After this point our queues are empty
2307 * and no tasks are scheduled. */
/* Keep only the flags that persist across power cycles. */
2312 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2314 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2315 if (hdev->dev_type == HCI_BREDR) {
2317 mgmt_powered(hdev, 0);
2318 hci_dev_unlock(hdev);
2322 /* Controller radio is available but is currently powered down */
2323 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2325 memset(hdev->eir, 0, sizeof(hdev->eir));
2326 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2327 bacpy(&hdev->random_addr, BDADDR_ANY);
2329 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl handler: reject user-channel devices, cancel a
 * pending auto power-off, and run the actual close. */
2335 int hci_dev_close(__u16 dev)
2337 struct hci_dev *hdev;
2340 hdev = hci_dev_get(dev);
2344 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2349 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2350 cancel_delayed_work(&hdev->power_off);
2352 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl handler: drop queued packets, flush the inquiry
 * cache and connections, reset flow-control counters, and issue a
 * synchronous HCI Reset (skipped for raw devices). */
2359 int hci_dev_reset(__u16 dev)
2361 struct hci_dev *hdev;
2364 hdev = hci_dev_get(dev);
2370 if (!test_bit(HCI_UP, &hdev->flags)) {
2375 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2381 skb_queue_purge(&hdev->rx_q);
2382 skb_queue_purge(&hdev->cmd_q);
2385 hci_inquiry_cache_flush(hdev);
2386 hci_conn_hash_flush(hdev);
2387 hci_dev_unlock(hdev);
2392 atomic_set(&hdev->cmd_cnt, 1);
2393 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2395 if (!test_bit(HCI_RAW, &hdev->flags))
2396 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2399 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl handler: zero the device's traffic statistics
 * (refused for user-channel devices). */
2404 int hci_dev_reset_stat(__u16 dev)
2406 struct hci_dev *hdev;
2409 hdev = hci_dev_get(dev);
2413 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2418 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the HCISET* ioctls: copies a hci_dev_req from user
 * space and either runs a synchronous HCI request (auth, encrypt, scan,
 * link policy) or updates host-side settings (link mode, packet type,
 * ACL/SCO MTU) directly. */
2425 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2427 struct hci_dev *hdev;
2428 struct hci_dev_req dr;
2431 if (copy_from_user(&dr, arg, sizeof(dr)))
2434 hdev = hci_dev_get(dr.dev_id);
2438 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2443 if (hdev->dev_type != HCI_BREDR) {
2448 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2455 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2460 if (!lmp_encrypt_capable(hdev)) {
2465 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2466 /* Auth must be enabled first */
2467 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2473 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2478 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2483 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2487 case HCISETLINKMODE:
2488 hdev->link_mode = ((__u16) dr.dev_opt) &
2489 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2493 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high 16 bits, packet count in the low. */
2497 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2498 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2502 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2503 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl handler: build an hci_dev_list_req with the id and
 * flags of every registered controller (bounded by the caller's dev_num)
 * and copy it back to user space. */
2516 int hci_get_dev_list(void __user *arg)
2518 struct hci_dev *hdev;
2519 struct hci_dev_list_req *dl;
2520 struct hci_dev_req *dr;
2521 int n = 0, size, err;
2524 if (get_user(dev_num, (__u16 __user *) arg))
/* Cap the request so the kzalloc below stays small and bounded. */
2527 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2530 size = sizeof(*dl) + dev_num * sizeof(*dr);
2532 dl = kzalloc(size, GFP_KERNEL);
2538 read_lock(&hci_dev_list_lock);
2539 list_for_each_entry(hdev, &hci_dev_list, list) {
/* Userspace enumerating devices keeps them from auto-powering off. */
2540 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2541 cancel_delayed_work(&hdev->power_off);
2543 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2544 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2546 (dr + n)->dev_id = hdev->id;
2547 (dr + n)->dev_opt = hdev->flags;
2552 read_unlock(&hci_dev_list_lock);
/* Shrink the copy to the number of devices actually found. */
2555 size = sizeof(*dl) + n * sizeof(*dr);
2557 err = copy_to_user(arg, dl, size);
2560 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl handler: fill an hci_dev_info snapshot (address,
 * type, flags, MTUs, link policy, stats, features) for one controller
 * and copy it to user space. */
2563 int hci_get_dev_info(void __user *arg)
2565 struct hci_dev *hdev;
2566 struct hci_dev_info di;
2569 if (copy_from_user(&di, arg, sizeof(di)))
2572 hdev = hci_dev_get(di.dev_id);
2576 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2577 cancel_delayed_work_sync(&hdev->power_off);
2579 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2580 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2582 strcpy(di.name, hdev->name);
2583 di.bdaddr = hdev->bdaddr;
/* Encode bus type in the low nibble and dev_type in bits 4-5. */
2584 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2585 di.flags = hdev->flags;
2586 di.pkt_type = hdev->pkt_type;
/* LE-only controllers report their LE buffer sizes in the ACL fields. */
2587 if (lmp_bredr_capable(hdev)) {
2588 di.acl_mtu = hdev->acl_mtu;
2589 di.acl_pkts = hdev->acl_pkts;
2590 di.sco_mtu = hdev->sco_mtu;
2591 di.sco_pkts = hdev->sco_pkts;
2593 di.acl_mtu = hdev->le_mtu;
2594 di.acl_pkts = hdev->le_pkts;
2598 di.link_policy = hdev->link_policy;
2599 di.link_mode = hdev->link_mode;
2601 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2602 memcpy(&di.features, &hdev->features, sizeof(di.features));
2604 if (copy_to_user(arg, &di, sizeof(di)))
2612 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: track the HCI_RFKILLED flag and power the device
 * down when blocked (unless it is still in the SETUP phase or owned by
 * a user channel). */
2614 static int hci_rfkill_set_block(void *data, bool blocked)
2616 struct hci_dev *hdev = data;
2618 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2620 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2624 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2625 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2626 hci_dev_do_close(hdev);
2628 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations registered for every HCI controller. */
2634 static const struct rfkill_ops hci_rfkill_ops = {
2635 .set_block = hci_rfkill_set_block,
/* Deferred power-on work: open the device, re-check the error conditions
 * that were deliberately ignored during setup (rfkill, missing address)
 * and power back off if they still hold; otherwise schedule auto-off and
 * announce the new index to the management interface. */
2638 static void hci_power_on(struct work_struct *work)
2640 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2643 BT_DBG("%s", hdev->name);
2645 err = hci_dev_do_open(hdev);
2647 mgmt_set_powered_failed(hdev, err);
2651 /* During the HCI setup phase, a few error conditions are
2652 * ignored and they need to be checked now. If they are still
2653 * valid, it is important to turn the device back off.
2655 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2656 (hdev->dev_type == HCI_BREDR &&
2657 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2658 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2659 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2660 hci_dev_do_close(hdev);
2661 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2662 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2663 HCI_AUTO_OFF_TIMEOUT);
/* First successful power-on ends the SETUP phase. */
2666 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2667 mgmt_index_added(hdev);
/* Deferred power-off work: simply closes the device. */
2670 static void hci_power_off(struct work_struct *work)
2672 struct hci_dev *hdev = container_of(work, struct hci_dev,
2675 BT_DBG("%s", hdev->name);
2677 hci_dev_do_close(hdev);
/* Delayed work fired when the discoverable timeout expires: hand off to
 * the management layer to clear the discoverable state. */
2680 static void hci_discov_off(struct work_struct *work)
2682 struct hci_dev *hdev;
2684 hdev = container_of(work, struct hci_dev, discov_off.work);
2686 BT_DBG("%s", hdev->name);
2688 mgmt_discoverable_timeout(hdev);
/* Unlink (and free, outside this view) every registered UUID. */
2691 void hci_uuids_clear(struct hci_dev *hdev)
2693 struct bt_uuid *uuid, *tmp;
2695 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2696 list_del(&uuid->list);
/* Unlink (and free, outside this view) every stored BR/EDR link key. */
2701 void hci_link_keys_clear(struct hci_dev *hdev)
2703 struct list_head *p, *n;
2705 list_for_each_safe(p, n, &hdev->link_keys) {
2706 struct link_key *key;
2708 key = list_entry(p, struct link_key, list);
/* Unlink (and free, outside this view) every stored SMP long term key. */
2715 void hci_smp_ltks_clear(struct hci_dev *hdev)
2717 struct smp_ltk *k, *tmp;
2719 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Unlink (and free, outside this view) every identity resolving key. */
2725 void hci_smp_irks_clear(struct hci_dev *hdev)
2727 struct smp_irk *k, *tmp;
2729 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
/* Find the stored BR/EDR link key for 'bdaddr'; NULL if none. */
2735 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2739 list_for_each_entry(k, &hdev->link_keys, list)
2740 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored persistently,
 * based on its type and the local/remote authentication requirements of
 * the connection it was created on. */
2746 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2747 u8 key_type, u8 old_key_type)
/* Legacy keys (combination/local/remote unit, type < 0x03) are kept. */
2750 if (key_type < 0x03)
2753 /* Debug keys are insecure so don't store them persistently */
2754 if (key_type == HCI_LK_DEBUG_COMBINATION)
2757 /* Changed combination key and there's no previous one */
2758 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2761 /* Security mode 3 case */
2765 /* Neither local nor remote side had no-bonding as requirement */
2766 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2769 /* Local side had dedicated bonding as requirement */
2770 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2773 /* Remote side had dedicated bonding as requirement */
2774 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2777 /* If none of the above criteria match, then don't store the key
/* Classify an SMP key type: STK and LTK belong to the master role. */
2782 static bool ltk_type_master(u8 type)
2784 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
/* Find a long term key matching the given EDiv/Rand pair and role
 * (master vs slave); NULL if none matches. */
2790 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2795 list_for_each_entry(k, &hdev->long_term_keys, list) {
2796 if (k->ediv != ediv ||
2797 memcmp(rand, k->rand, sizeof(k->rand)))
2800 if (ltk_type_master(k->type) != master)
/* Find a long term key by peer address, address type, and role;
 * NULL if none matches. */
2809 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2810 u8 addr_type, bool master)
2814 list_for_each_entry(k, &hdev->long_term_keys, list)
2815 if (addr_type == k->bdaddr_type &&
2816 bacmp(bdaddr, &k->bdaddr) == 0 &&
2817 ltk_type_master(k->type) == master)
/* Resolve a Resolvable Private Address to its IRK: first look for a
 * cached rpa match, then cryptographically test each stored IRK and
 * cache the rpa on the one that resolves it. */
2823 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2825 struct smp_irk *irk;
2827 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2828 if (!bacmp(&irk->rpa, rpa))
2832 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2833 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2834 bacpy(&irk->rpa, rpa);
/* Find the IRK stored for a given identity address. Random identity
 * addresses must be static random (top two bits 0b11); otherwise the
 * lookup is rejected. */
2842 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2845 struct smp_irk *irk;
2847 /* Identity Address must be public or static random */
2848 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2851 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2852 if (addr_type == irk->addr_type &&
2853 bacmp(bdaddr, &irk->bdaddr) == 0)
/* Store (or update) a BR/EDR link key for 'bdaddr'. Works around buggy
 * controllers that report a "changed combination" key where none existed,
 * decides persistence via hci_persistent_key(), and notifies the
 * management interface when 'new_key' is set. */
2860 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2861 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2863 struct link_key *key, *old_key;
2867 old_key = hci_find_link_key(hdev, bdaddr);
2869 old_key_type = old_key->type;
/* No stored key: 0xff marks "no previous key type". */
2872 old_key_type = conn ? conn->key_type : 0xff;
2873 key = kzalloc(sizeof(*key), GFP_KERNEL);
2876 list_add(&key->list, &hdev->link_keys);
2879 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2881 /* Some buggy controller combinations generate a changed
2882 * combination key for legacy pairing even when there's no
2884 if (type == HCI_LK_CHANGED_COMBINATION &&
2885 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2886 type = HCI_LK_COMBINATION;
2888 conn->key_type = type;
2891 bacpy(&key->bdaddr, bdaddr);
2892 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2893 key->pin_len = pin_len;
/* A changed-combination key inherits the previous key's type. */
2895 if (type == HCI_LK_CHANGED_COMBINATION)
2896 key->type = old_key_type;
2903 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2905 mgmt_new_link_key(hdev, key, persistent);
/* Non-persistent keys are flushed when the connection drops. */
2908 conn->flush_key = !persistent;
/* Store (or update) an SMP long term key for the given address, role,
 * and parameters; returns the stored entry. */
2913 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2914 u8 addr_type, u8 type, u8 authenticated,
2915 u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
2917 struct smp_ltk *key, *old_key;
2918 bool master = ltk_type_master(type);
/* Reuse an existing entry for the same address/type/role if present. */
2920 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
2924 key = kzalloc(sizeof(*key), GFP_KERNEL);
2927 list_add(&key->list, &hdev->long_term_keys);
2930 bacpy(&key->bdaddr, bdaddr);
2931 key->bdaddr_type = addr_type;
2932 memcpy(key->val, tk, sizeof(key->val));
2933 key->authenticated = authenticated;
2935 key->enc_size = enc_size;
2937 memcpy(key->rand, rand, sizeof(key->rand));
/* Store (or update) an identity resolving key for the given identity
 * address, recording the last known RPA alongside it. */
2942 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2943 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2945 struct smp_irk *irk;
2947 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
/* No existing entry: allocate and link a new one. */
2949 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2953 bacpy(&irk->bdaddr, bdaddr);
2954 irk->addr_type = addr_type;
2956 list_add(&irk->list, &hdev->identity_resolving_keys);
2959 memcpy(irk->val, val, 16);
2960 bacpy(&irk->rpa, rpa);
/* Remove the stored link key for 'bdaddr'; -ENOENT-style failure paths
 * are outside this view. */
2965 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2967 struct link_key *key;
2969 key = hci_find_link_key(hdev, bdaddr);
2973 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2975 list_del(&key->list);
/* Remove every long term key matching the address and address type.
 * Returns 0 if at least one key was removed, -ENOENT otherwise. */
2981 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2983 struct smp_ltk *k, *tmp;
2986 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2987 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2990 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2997 return removed ? 0 : -ENOENT;
/* Remove every identity resolving key matching the address and type. */
3000 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3002 struct smp_irk *k, *tmp;
3004 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3005 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3008 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3015 /* HCI command timer function */
/* Fires when a sent HCI command gets no response in time: log which
 * opcode stalled, restore one command credit, and kick the command
 * work queue so queued commands are not blocked forever. */
3016 static void hci_cmd_timeout(unsigned long arg)
3018 struct hci_dev *hdev = (void *) arg;
3020 if (hdev->sent_cmd) {
3021 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3022 u16 opcode = __le16_to_cpu(sent->opcode);
3024 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3026 BT_ERR("%s command tx timeout", hdev->name);
3029 atomic_set(&hdev->cmd_cnt, 1);
3030 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up cached remote Out-Of-Band pairing data by remote address.
 * Linear scan of hdev->remote_oob_data; returns the matching entry
 * (return statements elided in this listing, NULL when absent). */
3033 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3036 struct oob_data *data;
3038 list_for_each_entry(data, &hdev->remote_oob_data, list)
3039 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Remove cached remote OOB data for the given address.
 * NOTE(review): the not-found early return and kfree/return tail are
 * elided; presumably returns -ENOENT when no entry exists. */
3045 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3047 struct oob_data *data;
3049 data = hci_find_remote_oob_data(hdev, bdaddr);
3053 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3055 list_del(&data->list);
/* Drop every cached remote OOB data entry for this controller.
 * Safe iterator because entries are unlinked (and freed -- free call
 * elided in this listing) during the walk. */
3061 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3063 struct oob_data *data, *n;
3065 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3066 list_del(&data->list);
/* Store legacy (P-192 only) remote OOB pairing data for an address.
 * Reuses an existing entry when present, otherwise allocates one.
 * The 256-bit (secure connections) fields are explicitly zeroed so that
 * stale P-256 data from a previous pairing cannot be used. */
3071 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3072 u8 *hash, u8 *randomizer)
3074 struct oob_data *data;
3076 data = hci_find_remote_oob_data(hdev, bdaddr);
3078 data = kmalloc(sizeof(*data), GFP_KERNEL);
3082 bacpy(&data->bdaddr, bdaddr);
3083 list_add(&data->list, &hdev->remote_oob_data);
3086 memcpy(data->hash192, hash, sizeof(data->hash192));
3087 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
/* Invalidate any previously stored P-256 values. */
3089 memset(data->hash256, 0, sizeof(data->hash256));
3090 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3092 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Store extended remote OOB data: both P-192 and P-256 (secure
 * connections) hash/randomizer pairs. Entry reuse/allocation mirrors
 * hci_add_remote_oob_data(); allocation NULL check and return statement
 * are elided in this listing. */
3097 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3098 u8 *hash192, u8 *randomizer192,
3099 u8 *hash256, u8 *randomizer256)
3101 struct oob_data *data;
3103 data = hci_find_remote_oob_data(hdev, bdaddr);
3105 data = kmalloc(sizeof(*data), GFP_KERNEL);
3109 bacpy(&data->bdaddr, bdaddr);
3110 list_add(&data->list, &hdev->remote_oob_data);
3113 memcpy(data->hash192, hash192, sizeof(data->hash192));
3114 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3116 memcpy(data->hash256, hash256, sizeof(data->hash256));
3117 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3119 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Find a blacklist entry matching address AND address type.
 * Returns the entry, or NULL when absent (returns elided in listing). */
3124 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3125 bdaddr_t *bdaddr, u8 type)
3127 struct bdaddr_list *b;
3129 list_for_each_entry(b, &hdev->blacklist, list) {
3130 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Empty the device blacklist, freeing every entry (unlink/free calls
 * partly elided in this listing). */
3137 void hci_blacklist_clear(struct hci_dev *hdev)
3139 struct list_head *p, *n;
3141 list_for_each_safe(p, n, &hdev->blacklist) {
3142 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add an address to the device blacklist.
 * Rejects BDADDR_ANY and duplicate entries (the error returns after
 * those checks are elided in this listing), then notifies the
 * management interface that the device is blocked. */
3149 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3151 struct bdaddr_list *entry;
3153 if (!bacmp(bdaddr, BDADDR_ANY))
3156 if (hci_blacklist_lookup(hdev, bdaddr, type))
3159 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3163 bacpy(&entry->bdaddr, bdaddr);
3164 entry->bdaddr_type = type;
3166 list_add(&entry->list, &hdev->blacklist);
3168 return mgmt_device_blocked(hdev, bdaddr, type);
/* Remove an address from the blacklist. Passing BDADDR_ANY clears the
 * whole list instead. On single-entry removal the management interface
 * is notified; the not-found error return and kfree are elided in this
 * listing. */
3171 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3173 struct bdaddr_list *entry;
3175 if (!bacmp(bdaddr, BDADDR_ANY)) {
3176 hci_blacklist_clear(hdev);
3180 entry = hci_blacklist_lookup(hdev, bdaddr, type);
3184 list_del(&entry->list);
3187 return mgmt_device_unblocked(hdev, bdaddr, type);
3190 /* This function requires the caller holds hdev->lock */
/* Find stored LE connection parameters for (addr, addr_type).
 * Returns the matching entry, or NULL (returns elided in listing). */
3191 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3192 bdaddr_t *addr, u8 addr_type)
3194 struct hci_conn_params *params;
3196 list_for_each_entry(params, &hdev->le_conn_params, list) {
3197 if (bacmp(&params->addr, addr) == 0 &&
3198 params->addr_type == addr_type) {
/* Return whether there is an established LE connection to (addr, type):
 * an LE_LINK hash hit with matching dst_type in BT_CONNECTED state.
 * (Early false returns and the final true are elided in this listing.) */
3206 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3208 struct hci_conn *conn;
3210 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3214 if (conn->dst_type != type)
3217 if (conn->state != BT_CONNECTED)
3223 /* This function requires the caller holds hdev->lock */
/* Add or update LE connection parameters for a remote address.
 * Reuses an existing entry when present (reuse path elided), otherwise
 * allocates one. The auto_connect policy also maintains the pending-LE-
 * connection list: DISABLED/LINK_LOSS drop any pending entry, ALWAYS
 * adds one unless the device is already connected. */
3224 void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3225 u8 auto_connect, u16 conn_min_interval,
3226 u16 conn_max_interval)
3228 struct hci_conn_params *params;
3230 params = hci_conn_params_lookup(hdev, addr, addr_type);
3234 params = kzalloc(sizeof(*params), GFP_KERNEL);
3236 BT_ERR("Out of memory");
3240 bacpy(&params->addr, addr);
3241 params->addr_type = addr_type;
3243 list_add(&params->list, &hdev->le_conn_params);
3246 params->conn_min_interval = conn_min_interval;
3247 params->conn_max_interval = conn_max_interval;
3248 params->auto_connect = auto_connect;
/* Keep the pending LE connection list in sync with the policy.
 * NOTE(review): break statements between cases are elided here. */
3250 switch (auto_connect) {
3251 case HCI_AUTO_CONN_DISABLED:
3252 case HCI_AUTO_CONN_LINK_LOSS:
3253 hci_pend_le_conn_del(hdev, addr, addr_type);
3255 case HCI_AUTO_CONN_ALWAYS:
3256 if (!is_connected(hdev, addr, addr_type))
3257 hci_pend_le_conn_add(hdev, addr, addr_type);
3261 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3262 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3263 conn_min_interval, conn_max_interval);
3266 /* This function requires the caller holds hdev->lock */
/* Delete stored LE connection parameters for (addr, addr_type) and drop
 * any matching pending-LE-connection entry. Not-found early return and
 * kfree are elided in this listing. */
3267 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3269 struct hci_conn_params *params;
3271 params = hci_conn_params_lookup(hdev, addr, addr_type);
3275 hci_pend_le_conn_del(hdev, addr, addr_type);
3277 list_del(&params->list);
3280 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3283 /* This function requires the caller holds hdev->lock */
/* Remove every stored LE connection-parameter entry (free calls elided
 * in this listing). */
3284 void hci_conn_params_clear(struct hci_dev *hdev)
3286 struct hci_conn_params *params, *tmp;
3288 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3289 list_del(&params->list);
3293 BT_DBG("All LE connection parameters were removed");
3296 /* This function requires the caller holds hdev->lock */
/* Find a pending LE connection entry for (addr, addr_type); returns the
 * entry or NULL (returns elided in this listing). */
3297 struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3298 bdaddr_t *addr, u8 addr_type)
3300 struct bdaddr_list *entry;
3302 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3303 if (bacmp(&entry->bdaddr, addr) == 0 &&
3304 entry->bdaddr_type == addr_type)
3311 /* This function requires the caller holds hdev->lock */
/* Queue (addr, addr_type) for a pending LE auto-connection, then
 * refresh background scanning so the new entry takes effect.
 * Duplicate entries are skipped (early return after the lookup elided). */
3312 void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3314 struct bdaddr_list *entry;
3316 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3320 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3322 BT_ERR("Out of memory");
3326 bacpy(&entry->bdaddr, addr);
3327 entry->bdaddr_type = addr_type;
3329 list_add(&entry->list, &hdev->pend_le_conns);
3331 BT_DBG("addr %pMR (type %u)", addr, addr_type);
/* Re-evaluate whether passive scanning needs to run. */
3334 hci_update_background_scan(hdev);
3337 /* This function requires the caller holds hdev->lock */
/* Remove a pending LE auto-connection entry (free call elided) and
 * refresh background scanning accordingly. */
3338 void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3340 struct bdaddr_list *entry;
3342 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3346 list_del(&entry->list);
3349 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3352 hci_update_background_scan(hdev);
3355 /* This function requires the caller holds hdev->lock */
/* Drop every pending LE auto-connection entry (free calls elided in
 * this listing). */
3356 void hci_pend_le_conns_clear(struct hci_dev *hdev)
3358 struct bdaddr_list *entry, *tmp;
3360 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3361 list_del(&entry->list);
3365 BT_DBG("All LE pending connections cleared");
/* Completion callback for the inquiry started by
 * le_scan_disable_work_complete(): on error, log and reset discovery
 * state to STOPPED under hdev->lock (lock acquisition elided here). */
3368 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3371 BT_ERR("Failed to start inquiry: status %d", status);
3374 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3375 hci_dev_unlock(hdev);
/* Runs after the LE scan has been disabled. For pure LE discovery this
 * ends discovery; for interleaved discovery it chains straight into a
 * BR/EDR inquiry using the GIAC access code. Error paths and some
 * lock/return lines are elided in this listing. */
3380 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3382 /* General inquiry access code (GIAC) */
3383 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3384 struct hci_request req;
3385 struct hci_cp_inquiry cp;
3389 BT_ERR("Failed to disable LE scanning: status %d", status);
3393 switch (hdev->discovery.type) {
3394 case DISCOV_TYPE_LE:
3396 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3397 hci_dev_unlock(hdev);
3400 case DISCOV_TYPE_INTERLEAVED:
3401 hci_req_init(&req, hdev);
/* Build and queue the follow-up HCI Inquiry command. */
3403 memset(&cp, 0, sizeof(cp));
3404 memcpy(&cp.lap, lap, sizeof(cp.lap));
3405 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3406 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3410 hci_inquiry_cache_flush(hdev);
3412 err = hci_req_run(&req, inquiry_complete);
3414 BT_ERR("Inquiry request failed: err %d", err);
3415 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3418 hci_dev_unlock(hdev);
/* Delayed work (armed in hci_alloc_dev) that turns off the LE scan by
 * issuing an async HCI request; le_scan_disable_work_complete() finishes
 * the discovery state machine when the controller responds. */
3423 static void le_scan_disable_work(struct work_struct *work)
3425 struct hci_dev *hdev = container_of(work, struct hci_dev,
3426 le_scan_disable.work);
3427 struct hci_request req;
3430 BT_DBG("%s", hdev->name);
3432 hci_req_init(&req, hdev);
3434 hci_req_add_le_scan_disable(&req);
3436 err = hci_req_run(&req, le_scan_disable_work_complete);
3438 BT_ERR("Disable LE scanning request failed: err %d", err);
/* Choose and (if needed) program the controller's own LE address,
 * reporting the selected type through *own_addr_type. Priority order:
 *  1. HCI_PRIVACY set  -> resolvable private address (regenerated when
 *     expired or not currently programmed), with the RPA expiry timer
 *     re-armed;
 *  2. require_privacy  -> freshly generated non-resolvable private
 *     address (two top bits cleared per the LE address format);
 *  3. forced static address or no public address -> static random
 *     address, programmed only if it differs from the current one;
 *  4. otherwise        -> the public address.
 * Early returns after each branch are elided in this listing. */
3441 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3444 struct hci_dev *hdev = req->hdev;
3447 /* If privacy is enabled use a resolvable private address. If
3448 * current RPA has expired or there is something else than
3449 * the current RPA in use, then generate a new one.
3451 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3454 *own_addr_type = ADDR_LE_DEV_RANDOM;
3456 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3457 !bacmp(&hdev->random_addr, &hdev->rpa))
3460 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3462 BT_ERR("%s failed to generate new RPA", hdev->name);
3466 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &hdev->rpa);
3468 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3469 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3474 /* In case of required privacy without resolvable private address,
3475 * use an unresolvable private address. This is useful for active
3476 * scanning and non-connectable advertising.
3478 if (require_privacy) {
3481 get_random_bytes(&urpa, 6);
3482 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3484 *own_addr_type = ADDR_LE_DEV_RANDOM;
3485 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &urpa);
3489 /* If forcing static address is in use or there is no public
3490 * address use the static address as random address (but skip
3491 * the HCI command if the current random address is already the
3494 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3495 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3496 *own_addr_type = ADDR_LE_DEV_RANDOM;
3497 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3498 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3499 &hdev->static_addr);
3503 /* Neither privacy nor static address is being used so use a
3506 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3511 /* Alloc HCI device */
/* Allocate and initialize a struct hci_dev with conservative defaults:
 * packet types, default LE scan/connection parameters, all list heads,
 * work items, packet queues and the HCI command timeout timer. Returns
 * the new device (return and NULL check elided in this listing). */
3512 struct hci_dev *hci_alloc_dev(void)
3514 struct hci_dev *hdev;
3516 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
/* Baseline BR/EDR capabilities and policy defaults. */
3520 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3521 hdev->esco_type = (ESCO_HV1);
3522 hdev->link_mode = (HCI_LM_ACCEPT);
3523 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3524 hdev->io_capability = 0x03; /* No Input No Output */
3525 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3526 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3528 hdev->sniff_max_interval = 800;
3529 hdev->sniff_min_interval = 80;
/* Default LE advertising, scan and connection interval parameters
 * (units are controller-defined slots/0.625ms ticks). */
3531 hdev->le_adv_channel_map = 0x07;
3532 hdev->le_scan_interval = 0x0060;
3533 hdev->le_scan_window = 0x0030;
3534 hdev->le_conn_min_interval = 0x0028;
3535 hdev->le_conn_max_interval = 0x0038;
3537 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3539 mutex_init(&hdev->lock);
3540 mutex_init(&hdev->req_lock);
3542 INIT_LIST_HEAD(&hdev->mgmt_pending);
3543 INIT_LIST_HEAD(&hdev->blacklist);
3544 INIT_LIST_HEAD(&hdev->uuids);
3545 INIT_LIST_HEAD(&hdev->link_keys);
3546 INIT_LIST_HEAD(&hdev->long_term_keys);
3547 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3548 INIT_LIST_HEAD(&hdev->remote_oob_data);
3549 INIT_LIST_HEAD(&hdev->le_conn_params);
3550 INIT_LIST_HEAD(&hdev->pend_le_conns);
3551 INIT_LIST_HEAD(&hdev->conn_hash.list);
3553 INIT_WORK(&hdev->rx_work, hci_rx_work);
3554 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3555 INIT_WORK(&hdev->tx_work, hci_tx_work);
3556 INIT_WORK(&hdev->power_on, hci_power_on);
3558 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3559 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3560 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3562 skb_queue_head_init(&hdev->rx_q);
3563 skb_queue_head_init(&hdev->cmd_q);
3564 skb_queue_head_init(&hdev->raw_q);
3566 init_waitqueue_head(&hdev->req_wait_q);
/* Arm the HCI command timeout; fires hci_cmd_timeout() on stalls. */
3568 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
3570 hci_init_sysfs(hdev);
3571 discovery_init(hdev);
3575 EXPORT_SYMBOL(hci_alloc_dev);
3577 /* Free HCI device */
/* Drop the device reference; the actual kfree happens in the device
 * core's release callback once the last reference is gone. */
3578 void hci_free_dev(struct hci_dev *hdev)
3580 /* will free via device release */
3581 put_device(&hdev->dev);
3583 EXPORT_SYMBOL(hci_free_dev);
3585 /* Register HCI device */
/* Register a fully-prepared hci_dev with the core: allocate an index,
 * create the two workqueues and the debugfs directory, set up the AES
 * crypto context (used for RPA generation), add the device to sysfs,
 * register rfkill, add it to hci_dev_list and finally queue power-on.
 * Unwinds via the err_* labels at the bottom on failure (labels and
 * some gotos elided in this listing). */
3586 int hci_register_dev(struct hci_dev *hdev)
3590 if (!hdev->open || !hdev->close)
3593 /* Do not allow HCI_AMP devices to register at index 0,
3594 * so the index can be used as the AMP controller ID.
3596 switch (hdev->dev_type) {
3598 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3601 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3610 sprintf(hdev->name, "hci%d", id);
3613 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3615 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3616 WQ_MEM_RECLAIM, 1, hdev->name);
3617 if (!hdev->workqueue) {
3622 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3623 WQ_MEM_RECLAIM, 1, hdev->name);
3624 if (!hdev->req_workqueue) {
3625 destroy_workqueue(hdev->workqueue);
3630 if (!IS_ERR_OR_NULL(bt_debugfs))
3631 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3633 dev_set_name(&hdev->dev, "%s", hdev->name);
3635 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3637 if (IS_ERR(hdev->tfm_aes)) {
3638 BT_ERR("Unable to create crypto context");
3639 error = PTR_ERR(hdev->tfm_aes);
3640 hdev->tfm_aes = NULL;
3644 error = device_add(&hdev->dev);
/* rfkill is best-effort: a failed registration leaves hdev->rfkill
 * NULL rather than aborting device registration. */
3648 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3649 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3652 if (rfkill_register(hdev->rfkill) < 0) {
3653 rfkill_destroy(hdev->rfkill);
3654 hdev->rfkill = NULL;
3658 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3659 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3661 set_bit(HCI_SETUP, &hdev->dev_flags);
3662 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3664 if (hdev->dev_type == HCI_BREDR) {
3665 /* Assume BR/EDR support until proven otherwise (such as
3666 * through reading supported features during init.
3668 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3671 write_lock(&hci_dev_list_lock);
3672 list_add(&hdev->list, &hci_dev_list);
3673 write_unlock(&hci_dev_list_lock);
3675 hci_notify(hdev, HCI_DEV_REG);
3678 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwind path (labels elided in this listing). */
3683 crypto_free_blkcipher(hdev->tfm_aes);
3685 destroy_workqueue(hdev->workqueue);
3686 destroy_workqueue(hdev->req_workqueue);
3688 ida_simple_remove(&hci_index_ida, hdev->id);
3692 EXPORT_SYMBOL(hci_register_dev);
3694 /* Unregister HCI device */
/* Tear down a registered hci_dev in reverse order of registration:
 * mark unregistering, unlink from hci_dev_list, close the device, free
 * reassembly buffers, notify mgmt, unregister rfkill/sysfs/debugfs,
 * destroy workqueues, purge every per-device list under hdev->lock and
 * release the index. */
3695 void hci_unregister_dev(struct hci_dev *hdev)
3699 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3701 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3705 write_lock(&hci_dev_list_lock);
3706 list_del(&hdev->list);
3707 write_unlock(&hci_dev_list_lock);
3709 hci_dev_do_close(hdev);
3711 for (i = 0; i < NUM_REASSEMBLY; i++)
3712 kfree_skb(hdev->reassembly[i]);
3714 cancel_work_sync(&hdev->power_on);
/* Only tell mgmt about removal when userspace ever saw the device
 * (i.e. it finished setup and is not mid-init). */
3716 if (!test_bit(HCI_INIT, &hdev->flags) &&
3717 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
3719 mgmt_index_removed(hdev);
3720 hci_dev_unlock(hdev);
3723 /* mgmt_index_removed should take care of emptying the
3725 BUG_ON(!list_empty(&hdev->mgmt_pending));
3727 hci_notify(hdev, HCI_DEV_UNREG);
3730 rfkill_unregister(hdev->rfkill);
3731 rfkill_destroy(hdev->rfkill);
3735 crypto_free_blkcipher(hdev->tfm_aes);
3737 device_del(&hdev->dev);
3739 debugfs_remove_recursive(hdev->debugfs);
3741 destroy_workqueue(hdev->workqueue);
3742 destroy_workqueue(hdev->req_workqueue);
3745 hci_blacklist_clear(hdev);
3746 hci_uuids_clear(hdev);
3747 hci_link_keys_clear(hdev);
3748 hci_smp_ltks_clear(hdev);
3749 hci_smp_irks_clear(hdev);
3750 hci_remote_oob_data_clear(hdev);
3751 hci_conn_params_clear(hdev);
3752 hci_pend_le_conns_clear(hdev);
3753 hci_dev_unlock(hdev);
3757 ida_simple_remove(&hci_index_ida, id);
3761 /* Suspend HCI device */
/* Broadcast a suspend event to HCI sockets via hci_notify(); the
 * return statement is elided in this listing. */
3762 int hci_suspend_dev(struct hci_dev *hdev)
3764 hci_notify(hdev, HCI_DEV_SUSPEND);
3767 EXPORT_SYMBOL(hci_suspend_dev);
3769 /* Resume HCI device */
/* Broadcast a resume event to HCI sockets via hci_notify(); the
 * return statement is elided in this listing. */
3770 int hci_resume_dev(struct hci_dev *hdev)
3772 hci_notify(hdev, HCI_DEV_RESUME);
3775 EXPORT_SYMBOL(hci_resume_dev);
3777 /* Receive frame from HCI drivers */
/* Entry point for transport drivers delivering a complete HCI packet.
 * Rejects frames unless the device is up or initializing (error path
 * after the check elided), timestamps the skb, queues it on rx_q and
 * schedules the RX work for processing in process context. */
3778 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3780 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3781 && !test_bit(HCI_INIT, &hdev->flags))) {
3787 bt_cb(skb)->incoming = 1;
3790 __net_timestamp(skb);
3792 skb_queue_tail(&hdev->rx_q, skb);
3793 queue_work(hdev->workqueue, &hdev->rx_work);
3797 EXPORT_SYMBOL(hci_recv_frame);
/* Incrementally reassemble one HCI packet of the given type from a
 * driver that delivers data in arbitrary fragments.
 * A per-type skb is kept in hdev->reassembly[index]; on first call the
 * skb is allocated with the maximum size for the packet type, then each
 * call copies up to scb->expect bytes. Once the type-specific header is
 * complete, the payload length is read from it to learn how many bytes
 * remain. When expect reaches 0 the finished skb is handed to
 * hci_recv_frame() and the slot cleared.
 * NOTE(review): the outer while loop, several case/break lines and the
 * return of remaining byte count are elided in this listing. */
3799 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3800 int count, __u8 index)
3805 struct sk_buff *skb;
3806 struct bt_skb_cb *scb;
3808 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3809 index >= NUM_REASSEMBLY)
3812 skb = hdev->reassembly[index]
3816 case HCI_ACLDATA_PKT:
3817 len = HCI_MAX_FRAME_SIZE;
3818 hlen = HCI_ACL_HDR_SIZE;
3821 len = HCI_MAX_EVENT_SIZE;
3822 hlen = HCI_EVENT_HDR_SIZE;
3824 case HCI_SCODATA_PKT:
3825 len = HCI_MAX_SCO_SIZE;
3826 hlen = HCI_SCO_HDR_SIZE;
3830 skb = bt_skb_alloc(len, GFP_ATOMIC);
3834 scb = (void *) skb->cb;
3836 scb->pkt_type = type;
3838 hdev->reassembly[index] = skb;
3842 scb = (void *) skb->cb;
3843 len = min_t(uint, scb->expect, count);
3845 memcpy(skb_put(skb, len), data, len);
/* Header complete: learn the payload length for this packet type.
 * If the payload would not fit, the partial skb is dropped (kfree_skb
 * calls elided in this listing). */
3854 if (skb->len == HCI_EVENT_HDR_SIZE) {
3855 struct hci_event_hdr *h = hci_event_hdr(skb);
3856 scb->expect = h->plen;
3858 if (skb_tailroom(skb) < scb->expect) {
3860 hdev->reassembly[index] = NULL;
3866 case HCI_ACLDATA_PKT:
3867 if (skb->len == HCI_ACL_HDR_SIZE) {
3868 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3869 scb->expect = __le16_to_cpu(h->dlen);
3871 if (skb_tailroom(skb) < scb->expect) {
3873 hdev->reassembly[index] = NULL;
3879 case HCI_SCODATA_PKT:
3880 if (skb->len == HCI_SCO_HDR_SIZE) {
3881 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3882 scb->expect = h->dlen;
3884 if (skb_tailroom(skb) < scb->expect) {
3886 hdev->reassembly[index] = NULL;
3893 if (scb->expect == 0) {
3894 /* Complete frame */
3896 bt_cb(skb)->pkt_type = type;
3897 hci_recv_frame(hdev, skb);
3899 hdev->reassembly[index] = NULL;
/* Feed a fragment of a typed HCI packet into the reassembly machinery.
 * Uses slot (type - 1) so each packet type reassembles independently.
 * NOTE(review): the surrounding while loop, error handling of negative
 * rem and the return are elided in this listing. */
3907 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3911 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3915 rem = hci_reassembly(hdev, type, data, count, type - 1);
3919 data += (count - rem);
3925 EXPORT_SYMBOL(hci_recv_fragment);
3927 #define STREAM_REASSEMBLY 0
/* Feed raw byte-stream data (e.g. from a UART transport) into
 * reassembly. The packet type is taken from the first byte of each new
 * frame; an in-progress frame continues with the type cached in the
 * skb's control block. Uses the single STREAM_REASSEMBLY slot.
 * Loop structure and returns are elided in this listing. */
3929 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3935 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3938 struct { char type; } *pkt;
3940 /* Start of the frame */
3947 type = bt_cb(skb)->pkt_type;
3949 rem = hci_reassembly(hdev, type, data, count,
3954 data += (count - rem);
3960 EXPORT_SYMBOL(hci_recv_stream_fragment);
3962 /* ---- Interface to upper protocols ---- */
3964 int hci_register_cb(struct hci_cb *cb)
3966 BT_DBG("%p name %s", cb, cb->name);
3968 write_lock(&hci_cb_list_lock);
3969 list_add(&cb->list, &hci_cb_list);
3970 write_unlock(&hci_cb_list_lock);
3974 EXPORT_SYMBOL(hci_register_cb);
/* Unlink an upper-protocol callback from hci_cb_list under the write
 * lock. Return elided in listing. */
3976 int hci_unregister_cb(struct hci_cb *cb)
3978 BT_DBG("%p name %s", cb, cb->name);
3980 write_lock(&hci_cb_list_lock);
3981 list_del(&cb->list);
3982 write_unlock(&hci_cb_list_lock);
3986 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing skb to the transport driver. A timestamp is taken,
 * a copy goes to the monitor socket always, and to regular HCI sockets
 * only when someone is in promiscuous mode. The driver's send() gets
 * the skb after its socket owner is dropped (skb_orphan elided). */
3988 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3990 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3993 __net_timestamp(skb);
3995 /* Send copy to monitor */
3996 hci_send_to_monitor(hdev, skb);
3998 if (atomic_read(&hdev->promisc)) {
3999 /* Send copy to the sockets */
4000 hci_send_to_sock(hdev, skb);
4003 /* Get rid of skb owner, prior to sending to the driver. */
4006 if (hdev->send(hdev, skb) < 0)
4007 BT_ERR("%s sending frame failed", hdev->name);
/* Prepare an hci_request for use: empty command queue (hdev/err field
 * initialization elided in this listing). */
4010 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4012 skb_queue_head_init(&req->cmd_q);
/* Submit a built hci_request: tag the last queued command with the
 * completion callback, splice the request's commands onto the device
 * command queue under its lock, and kick the command worker.
 * A request that hit a build error is purged instead (error return
 * elided); empty requests are rejected (-ENODATA return elided). */
4017 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4019 struct hci_dev *hdev = req->hdev;
4020 struct sk_buff *skb;
4021 unsigned long flags;
4023 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4025 /* If an error occured during request building, remove all HCI
4026 * commands queued on the HCI request queue.
4029 skb_queue_purge(&req->cmd_q);
4033 /* Do not allow empty requests */
4034 if (skb_queue_empty(&req->cmd_q))
/* Completion fires when the final command of the request finishes. */
4037 skb = skb_peek_tail(&req->cmd_q);
4038 bt_cb(skb)->req.complete = complete;
4040 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4041 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4042 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4044 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Build an skb carrying one HCI command: little-endian opcode header
 * (plen assignment elided) followed by plen parameter bytes, with the
 * pkt_type marked as HCI_COMMAND_PKT. Allocation NULL check and the
 * return statement are elided in this listing. */
4049 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4050 u32 plen, const void *param)
4052 int len = HCI_COMMAND_HDR_SIZE + plen;
4053 struct hci_command_hdr *hdr;
4054 struct sk_buff *skb;
4056 skb = bt_skb_alloc(len, GFP_ATOMIC);
4060 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4061 hdr->opcode = cpu_to_le16(opcode);
4065 memcpy(skb_put(skb, plen), param, plen);
4067 BT_DBG("skb len %d", skb->len);
4069 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4074 /* Send HCI command */
/* Queue a stand-alone HCI command for transmission. The skb is marked
 * as the start of a (single-command) request so the event handling code
 * treats it as its own request. Return elided in this listing. */
4075 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4078 struct sk_buff *skb;
4080 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4082 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4084 BT_ERR("%s no memory for command", hdev->name);
4088 /* Stand-alone HCI commands must be flaged as
4089 * single-command requests.
4091 bt_cb(skb)->req.start = true;
4093 skb_queue_tail(&hdev->cmd_q, skb);
4094 queue_work(hdev->workqueue, &hdev->cmd_work);
4099 /* Queue a command to an asynchronous HCI request */
/* Append one command to an hci_request being built, optionally naming
 * the HCI event that completes it (0 = default Command Complete/Status).
 * The first command queued is flagged as the request start. On build
 * failure req->err is recorded (assignment elided) so hci_req_run()
 * purges the whole request. */
4100 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4101 const void *param, u8 event)
4103 struct hci_dev *hdev = req->hdev;
4104 struct sk_buff *skb;
4106 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4108 /* If an error occured during request building, there is no point in
4109 * queueing the HCI command. We can simply return.
4114 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4116 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4117 hdev->name, opcode);
4122 if (skb_queue_empty(&req->cmd_q))
4123 bt_cb(skb)->req.start = true;
4125 bt_cb(skb)->req.event = event;
4127 skb_queue_tail(&req->cmd_q, skb);
/* Convenience wrapper: queue a command completed by the default event
 * (event == 0). */
4130 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4133 hci_req_add_ev(req, opcode, plen, param, 0);
4136 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last command sent to
 * the controller, but only when its opcode matches -- used by event
 * handlers to recover the request parameters. NULL returns are elided
 * in this listing. */
4137 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4139 struct hci_command_hdr *hdr;
4141 if (!hdev->sent_cmd)
4144 hdr = (void *) hdev->sent_cmd->data;
4146 if (hdr->opcode != cpu_to_le16(opcode))
4149 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4151 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header to an skb: packed handle+flags and the
 * little-endian data length (the len variable capturing skb->len before
 * the push is elided in this listing). */
4155 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4157 struct hci_acl_hdr *hdr;
4160 skb_push(skb, HCI_ACL_HDR_SIZE);
4161 skb_reset_transport_header(skb);
4162 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4163 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4164 hdr->dlen = cpu_to_le16(len);
/* Queue an (optionally fragmented) ACL packet on the given queue.
 * The ACL header uses the connection handle on BR/EDR controllers and
 * the channel handle on AMP controllers. A non-fragmented skb is
 * simply queued; a fragment chain (frag_list) is queued atomically
 * under the queue lock, with ACL_START cleared on all continuation
 * fragments so only the first carries the start flag. */
4167 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4168 struct sk_buff *skb, __u16 flags)
4170 struct hci_conn *conn = chan->conn;
4171 struct hci_dev *hdev = conn->hdev;
4172 struct sk_buff *list;
4174 skb->len = skb_headlen(skb);
4177 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4179 switch (hdev->dev_type) {
4181 hci_add_acl_hdr(skb, conn->handle, flags);
4184 hci_add_acl_hdr(skb, chan->handle, flags);
4187 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4191 list = skb_shinfo(skb)->frag_list;
4193 /* Non fragmented */
4194 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4196 skb_queue_tail(queue, skb);
4199 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4201 skb_shinfo(skb)->frag_list = NULL;
4203 /* Queue all fragments atomically */
4204 spin_lock(&queue->lock);
4206 __skb_queue_tail(queue, skb);
4208 flags &= ~ACL_START;
/* Walk the fragment chain (do/while loop braces elided here). */
4211 skb = list; list = list->next;
4213 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4214 hci_add_acl_hdr(skb, conn->handle, flags);
4216 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4218 __skb_queue_tail(queue, skb);
4221 spin_unlock(&queue->lock);
/* Public ACL send entry point: queue the packet on the channel's data
 * queue and schedule the TX worker to drain it. */
4225 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4227 struct hci_dev *hdev = chan->conn->hdev;
4229 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4231 hci_queue_acl(chan, &chan->data_q, skb, flags);
4233 queue_work(hdev->workqueue, &hdev->tx_work);
/* Send a SCO packet: prepend the SCO header (handle + length), mark the
 * packet type, queue it on the connection's data queue and kick the TX
 * worker. Note hdr.dlen is a raw byte count (u8-sized field, no
 * endianness conversion needed). */
4237 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4239 struct hci_dev *hdev = conn->hdev;
4240 struct hci_sco_hdr hdr;
4242 BT_DBG("%s len %d", hdev->name, skb->len);
4244 hdr.handle = cpu_to_le16(conn->handle);
4245 hdr.dlen = skb->len;
4247 skb_push(skb, HCI_SCO_HDR_SIZE);
4248 skb_reset_transport_header(skb);
4249 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4251 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4253 skb_queue_tail(&conn->data_q, skb);
4254 queue_work(hdev->workqueue, &hdev->tx_work);
4257 /* ---- HCI TX task (outgoing data) ---- */
4259 /* HCI Connection scheduler */
/* Pick the connection of the given link type with pending data and the
 * fewest in-flight packets (fairness), and compute its transmit quota
 * from the per-type controller buffer credits. Iterates the connection
 * hash under RCU (rcu_read_lock/unlock elided in this listing); the
 * quote computation lines are also partly elided. Returns the chosen
 * connection or NULL. */
4260 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4263 struct hci_conn_hash *h = &hdev->conn_hash;
4264 struct hci_conn *conn = NULL, *c;
4265 unsigned int num = 0, min = ~0;
4267 /* We don't have to lock device here. Connections are always
4268 * added and removed with TX task disabled. */
4272 list_for_each_entry_rcu(c, &h->list, list) {
4273 if (c->type != type || skb_queue_empty(&c->data_q))
4276 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4281 if (c->sent < min) {
4286 if (hci_conn_num(hdev, type) == num)
4295 switch (conn->type) {
4297 cnt = hdev->acl_cnt;
4301 cnt = hdev->sco_cnt;
4304 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4308 BT_ERR("Unknown link type");
4316 BT_DBG("conn %p quote %d", conn, *quote);
/* Link-level TX timeout handler: the controller stopped returning
 * buffer credits, so disconnect every connection of this link type that
 * still has unacknowledged packets (RCU read lock around the walk
 * elided in this listing). */
4320 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4322 struct hci_conn_hash *h = &hdev->conn_hash;
4325 BT_ERR("%s link tx timeout", hdev->name);
4329 /* Kill stalled connections */
4330 list_for_each_entry_rcu(c, &h->list, list) {
4331 if (c->type == type && c->sent) {
4332 BT_ERR("%s killing stalled connection %pMR",
4333 hdev->name, &c->dst);
4334 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel-level scheduler: among all channels of the given link type
 * with queued data, pick one whose head skb has the highest priority,
 * breaking ties toward the connection with the fewest in-flight
 * packets. The quota is derived from the controller credits for the
 * link type (and block count on block-based flow control). RCU locking
 * and the final quote computation are partly elided in this listing. */
4341 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4344 struct hci_conn_hash *h = &hdev->conn_hash;
4345 struct hci_chan *chan = NULL;
4346 unsigned int num = 0, min = ~0, cur_prio = 0;
4347 struct hci_conn *conn;
4348 int cnt, q, conn_num = 0;
4350 BT_DBG("%s", hdev->name);
4354 list_for_each_entry_rcu(conn, &h->list, list) {
4355 struct hci_chan *tmp;
4357 if (conn->type != type)
4360 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4365 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4366 struct sk_buff *skb;
4368 if (skb_queue_empty(&tmp->data_q))
4371 skb = skb_peek(&tmp->data_q);
4372 if (skb->priority < cur_prio)
/* Higher priority resets the fairness bookkeeping. */
4375 if (skb->priority > cur_prio) {
4378 cur_prio = skb->priority;
4383 if (conn->sent < min) {
4389 if (hci_conn_num(hdev, type) == conn_num)
4398 switch (chan->conn->type) {
4400 cnt = hdev->acl_cnt;
4403 cnt = hdev->block_cnt;
4407 cnt = hdev->sco_cnt;
4410 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4414 BT_ERR("Unknown link type");
4419 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass run after a TX round: promote the head skb of
 * every still-waiting channel of this link type toward HCI_PRIO_MAX-1
 * so low-priority traffic eventually gets scheduled. The exact
 * increment lines and RCU lock/unlock are partly elided in this
 * listing. */
4423 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4425 struct hci_conn_hash *h = &hdev->conn_hash;
4426 struct hci_conn *conn;
4429 BT_DBG("%s", hdev->name);
4433 list_for_each_entry_rcu(conn, &h->list, list) {
4434 struct hci_chan *chan;
4436 if (conn->type != type)
4439 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4444 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4445 struct sk_buff *skb;
4452 if (skb_queue_empty(&chan->data_q))
4455 skb = skb_peek(&chan->data_q);
4456 if (skb->priority >= HCI_PRIO_MAX - 1)
4459 skb->priority = HCI_PRIO_MAX - 1;
4461 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4465 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks consumed by this ACL packet, i.e.
 * the payload (without ACL header) divided by block_len, rounded up. */
4473 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4475 /* Calculate count of blocks used by this packet */
4476 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* Detect a stalled ACL link: if no buffer credits are left and nothing
 * has been transmitted for longer than HCI_ACL_TX_TIMEOUT, kill the
 * stalled connections. Skipped entirely for raw-mode devices. */
4479 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4481 if (!test_bit(HCI_RAW, &hdev->flags)) {
4482 /* ACL tx timeout must be longer than maximum
4483 * link supervision timeout (40.9 seconds) */
4484 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4485 HCI_ACL_TX_TIMEOUT))
4486 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: while credits remain, pick the best
 * channel via hci_chan_sent() and send packets from it up to its quota,
 * stopping early if the head packet's priority drops. Credit/sent
 * accounting lines are elided in this listing. A final priority
 * recalculation runs only if anything was sent. */
4490 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4492 unsigned int cnt = hdev->acl_cnt;
4493 struct hci_chan *chan;
4494 struct sk_buff *skb;
4497 __check_timeout(hdev, cnt);
4499 while (hdev->acl_cnt &&
4500 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4501 u32 priority = (skb_peek(&chan->data_q))->priority;
4502 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4503 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4504 skb->len, skb->priority);
4506 /* Stop if priority has changed */
4507 if (skb->priority < priority)
4510 skb = skb_dequeue(&chan->data_q);
4512 hci_conn_enter_active_mode(chan->conn,
4513 bt_cb(skb)->force_active);
4515 hci_send_frame(hdev, skb);
4516 hdev->acl_last_tx = jiffies;
4524 if (cnt != hdev->acl_cnt)
4525 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (flow control by data blocks rather than
 * packets, used e.g. by AMP controllers). Like hci_sched_acl_pkt() but
 * each packet consumes __get_blocks() credits and oversized packets
 * (more blocks than remain) stop the inner loop. The link type
 * selection for AMP and some accounting lines are elided in this
 * listing. */
4528 static void hci_sched_acl_blk(struct hci_dev *hdev)
4530 unsigned int cnt = hdev->block_cnt;
4531 struct hci_chan *chan;
4532 struct sk_buff *skb;
4536 __check_timeout(hdev, cnt);
4538 BT_DBG("%s", hdev->name);
4540 if (hdev->dev_type == HCI_AMP)
4545 while (hdev->block_cnt > 0 &&
4546 (chan = hci_chan_sent(hdev, type, &quote))) {
4547 u32 priority = (skb_peek(&chan->data_q))->priority;
4548 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4551 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4552 skb->len, skb->priority);
4554 /* Stop if priority has changed */
4555 if (skb->priority < priority)
4558 skb = skb_dequeue(&chan->data_q);
4560 blocks = __get_blocks(hdev, skb);
4561 if (blocks > hdev->block_cnt)
4564 hci_conn_enter_active_mode(chan->conn,
4565 bt_cb(skb)->force_active);
4567 hci_send_frame(hdev, skb);
4568 hdev->acl_last_tx = jiffies;
4570 hdev->block_cnt -= blocks;
4573 chan->sent += blocks;
4574 chan->conn->sent += blocks;
4578 if (cnt != hdev->block_cnt)
4579 hci_prio_recalculate(hdev, type);
/* ACL scheduling dispatcher: skip when there is nothing to do for this
 * controller type, then delegate to the packet-based or block-based
 * scheduler according to the negotiated flow control mode. */
4582 static void hci_sched_acl(struct hci_dev *hdev)
4584 BT_DBG("%s", hdev->name);
4586 /* No ACL link over BR/EDR controller */
4587 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4590 /* No AMP link over AMP controller */
4591 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4594 switch (hdev->flow_ctl_mode) {
4595 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4596 hci_sched_acl_pkt(hdev);
4599 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4600 hci_sched_acl_blk(hdev);
/* SCO scheduler: drain queued SCO packets while sco_cnt credits remain,
 * using the fair connection chooser. Credit decrement and sent-counter
 * lines are elided in this listing; a wrapped sent counter is reset. */
4606 static void hci_sched_sco(struct hci_dev *hdev)
4608 struct hci_conn *conn;
4609 struct sk_buff *skb;
4612 BT_DBG("%s", hdev->name);
4614 if (!hci_conn_num(hdev, SCO_LINK))
4617 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4618 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4619 BT_DBG("skb %p len %d", skb, skb->len);
4620 hci_send_frame(hdev, skb);
4623 if (conn->sent == ~0)
4629 static void hci_sched_esco(struct hci_dev *hdev)
4631 struct hci_conn *conn;
4632 struct sk_buff *skb;
4635 BT_DBG("%s", hdev->name);
4637 if (!hci_conn_num(hdev, ESCO_LINK))
4640 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4642 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4643 BT_DBG("skb %p len %d", skb, skb->len);
4644 hci_send_frame(hdev, skb);
4647 if (conn->sent == ~0)
4653 static void hci_sched_le(struct hci_dev *hdev)
4655 struct hci_chan *chan;
4656 struct sk_buff *skb;
4657 int quote, cnt, tmp;
4659 BT_DBG("%s", hdev->name);
4661 if (!hci_conn_num(hdev, LE_LINK))
4664 if (!test_bit(HCI_RAW, &hdev->flags)) {
4665 /* LE tx timeout must be longer than maximum
4666 * link supervision timeout (40.9 seconds) */
4667 if (!hdev->le_cnt && hdev->le_pkts &&
4668 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4669 hci_link_tx_to(hdev, LE_LINK);
4672 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4674 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
4675 u32 priority = (skb_peek(&chan->data_q))->priority;
4676 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4677 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4678 skb->len, skb->priority);
4680 /* Stop if priority has changed */
4681 if (skb->priority < priority)
4684 skb = skb_dequeue(&chan->data_q);
4686 hci_send_frame(hdev, skb);
4687 hdev->le_last_tx = jiffies;
4698 hdev->acl_cnt = cnt;
4701 hci_prio_recalculate(hdev, LE_LINK);
4704 static void hci_tx_work(struct work_struct *work)
4706 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4707 struct sk_buff *skb;
4709 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4710 hdev->sco_cnt, hdev->le_cnt);
4712 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4713 /* Schedule queues and send stuff to HCI driver */
4714 hci_sched_acl(hdev);
4715 hci_sched_sco(hdev);
4716 hci_sched_esco(hdev);
4720 /* Send next queued raw (unknown type) packet */
4721 while ((skb = skb_dequeue(&hdev->raw_q)))
4722 hci_send_frame(hdev, skb);
4725 /* ----- HCI RX task (incoming data processing) ----- */
4727 /* ACL data packet */
4728 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4730 struct hci_acl_hdr *hdr = (void *) skb->data;
4731 struct hci_conn *conn;
4732 __u16 handle, flags;
4734 skb_pull(skb, HCI_ACL_HDR_SIZE);
4736 handle = __le16_to_cpu(hdr->handle);
4737 flags = hci_flags(handle);
4738 handle = hci_handle(handle);
4740 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4743 hdev->stat.acl_rx++;
4746 conn = hci_conn_hash_lookup_handle(hdev, handle);
4747 hci_dev_unlock(hdev);
4750 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4752 /* Send to upper protocol */
4753 l2cap_recv_acldata(conn, skb, flags);
4756 BT_ERR("%s ACL packet for unknown connection handle %d",
4757 hdev->name, handle);
4763 /* SCO data packet */
4764 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4766 struct hci_sco_hdr *hdr = (void *) skb->data;
4767 struct hci_conn *conn;
4770 skb_pull(skb, HCI_SCO_HDR_SIZE);
4772 handle = __le16_to_cpu(hdr->handle);
4774 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4776 hdev->stat.sco_rx++;
4779 conn = hci_conn_hash_lookup_handle(hdev, handle);
4780 hci_dev_unlock(hdev);
4783 /* Send to upper protocol */
4784 sco_recv_scodata(conn, skb);
4787 BT_ERR("%s SCO packet for unknown connection handle %d",
4788 hdev->name, handle);
4794 static bool hci_req_is_complete(struct hci_dev *hdev)
4796 struct sk_buff *skb;
4798 skb = skb_peek(&hdev->cmd_q);
4802 return bt_cb(skb)->req.start;
4805 static void hci_resend_last(struct hci_dev *hdev)
4807 struct hci_command_hdr *sent;
4808 struct sk_buff *skb;
4811 if (!hdev->sent_cmd)
4814 sent = (void *) hdev->sent_cmd->data;
4815 opcode = __le16_to_cpu(sent->opcode);
4816 if (opcode == HCI_OP_RESET)
4819 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4823 skb_queue_head(&hdev->cmd_q, skb);
4824 queue_work(hdev->workqueue, &hdev->cmd_work);
4827 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4829 hci_req_complete_t req_complete = NULL;
4830 struct sk_buff *skb;
4831 unsigned long flags;
4833 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4835 /* If the completed command doesn't match the last one that was
4836 * sent we need to do special handling of it.
4838 if (!hci_sent_cmd_data(hdev, opcode)) {
4839 /* Some CSR based controllers generate a spontaneous
4840 * reset complete event during init and any pending
4841 * command will never be completed. In such a case we
4842 * need to resend whatever was the last sent
4845 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4846 hci_resend_last(hdev);
4851 /* If the command succeeded and there's still more commands in
4852 * this request the request is not yet complete.
4854 if (!status && !hci_req_is_complete(hdev))
4857 /* If this was the last command in a request the complete
4858 * callback would be found in hdev->sent_cmd instead of the
4859 * command queue (hdev->cmd_q).
4861 if (hdev->sent_cmd) {
4862 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4865 /* We must set the complete callback to NULL to
4866 * avoid calling the callback more than once if
4867 * this function gets called again.
4869 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4875 /* Remove all pending commands belonging to this request */
4876 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4877 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4878 if (bt_cb(skb)->req.start) {
4879 __skb_queue_head(&hdev->cmd_q, skb);
4883 req_complete = bt_cb(skb)->req.complete;
4886 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4890 req_complete(hdev, status);
4893 static void hci_rx_work(struct work_struct *work)
4895 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4896 struct sk_buff *skb;
4898 BT_DBG("%s", hdev->name);
4900 while ((skb = skb_dequeue(&hdev->rx_q))) {
4901 /* Send copy to monitor */
4902 hci_send_to_monitor(hdev, skb);
4904 if (atomic_read(&hdev->promisc)) {
4905 /* Send copy to the sockets */
4906 hci_send_to_sock(hdev, skb);
4909 if (test_bit(HCI_RAW, &hdev->flags) ||
4910 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4915 if (test_bit(HCI_INIT, &hdev->flags)) {
4916 /* Don't process data packets in this states. */
4917 switch (bt_cb(skb)->pkt_type) {
4918 case HCI_ACLDATA_PKT:
4919 case HCI_SCODATA_PKT:
4926 switch (bt_cb(skb)->pkt_type) {
4928 BT_DBG("%s Event packet", hdev->name);
4929 hci_event_packet(hdev, skb);
4932 case HCI_ACLDATA_PKT:
4933 BT_DBG("%s ACL data packet", hdev->name);
4934 hci_acldata_packet(hdev, skb);
4937 case HCI_SCODATA_PKT:
4938 BT_DBG("%s SCO data packet", hdev->name);
4939 hci_scodata_packet(hdev, skb);
4949 static void hci_cmd_work(struct work_struct *work)
4951 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4952 struct sk_buff *skb;
4954 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4955 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4957 /* Send queued commands */
4958 if (atomic_read(&hdev->cmd_cnt)) {
4959 skb = skb_dequeue(&hdev->cmd_q);
4963 kfree_skb(hdev->sent_cmd);
4965 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4966 if (hdev->sent_cmd) {
4967 atomic_dec(&hdev->cmd_cnt);
4968 hci_send_frame(hdev, skb);
4969 if (test_bit(HCI_RESET, &hdev->flags))
4970 del_timer(&hdev->cmd_timer);
4972 mod_timer(&hdev->cmd_timer,
4973 jiffies + HCI_CMD_TIMEOUT);
4975 skb_queue_head(&hdev->cmd_q, skb);
4976 queue_work(hdev->workqueue, &hdev->cmd_work);
4981 void hci_req_add_le_scan_disable(struct hci_request *req)
4983 struct hci_cp_le_set_scan_enable cp;
4985 memset(&cp, 0, sizeof(cp));
4986 cp.enable = LE_SCAN_DISABLE;
4987 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
4990 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
4993 BT_DBG("HCI request failed to update background scanning: "
4994 "status 0x%2.2x", status);
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
5003 void hci_update_background_scan(struct hci_dev *hdev)
5005 struct hci_cp_le_set_scan_param param_cp;
5006 struct hci_cp_le_set_scan_enable enable_cp;
5007 struct hci_request req;
5008 struct hci_conn *conn;
5011 hci_req_init(&req, hdev);
5013 if (list_empty(&hdev->pend_le_conns)) {
5014 /* If there is no pending LE connections, we should stop
5015 * the background scanning.
5018 /* If controller is not scanning we are done. */
5019 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5022 hci_req_add_le_scan_disable(&req);
5024 BT_DBG("%s stopping background scanning", hdev->name);
5028 /* If there is at least one pending LE connection, we should
5029 * keep the background scan running.
5032 /* If controller is already scanning we are done. */
5033 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5036 /* If controller is connecting, we should not start scanning
5037 * since some controllers are not able to scan and connect at
5040 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5044 /* Set require_privacy to true to avoid identification from
5045 * unknown peer devices. Since this is passive scanning, no
5046 * SCAN_REQ using the local identity should be sent. Mandating
5047 * privacy is just an extra precaution.
5049 if (hci_update_random_address(&req, true, &own_addr_type))
5052 memset(¶m_cp, 0, sizeof(param_cp));
5053 param_cp.type = LE_SCAN_PASSIVE;
5054 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5055 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5056 param_cp.own_address_type = own_addr_type;
5057 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5060 memset(&enable_cp, 0, sizeof(enable_cp));
5061 enable_cp.enable = LE_SCAN_ENABLE;
5062 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
5063 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5066 BT_DBG("%s starting background scanning", hdev->name);
5069 err = hci_req_run(&req, update_background_scan_complete);
5071 BT_ERR("Failed to run HCI request: err %d", err);