2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
/* Deferred work handlers, defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
55 /* ---- HCI notifications ---- */
/* ---- HCI notifications ---- */

/* Forward a device event (register/unregister/up/down) to HCI sockets. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
62 /* ---- HCI debugfs entries ---- */
64 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
65 size_t count, loff_t *ppos)
67 struct hci_dev *hdev = file->private_data;
70 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
73 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
76 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
77 size_t count, loff_t *ppos)
79 struct hci_dev *hdev = file->private_data;
82 size_t buf_size = min(count, (sizeof(buf)-1));
86 if (!test_bit(HCI_UP, &hdev->flags))
89 if (copy_from_user(buf, user_buf, buf_size))
93 if (strtobool(buf, &enable))
96 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
101 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
104 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
106 hci_req_unlock(hdev);
111 err = -bt_to_errno(skb->data[0]);
117 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
122 static const struct file_operations dut_mode_fops = {
124 .read = dut_mode_read,
125 .write = dut_mode_write,
126 .llseek = default_llseek,
129 static int features_show(struct seq_file *f, void *ptr)
131 struct hci_dev *hdev = f->private;
135 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
136 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
137 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
138 hdev->features[p][0], hdev->features[p][1],
139 hdev->features[p][2], hdev->features[p][3],
140 hdev->features[p][4], hdev->features[p][5],
141 hdev->features[p][6], hdev->features[p][7]);
143 if (lmp_le_capable(hdev))
144 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
145 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
146 hdev->le_features[0], hdev->le_features[1],
147 hdev->le_features[2], hdev->le_features[3],
148 hdev->le_features[4], hdev->le_features[5],
149 hdev->le_features[6], hdev->le_features[7]);
150 hci_dev_unlock(hdev);
155 static int features_open(struct inode *inode, struct file *file)
157 return single_open(file, features_show, inode->i_private);
160 static const struct file_operations features_fops = {
161 .open = features_open,
164 .release = single_release,
167 static int blacklist_show(struct seq_file *f, void *p)
169 struct hci_dev *hdev = f->private;
170 struct bdaddr_list *b;
173 list_for_each_entry(b, &hdev->blacklist, list)
174 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
175 hci_dev_unlock(hdev);
180 static int blacklist_open(struct inode *inode, struct file *file)
182 return single_open(file, blacklist_show, inode->i_private);
185 static const struct file_operations blacklist_fops = {
186 .open = blacklist_open,
189 .release = single_release,
192 static int uuids_show(struct seq_file *f, void *p)
194 struct hci_dev *hdev = f->private;
195 struct bt_uuid *uuid;
198 list_for_each_entry(uuid, &hdev->uuids, list) {
201 /* The Bluetooth UUID values are stored in big endian,
202 * but with reversed byte order. So convert them into
203 * the right order for the %pUb modifier.
205 for (i = 0; i < 16; i++)
206 val[i] = uuid->uuid[15 - i];
208 seq_printf(f, "%pUb\n", val);
210 hci_dev_unlock(hdev);
215 static int uuids_open(struct inode *inode, struct file *file)
217 return single_open(file, uuids_show, inode->i_private);
220 static const struct file_operations uuids_fops = {
224 .release = single_release,
227 static int inquiry_cache_show(struct seq_file *f, void *p)
229 struct hci_dev *hdev = f->private;
230 struct discovery_state *cache = &hdev->discovery;
231 struct inquiry_entry *e;
235 list_for_each_entry(e, &cache->all, all) {
236 struct inquiry_data *data = &e->data;
237 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
239 data->pscan_rep_mode, data->pscan_period_mode,
240 data->pscan_mode, data->dev_class[2],
241 data->dev_class[1], data->dev_class[0],
242 __le16_to_cpu(data->clock_offset),
243 data->rssi, data->ssp_mode, e->timestamp);
246 hci_dev_unlock(hdev);
251 static int inquiry_cache_open(struct inode *inode, struct file *file)
253 return single_open(file, inquiry_cache_show, inode->i_private);
256 static const struct file_operations inquiry_cache_fops = {
257 .open = inquiry_cache_open,
260 .release = single_release,
263 static int link_keys_show(struct seq_file *f, void *ptr)
265 struct hci_dev *hdev = f->private;
266 struct list_head *p, *n;
269 list_for_each_safe(p, n, &hdev->link_keys) {
270 struct link_key *key = list_entry(p, struct link_key, list);
271 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
272 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
274 hci_dev_unlock(hdev);
279 static int link_keys_open(struct inode *inode, struct file *file)
281 return single_open(file, link_keys_show, inode->i_private);
284 static const struct file_operations link_keys_fops = {
285 .open = link_keys_open,
288 .release = single_release,
291 static int dev_class_show(struct seq_file *f, void *ptr)
293 struct hci_dev *hdev = f->private;
296 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
297 hdev->dev_class[1], hdev->dev_class[0]);
298 hci_dev_unlock(hdev);
303 static int dev_class_open(struct inode *inode, struct file *file)
305 return single_open(file, dev_class_show, inode->i_private);
308 static const struct file_operations dev_class_fops = {
309 .open = dev_class_open,
312 .release = single_release,
315 static int voice_setting_get(void *data, u64 *val)
317 struct hci_dev *hdev = data;
320 *val = hdev->voice_setting;
321 hci_dev_unlock(hdev);
326 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
327 NULL, "0x%4.4llx\n");
329 static int auto_accept_delay_set(void *data, u64 val)
331 struct hci_dev *hdev = data;
334 hdev->auto_accept_delay = val;
335 hci_dev_unlock(hdev);
340 static int auto_accept_delay_get(void *data, u64 *val)
342 struct hci_dev *hdev = data;
345 *val = hdev->auto_accept_delay;
346 hci_dev_unlock(hdev);
351 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
352 auto_accept_delay_set, "%llu\n");
354 static int ssp_debug_mode_set(void *data, u64 val)
356 struct hci_dev *hdev = data;
361 if (val != 0 && val != 1)
364 if (!test_bit(HCI_UP, &hdev->flags))
369 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
370 &mode, HCI_CMD_TIMEOUT);
371 hci_req_unlock(hdev);
376 err = -bt_to_errno(skb->data[0]);
383 hdev->ssp_debug_mode = val;
384 hci_dev_unlock(hdev);
389 static int ssp_debug_mode_get(void *data, u64 *val)
391 struct hci_dev *hdev = data;
394 *val = hdev->ssp_debug_mode;
395 hci_dev_unlock(hdev);
400 DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
401 ssp_debug_mode_set, "%llu\n");
403 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
404 size_t count, loff_t *ppos)
406 struct hci_dev *hdev = file->private_data;
409 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
412 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
415 static ssize_t force_sc_support_write(struct file *file,
416 const char __user *user_buf,
417 size_t count, loff_t *ppos)
419 struct hci_dev *hdev = file->private_data;
421 size_t buf_size = min(count, (sizeof(buf)-1));
424 if (test_bit(HCI_UP, &hdev->flags))
427 if (copy_from_user(buf, user_buf, buf_size))
430 buf[buf_size] = '\0';
431 if (strtobool(buf, &enable))
434 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
437 change_bit(HCI_FORCE_SC, &hdev->dev_flags);
442 static const struct file_operations force_sc_support_fops = {
444 .read = force_sc_support_read,
445 .write = force_sc_support_write,
446 .llseek = default_llseek,
449 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
450 size_t count, loff_t *ppos)
452 struct hci_dev *hdev = file->private_data;
455 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
458 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
461 static const struct file_operations sc_only_mode_fops = {
463 .read = sc_only_mode_read,
464 .llseek = default_llseek,
467 static int idle_timeout_set(void *data, u64 val)
469 struct hci_dev *hdev = data;
471 if (val != 0 && (val < 500 || val > 3600000))
475 hdev->idle_timeout = val;
476 hci_dev_unlock(hdev);
481 static int idle_timeout_get(void *data, u64 *val)
483 struct hci_dev *hdev = data;
486 *val = hdev->idle_timeout;
487 hci_dev_unlock(hdev);
492 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
493 idle_timeout_set, "%llu\n");
495 static int sniff_min_interval_set(void *data, u64 val)
497 struct hci_dev *hdev = data;
499 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
503 hdev->sniff_min_interval = val;
504 hci_dev_unlock(hdev);
509 static int sniff_min_interval_get(void *data, u64 *val)
511 struct hci_dev *hdev = data;
514 *val = hdev->sniff_min_interval;
515 hci_dev_unlock(hdev);
520 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
521 sniff_min_interval_set, "%llu\n");
523 static int sniff_max_interval_set(void *data, u64 val)
525 struct hci_dev *hdev = data;
527 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
531 hdev->sniff_max_interval = val;
532 hci_dev_unlock(hdev);
537 static int sniff_max_interval_get(void *data, u64 *val)
539 struct hci_dev *hdev = data;
542 *val = hdev->sniff_max_interval;
543 hci_dev_unlock(hdev);
548 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
549 sniff_max_interval_set, "%llu\n");
551 static int static_address_show(struct seq_file *f, void *p)
553 struct hci_dev *hdev = f->private;
556 seq_printf(f, "%pMR\n", &hdev->static_addr);
557 hci_dev_unlock(hdev);
562 static int static_address_open(struct inode *inode, struct file *file)
564 return single_open(file, static_address_show, inode->i_private);
567 static const struct file_operations static_address_fops = {
568 .open = static_address_open,
571 .release = single_release,
574 static int own_address_type_set(void *data, u64 val)
576 struct hci_dev *hdev = data;
578 if (val != 0 && val != 1)
582 hdev->own_addr_type = val;
583 hci_dev_unlock(hdev);
588 static int own_address_type_get(void *data, u64 *val)
590 struct hci_dev *hdev = data;
593 *val = hdev->own_addr_type;
594 hci_dev_unlock(hdev);
599 DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
600 own_address_type_set, "%llu\n");
602 static int long_term_keys_show(struct seq_file *f, void *ptr)
604 struct hci_dev *hdev = f->private;
605 struct list_head *p, *n;
608 list_for_each_safe(p, n, &hdev->long_term_keys) {
609 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
610 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
611 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
612 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
613 8, ltk->rand, 16, ltk->val);
615 hci_dev_unlock(hdev);
620 static int long_term_keys_open(struct inode *inode, struct file *file)
622 return single_open(file, long_term_keys_show, inode->i_private);
625 static const struct file_operations long_term_keys_fops = {
626 .open = long_term_keys_open,
629 .release = single_release,
632 static int conn_min_interval_set(void *data, u64 val)
634 struct hci_dev *hdev = data;
636 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
640 hdev->le_conn_min_interval = val;
641 hci_dev_unlock(hdev);
646 static int conn_min_interval_get(void *data, u64 *val)
648 struct hci_dev *hdev = data;
651 *val = hdev->le_conn_min_interval;
652 hci_dev_unlock(hdev);
657 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
658 conn_min_interval_set, "%llu\n");
660 static int conn_max_interval_set(void *data, u64 val)
662 struct hci_dev *hdev = data;
664 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
668 hdev->le_conn_max_interval = val;
669 hci_dev_unlock(hdev);
674 static int conn_max_interval_get(void *data, u64 *val)
676 struct hci_dev *hdev = data;
679 *val = hdev->le_conn_max_interval;
680 hci_dev_unlock(hdev);
685 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
686 conn_max_interval_set, "%llu\n");
688 static ssize_t lowpan_read(struct file *file, char __user *user_buf,
689 size_t count, loff_t *ppos)
691 struct hci_dev *hdev = file->private_data;
694 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
697 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
700 static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
701 size_t count, loff_t *position)
703 struct hci_dev *hdev = fp->private_data;
706 size_t buf_size = min(count, (sizeof(buf)-1));
708 if (copy_from_user(buf, user_buffer, buf_size))
711 buf[buf_size] = '\0';
713 if (strtobool(buf, &enable) < 0)
716 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
719 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
724 static const struct file_operations lowpan_debugfs_fops = {
727 .write = lowpan_write,
728 .llseek = default_llseek,
731 /* ---- HCI requests ---- */
733 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
735 BT_DBG("%s result 0x%2.2x", hdev->name, result);
737 if (hdev->req_status == HCI_REQ_PEND) {
738 hdev->req_result = result;
739 hdev->req_status = HCI_REQ_DONE;
740 wake_up_interruptible(&hdev->req_wait_q);
744 static void hci_req_cancel(struct hci_dev *hdev, int err)
746 BT_DBG("%s err 0x%2.2x", hdev->name, err);
748 if (hdev->req_status == HCI_REQ_PEND) {
749 hdev->req_result = err;
750 hdev->req_status = HCI_REQ_CANCELED;
751 wake_up_interruptible(&hdev->req_wait_q);
755 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
758 struct hci_ev_cmd_complete *ev;
759 struct hci_event_hdr *hdr;
764 skb = hdev->recv_evt;
765 hdev->recv_evt = NULL;
767 hci_dev_unlock(hdev);
770 return ERR_PTR(-ENODATA);
772 if (skb->len < sizeof(*hdr)) {
773 BT_ERR("Too short HCI event");
777 hdr = (void *) skb->data;
778 skb_pull(skb, HCI_EVENT_HDR_SIZE);
781 if (hdr->evt != event)
786 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
787 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
791 if (skb->len < sizeof(*ev)) {
792 BT_ERR("Too short cmd_complete event");
796 ev = (void *) skb->data;
797 skb_pull(skb, sizeof(*ev));
799 if (opcode == __le16_to_cpu(ev->opcode))
802 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
803 __le16_to_cpu(ev->opcode));
807 return ERR_PTR(-ENODATA);
810 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
811 const void *param, u8 event, u32 timeout)
813 DECLARE_WAITQUEUE(wait, current);
814 struct hci_request req;
817 BT_DBG("%s", hdev->name);
819 hci_req_init(&req, hdev);
821 hci_req_add_ev(&req, opcode, plen, param, event);
823 hdev->req_status = HCI_REQ_PEND;
825 err = hci_req_run(&req, hci_req_sync_complete);
829 add_wait_queue(&hdev->req_wait_q, &wait);
830 set_current_state(TASK_INTERRUPTIBLE);
832 schedule_timeout(timeout);
834 remove_wait_queue(&hdev->req_wait_q, &wait);
836 if (signal_pending(current))
837 return ERR_PTR(-EINTR);
839 switch (hdev->req_status) {
841 err = -bt_to_errno(hdev->req_result);
844 case HCI_REQ_CANCELED:
845 err = -hdev->req_result;
853 hdev->req_status = hdev->req_result = 0;
855 BT_DBG("%s end: err %d", hdev->name, err);
860 return hci_get_cmd_complete(hdev, opcode, event);
862 EXPORT_SYMBOL(__hci_cmd_sync_ev);
864 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
865 const void *param, u32 timeout)
867 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
869 EXPORT_SYMBOL(__hci_cmd_sync);
871 /* Execute request and wait for completion. */
872 static int __hci_req_sync(struct hci_dev *hdev,
873 void (*func)(struct hci_request *req,
875 unsigned long opt, __u32 timeout)
877 struct hci_request req;
878 DECLARE_WAITQUEUE(wait, current);
881 BT_DBG("%s start", hdev->name);
883 hci_req_init(&req, hdev);
885 hdev->req_status = HCI_REQ_PEND;
889 err = hci_req_run(&req, hci_req_sync_complete);
891 hdev->req_status = 0;
893 /* ENODATA means the HCI request command queue is empty.
894 * This can happen when a request with conditionals doesn't
895 * trigger any commands to be sent. This is normal behavior
896 * and should not trigger an error return.
904 add_wait_queue(&hdev->req_wait_q, &wait);
905 set_current_state(TASK_INTERRUPTIBLE);
907 schedule_timeout(timeout);
909 remove_wait_queue(&hdev->req_wait_q, &wait);
911 if (signal_pending(current))
914 switch (hdev->req_status) {
916 err = -bt_to_errno(hdev->req_result);
919 case HCI_REQ_CANCELED:
920 err = -hdev->req_result;
928 hdev->req_status = hdev->req_result = 0;
930 BT_DBG("%s end: err %d", hdev->name, err);
935 static int hci_req_sync(struct hci_dev *hdev,
936 void (*req)(struct hci_request *req,
938 unsigned long opt, __u32 timeout)
942 if (!test_bit(HCI_UP, &hdev->flags))
945 /* Serialize all requests */
947 ret = __hci_req_sync(hdev, req, opt, timeout);
948 hci_req_unlock(hdev);
953 static void hci_reset_req(struct hci_request *req, unsigned long opt)
955 BT_DBG("%s %ld", req->hdev->name, opt);
958 set_bit(HCI_RESET, &req->hdev->flags);
959 hci_req_add(req, HCI_OP_RESET, 0, NULL);
962 static void bredr_init(struct hci_request *req)
964 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
966 /* Read Local Supported Features */
967 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
969 /* Read Local Version */
970 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
972 /* Read BD Address */
973 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
976 static void amp_init(struct hci_request *req)
978 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
980 /* Read Local Version */
981 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
983 /* Read Local Supported Commands */
984 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
986 /* Read Local Supported Features */
987 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
989 /* Read Local AMP Info */
990 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
992 /* Read Data Blk size */
993 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
995 /* Read Flow Control Mode */
996 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
998 /* Read Location Data */
999 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1002 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1004 struct hci_dev *hdev = req->hdev;
1006 BT_DBG("%s %ld", hdev->name, opt);
1009 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1010 hci_reset_req(req, 0);
1012 switch (hdev->dev_type) {
1022 BT_ERR("Unknown device type %d", hdev->dev_type);
1027 static void bredr_setup(struct hci_request *req)
1029 struct hci_dev *hdev = req->hdev;
1034 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1035 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1037 /* Read Class of Device */
1038 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1040 /* Read Local Name */
1041 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1043 /* Read Voice Setting */
1044 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1046 /* Read Number of Supported IAC */
1047 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1049 /* Read Current IAC LAP */
1050 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1052 /* Clear Event Filters */
1053 flt_type = HCI_FLT_CLEAR_ALL;
1054 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1056 /* Connection accept timeout ~20 secs */
1057 param = __constant_cpu_to_le16(0x7d00);
1058 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1060 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1061 * but it does not support page scan related HCI commands.
1063 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1064 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1065 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1069 static void le_setup(struct hci_request *req)
1071 struct hci_dev *hdev = req->hdev;
1073 /* Read LE Buffer Size */
1074 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1076 /* Read LE Local Supported Features */
1077 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1079 /* Read LE Advertising Channel TX Power */
1080 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1082 /* Read LE White List Size */
1083 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1085 /* Read LE Supported States */
1086 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1088 /* LE-only controllers have LE implicitly enabled */
1089 if (!lmp_bredr_capable(hdev))
1090 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1093 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1095 if (lmp_ext_inq_capable(hdev))
1098 if (lmp_inq_rssi_capable(hdev))
1101 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1102 hdev->lmp_subver == 0x0757)
1105 if (hdev->manufacturer == 15) {
1106 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1108 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1110 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1114 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1115 hdev->lmp_subver == 0x1805)
1121 static void hci_setup_inquiry_mode(struct hci_request *req)
1125 mode = hci_get_inquiry_mode(req->hdev);
1127 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1130 static void hci_setup_event_mask(struct hci_request *req)
1132 struct hci_dev *hdev = req->hdev;
1134 /* The second byte is 0xff instead of 0x9f (two reserved bits
1135 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1136 * command otherwise.
1138 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1140 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1141 * any event mask for pre 1.2 devices.
1143 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1146 if (lmp_bredr_capable(hdev)) {
1147 events[4] |= 0x01; /* Flow Specification Complete */
1148 events[4] |= 0x02; /* Inquiry Result with RSSI */
1149 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1150 events[5] |= 0x08; /* Synchronous Connection Complete */
1151 events[5] |= 0x10; /* Synchronous Connection Changed */
1153 /* Use a different default for LE-only devices */
1154 memset(events, 0, sizeof(events));
1155 events[0] |= 0x10; /* Disconnection Complete */
1156 events[0] |= 0x80; /* Encryption Change */
1157 events[1] |= 0x08; /* Read Remote Version Information Complete */
1158 events[1] |= 0x20; /* Command Complete */
1159 events[1] |= 0x40; /* Command Status */
1160 events[1] |= 0x80; /* Hardware Error */
1161 events[2] |= 0x04; /* Number of Completed Packets */
1162 events[3] |= 0x02; /* Data Buffer Overflow */
1163 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1166 if (lmp_inq_rssi_capable(hdev))
1167 events[4] |= 0x02; /* Inquiry Result with RSSI */
1169 if (lmp_sniffsubr_capable(hdev))
1170 events[5] |= 0x20; /* Sniff Subrating */
1172 if (lmp_pause_enc_capable(hdev))
1173 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1175 if (lmp_ext_inq_capable(hdev))
1176 events[5] |= 0x40; /* Extended Inquiry Result */
1178 if (lmp_no_flush_capable(hdev))
1179 events[7] |= 0x01; /* Enhanced Flush Complete */
1181 if (lmp_lsto_capable(hdev))
1182 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1184 if (lmp_ssp_capable(hdev)) {
1185 events[6] |= 0x01; /* IO Capability Request */
1186 events[6] |= 0x02; /* IO Capability Response */
1187 events[6] |= 0x04; /* User Confirmation Request */
1188 events[6] |= 0x08; /* User Passkey Request */
1189 events[6] |= 0x10; /* Remote OOB Data Request */
1190 events[6] |= 0x20; /* Simple Pairing Complete */
1191 events[7] |= 0x04; /* User Passkey Notification */
1192 events[7] |= 0x08; /* Keypress Notification */
1193 events[7] |= 0x10; /* Remote Host Supported
1194 * Features Notification
1198 if (lmp_le_capable(hdev))
1199 events[7] |= 0x20; /* LE Meta-Event */
1201 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1203 if (lmp_le_capable(hdev)) {
1204 memset(events, 0, sizeof(events));
1206 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1207 sizeof(events), events);
1211 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1213 struct hci_dev *hdev = req->hdev;
1215 if (lmp_bredr_capable(hdev))
1218 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1220 if (lmp_le_capable(hdev))
1223 hci_setup_event_mask(req);
1225 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1226 * local supported commands HCI command.
1228 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1229 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1231 if (lmp_ssp_capable(hdev)) {
1232 /* When SSP is available, then the host features page
1233 * should also be available as well. However some
1234 * controllers list the max_page as 0 as long as SSP
1235 * has not been enabled. To achieve proper debugging
1236 * output, force the minimum max_page to 1 at least.
1238 hdev->max_page = 0x01;
1240 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1242 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1243 sizeof(mode), &mode);
1245 struct hci_cp_write_eir cp;
1247 memset(hdev->eir, 0, sizeof(hdev->eir));
1248 memset(&cp, 0, sizeof(cp));
1250 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1254 if (lmp_inq_rssi_capable(hdev))
1255 hci_setup_inquiry_mode(req);
1257 if (lmp_inq_tx_pwr_capable(hdev))
1258 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1260 if (lmp_ext_feat_capable(hdev)) {
1261 struct hci_cp_read_local_ext_features cp;
1264 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1268 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1270 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1275 static void hci_setup_link_policy(struct hci_request *req)
1277 struct hci_dev *hdev = req->hdev;
1278 struct hci_cp_write_def_link_policy cp;
1279 u16 link_policy = 0;
1281 if (lmp_rswitch_capable(hdev))
1282 link_policy |= HCI_LP_RSWITCH;
1283 if (lmp_hold_capable(hdev))
1284 link_policy |= HCI_LP_HOLD;
1285 if (lmp_sniff_capable(hdev))
1286 link_policy |= HCI_LP_SNIFF;
1287 if (lmp_park_capable(hdev))
1288 link_policy |= HCI_LP_PARK;
1290 cp.policy = cpu_to_le16(link_policy);
1291 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1294 static void hci_set_le_support(struct hci_request *req)
1296 struct hci_dev *hdev = req->hdev;
1297 struct hci_cp_write_le_host_supported cp;
1299 /* LE-only devices do not support explicit enablement */
1300 if (!lmp_bredr_capable(hdev))
1303 memset(&cp, 0, sizeof(cp));
1305 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1307 cp.simul = lmp_le_br_capable(hdev);
1310 if (cp.le != lmp_host_le_capable(hdev))
1311 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1315 static void hci_set_event_mask_page_2(struct hci_request *req)
1317 struct hci_dev *hdev = req->hdev;
1318 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1320 /* If Connectionless Slave Broadcast master role is supported
1321 * enable all necessary events for it.
1323 if (lmp_csb_master_capable(hdev)) {
1324 events[1] |= 0x40; /* Triggered Clock Capture */
1325 events[1] |= 0x80; /* Synchronization Train Complete */
1326 events[2] |= 0x10; /* Slave Page Response Timeout */
1327 events[2] |= 0x20; /* CSB Channel Map Change */
1330 /* If Connectionless Slave Broadcast slave role is supported
1331 * enable all necessary events for it.
1333 if (lmp_csb_slave_capable(hdev)) {
1334 events[2] |= 0x01; /* Synchronization Train Received */
1335 events[2] |= 0x02; /* CSB Receive */
1336 events[2] |= 0x04; /* CSB Timeout */
1337 events[2] |= 0x08; /* Truncated Page Complete */
1340 /* Enable Authenticated Payload Timeout Expired event if supported */
1341 if (lmp_ping_capable(hdev))
1344 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1347 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1349 struct hci_dev *hdev = req->hdev;
1352 /* Some Broadcom based Bluetooth controllers do not support the
1353 * Delete Stored Link Key command. They are clearly indicating its
1354 * absence in the bit mask of supported commands.
1356 * Check the supported commands and only if the the command is marked
1357 * as supported send it. If not supported assume that the controller
1358 * does not have actual support for stored link keys which makes this
1359 * command redundant anyway.
1361 * Some controllers indicate that they support handling deleting
1362 * stored link keys, but they don't. The quirk lets a driver
1363 * just disable this command.
1365 if (hdev->commands[6] & 0x80 &&
1366 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1367 struct hci_cp_delete_stored_link_key cp;
1369 bacpy(&cp.bdaddr, BDADDR_ANY);
1370 cp.delete_all = 0x01;
1371 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1375 if (hdev->commands[5] & 0x10)
1376 hci_setup_link_policy(req);
1378 if (lmp_le_capable(hdev)) {
1379 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1380 /* If the controller has a public BD_ADDR, then
1381 * by default use that one. If this is a LE only
1382 * controller without a public address, default
1383 * to the random address.
1385 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1386 hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
1388 hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
1391 hci_set_le_support(req);
1394 /* Read features beyond page 1 if available */
1395 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1396 struct hci_cp_read_local_ext_features cp;
1399 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Stage-4 controller initialization: queues optional HCI commands that
 * depend on advanced controller features (event mask page 2, CSB sync
 * train parameters, Secure Connections). Runs as an hci_request builder.
 * NOTE(review): closing braces are elided in this listing.
 */
1404 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1406 	struct hci_dev *hdev = req->hdev;
1408 	/* Set event mask page 2 if the HCI command for it is supported */
1409 	if (hdev->commands[22] & 0x04)
1410 		hci_set_event_mask_page_2(req);
1412 	/* Check for Synchronization Train support */
1413 	if (lmp_sync_train_capable(hdev))
1414 		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1416 	/* Enable Secure Connections if supported and configured */
1417 	if ((lmp_sc_capable(hdev) ||
1418 	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
1419 	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1421 		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1422 			    sizeof(support), &support);
/* Run the four synchronous init request stages against the controller
 * and, on the very first setup (HCI_SETUP), populate the per-device
 * debugfs entries grouped by capability (BR/EDR, SSP, sniff, LE).
 * Returns 0 on success or the first failing __hci_req_sync() error.
 * NOTE(review): early-return lines between stages are elided here.
 */
1426 static int __hci_init(struct hci_dev *hdev)
1430 	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1434 	/* The Device Under Test (DUT) mode is special and available for
1435 	 * all controller types. So just create it early on.
1437 	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1438 		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1442 	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1443 	 * BR/EDR/LE type controllers. AMP controllers only need the
1446 	if (hdev->dev_type != HCI_BREDR)
1449 	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1453 	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1457 	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1461 	/* Only create debugfs entries during the initial setup
1462 	 * phase and not every time the controller gets powered on.
1464 	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1467 	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1469 	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1470 			   &hdev->manufacturer);
1471 	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1472 	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1473 	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1475 	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
	/* BR/EDR-only entries */
1477 	if (lmp_bredr_capable(hdev)) {
1478 		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1479 				    hdev, &inquiry_cache_fops);
1480 		debugfs_create_file("link_keys", 0400, hdev->debugfs,
1481 				    hdev, &link_keys_fops);
1482 		debugfs_create_file("dev_class", 0444, hdev->debugfs,
1483 				    hdev, &dev_class_fops);
1484 		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1485 				    hdev, &voice_setting_fops);
	/* Secure Simple Pairing entries */
1488 	if (lmp_ssp_capable(hdev)) {
1489 		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1490 				    hdev, &auto_accept_delay_fops);
1491 		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1492 				    hdev, &ssp_debug_mode_fops);
1493 		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1494 				    hdev, &force_sc_support_fops);
1495 		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1496 				    hdev, &sc_only_mode_fops);
	/* Sniff-mode tuning entries */
1499 	if (lmp_sniff_capable(hdev)) {
1500 		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1501 				    hdev, &idle_timeout_fops);
1502 		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1503 				    hdev, &sniff_min_interval_fops);
1504 		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1505 				    hdev, &sniff_max_interval_fops);
	/* LE-only entries */
1508 	if (lmp_le_capable(hdev)) {
1509 		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1510 				  &hdev->le_white_list_size);
1511 		debugfs_create_file("static_address", 0444, hdev->debugfs,
1512 				    hdev, &static_address_fops);
1513 		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
1514 				    hdev, &own_address_type_fops);
1515 		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1516 				    hdev, &long_term_keys_fops);
1517 		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1518 				    hdev, &conn_min_interval_fops);
1519 		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1520 				    hdev, &conn_max_interval_fops);
1521 		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1522 				    &lowpan_debugfs_fops);
/* Request builder: write the scan enable setting (opt carries the
 * inquiry/page scan bitmask for HCI_OP_WRITE_SCAN_ENABLE).
 */
1528 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1532 	BT_DBG("%s %x", req->hdev->name, scan);
1534 	/* Inquiry and Page scans */
1535 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request builder: write the authentication enable setting taken
 * from opt (HCI_OP_WRITE_AUTH_ENABLE).
 */
1538 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1542 	BT_DBG("%s %x", req->hdev->name, auth);
1544 	/* Authentication */
1545 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request builder: write the encryption mode taken from opt
 * (HCI_OP_WRITE_ENCRYPT_MODE).
 */
1548 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1552 	BT_DBG("%s %x", req->hdev->name, encrypt);
1555 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request builder: write the default link policy, converting opt to
 * little-endian wire format first.
 */
1558 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1560 	__le16 policy = cpu_to_le16(opt);
1562 	BT_DBG("%s %x", req->hdev->name, policy);
1564 	/* Default link policy */
1565 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1568 /* Get HCI device by index.
1569  * Device is held on return. */
/* Walks hci_dev_list under the read lock; returns the matching device
 * with a reference taken via hci_dev_hold(), or NULL (the hdev
 * initializer) when no device has the given index.
 */
1570 struct hci_dev *hci_dev_get(int index)
1572 	struct hci_dev *hdev = NULL, *d;
1574 	BT_DBG("%d", index);
1579 	read_lock(&hci_dev_list_lock);
1580 	list_for_each_entry(d, &hci_dev_list, list) {
1581 		if (d->id == index) {
1582 			hdev = hci_dev_hold(d);
1586 	read_unlock(&hci_dev_list_lock);
1590 /* ---- Inquiry support ---- */
/* Return whether a device discovery procedure is currently running,
 * i.e. the discovery state machine is in FINDING or RESOLVING.
 */
1592 bool hci_discovery_active(struct hci_dev *hdev)
1594 	struct discovery_state *discov = &hdev->discovery;
1596 	switch (discov->state) {
1597 	case DISCOVERY_FINDING:
1598 	case DISCOVERY_RESOLVING:
/* Transition the discovery state machine, emitting mgmt_discovering()
 * events on the STOPPED and FINDING transitions. No-op when the state
 * is unchanged.
 */
1606 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1608 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1610 	if (hdev->discovery.state == state)
1614 	case DISCOVERY_STOPPED:
		/* Suppress the "stopped" event for a discovery that never
		 * actually started (was still in STARTING).
		 */
1615 		if (hdev->discovery.state != DISCOVERY_STARTING)
1616 			mgmt_discovering(hdev, 0);
1618 	case DISCOVERY_STARTING:
1620 	case DISCOVERY_FINDING:
1621 		mgmt_discovering(hdev, 1);
1623 	case DISCOVERY_RESOLVING:
1625 	case DISCOVERY_STOPPING:
1629 	hdev->discovery.state = state;
/* Drop every entry from the inquiry cache and reinitialize the
 * unknown/resolve sub-lists. Safe iteration via _safe since entries
 * are being removed.
 */
1632 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1634 	struct discovery_state *cache = &hdev->discovery;
1635 	struct inquiry_entry *p, *n;
1637 	list_for_each_entry_safe(p, n, &cache->all, all) {
1642 	INIT_LIST_HEAD(&cache->unknown);
1643 	INIT_LIST_HEAD(&cache->resolve);
/* Find an inquiry cache entry by bdaddr in the "all" list; returns
 * the entry or NULL (return elided in this listing).
 */
1646 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1649 	struct discovery_state *cache = &hdev->discovery;
1650 	struct inquiry_entry *e;
1652 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1654 	list_for_each_entry(e, &cache->all, all) {
1655 		if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry whose remote name is still unknown, searching the
 * "unknown" sub-list by bdaddr.
 */
1662 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1665 	struct discovery_state *cache = &hdev->discovery;
1666 	struct inquiry_entry *e;
1668 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1670 	list_for_each_entry(e, &cache->unknown, list) {
1671 		if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry in the name-resolve sub-list. BDADDR_ANY matches the
 * first entry with the requested name_state; otherwise match on the
 * exact bdaddr.
 */
1678 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1682 	struct discovery_state *cache = &hdev->discovery;
1683 	struct inquiry_entry *e;
1685 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1687 	list_for_each_entry(e, &cache->resolve, list) {
1688 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1690 		if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert @ie into the resolve list so it stays ordered: entries
 * with NAME_PENDING first, then descending by RSSI magnitude. The
 * walk finds the first weaker-signal non-pending entry to insert
 * before.
 */
1697 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1698 				      struct inquiry_entry *ie)
1700 	struct discovery_state *cache = &hdev->discovery;
1701 	struct list_head *pos = &cache->resolve;
1702 	struct inquiry_entry *p;
1704 	list_del(&ie->list);
1706 	list_for_each_entry(p, &cache->resolve, list) {
1707 		if (p->name_state != NAME_PENDING &&
1708 		    abs(p->data.rssi) >= abs(ie->data.rssi))
1713 	list_add(&ie->list, pos);
/* Insert or refresh an inquiry result in the discovery cache.
 * Updates *ssp from the result's SSP mode, re-sorts the resolve list
 * when an existing NAME_NEEDED entry's RSSI changed, and allocates a
 * new entry (GFP_ATOMIC) when the address is not cached yet.
 * NOTE(review): the return statements are elided in this listing; the
 * bool result appears tied to whether a name request is still needed
 * (NAME_NOT_KNOWN check at the end) — confirm against full source.
 */
1716 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1717 			      bool name_known, bool *ssp)
1719 	struct discovery_state *cache = &hdev->discovery;
1720 	struct inquiry_entry *ie;
1722 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
	/* A fresh inquiry result invalidates any stored OOB data */
1724 	hci_remove_remote_oob_data(hdev, &data->bdaddr);
1727 		*ssp = data->ssp_mode;
1729 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1731 		if (ie->data.ssp_mode && ssp)
1734 		if (ie->name_state == NAME_NEEDED &&
1735 		    data->rssi != ie->data.rssi) {
1736 			ie->data.rssi = data->rssi;
1737 			hci_inquiry_cache_update_resolve(hdev, ie);
1743 	/* Entry not in the cache. Add new one. */
1744 	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1748 	list_add(&ie->all, &cache->all);
1751 		ie->name_state = NAME_KNOWN;
1753 		ie->name_state = NAME_NOT_KNOWN;
1754 		list_add(&ie->list, &cache->unknown);
1758 	if (name_known && ie->name_state != NAME_KNOWN &&
1759 	    ie->name_state != NAME_PENDING) {
1760 		ie->name_state = NAME_KNOWN;
1761 		list_del(&ie->list);
1764 	memcpy(&ie->data, data, sizeof(*data));
1765 	ie->timestamp = jiffies;
1766 	cache->timestamp = jiffies;
1768 	if (ie->name_state == NAME_NOT_KNOWN)
/* Serialize up to @num cached inquiry results into @buf as an array
 * of struct inquiry_info, for copying to userspace by the caller.
 * Returns the number of entries copied (return elided in listing).
 */
1774 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1776 	struct discovery_state *cache = &hdev->discovery;
1777 	struct inquiry_info *info = (struct inquiry_info *) buf;
1778 	struct inquiry_entry *e;
1781 	list_for_each_entry(e, &cache->all, all) {
1782 		struct inquiry_data *data = &e->data;
1787 		bacpy(&info->bdaddr, &data->bdaddr);
1788 		info->pscan_rep_mode = data->pscan_rep_mode;
1789 		info->pscan_period_mode = data->pscan_period_mode;
1790 		info->pscan_mode = data->pscan_mode;
1791 		memcpy(info->dev_class, data->dev_class, 3);
1792 		info->clock_offset = data->clock_offset;
1798 	BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder: start an inquiry using the parameters from the
 * hci_inquiry_req passed via opt. Bails out if an inquiry is already
 * in progress (HCI_INQUIRY flag set).
 */
1802 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1804 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1805 	struct hci_dev *hdev = req->hdev;
1806 	struct hci_cp_inquiry cp;
1808 	BT_DBG("%s", hdev->name);
1810 	if (test_bit(HCI_INQUIRY, &hdev->flags))
1814 	memcpy(&cp.lap, &ir->lap, 3);
1815 	cp.length = ir->length;
1816 	cp.num_rsp = ir->num_rsp;
1817 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit() action: only abort the wait when a signal is pending,
 * so the inquiry wait below is interruptible.
 */
1820 static int wait_inquiry(void *word)
1823 	return signal_pending(current);
/* HCIINQUIRY ioctl backend: validate the request, optionally flush a
 * stale cache and run a fresh inquiry, then copy cached results back
 * to userspace. Rejects user-channel devices, AMP controllers and
 * devices with BR/EDR disabled.
 */
1826 int hci_inquiry(void __user *arg)
1828 	__u8 __user *ptr = arg;
1829 	struct hci_inquiry_req ir;
1830 	struct hci_dev *hdev;
1831 	int err = 0, do_inquiry = 0, max_rsp;
1835 	if (copy_from_user(&ir, ptr, sizeof(ir)))
1838 	hdev = hci_dev_get(ir.dev_id);
1842 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1847 	if (hdev->dev_type != HCI_BREDR) {
1852 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
	/* Flush and re-inquire when the cache is stale, empty, or the
	 * caller explicitly asked for a flush.
	 */
1858 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1859 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1860 		hci_inquiry_cache_flush(hdev);
1863 	hci_dev_unlock(hdev);
1865 	timeo = ir.length * msecs_to_jiffies(2000);
1868 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1873 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1874 		 * cleared). If it is interrupted by a signal, return -EINTR.
1876 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1877 				TASK_INTERRUPTIBLE))
1881 	/* for unlimited number of responses we will use buffer with
1884 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1886 	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
1887 	 * copy it to the user space.
1889 	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1896 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1897 	hci_dev_unlock(hdev);
1899 	BT_DBG("num_rsp %d", ir.num_rsp);
1901 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1903 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Bring an HCI device up: validate preconditions (not unregistering,
 * not rfkilled outside setup, has a usable address), call the driver
 * open(), run the optional driver setup() and the __hci_init()
 * sequence, then announce HCI_DEV_UP / mgmt_powered(). On init
 * failure, tear everything back down. Error paths with gotos are
 * elided in this listing.
 */
1916 static int hci_dev_do_open(struct hci_dev *hdev)
1920 	BT_DBG("%s %p", hdev->name, hdev);
1924 	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1929 	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1930 		/* Check for rfkill but allow the HCI setup stage to
1931 		 * proceed (which in itself doesn't cause any RF activity).
1933 		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1938 		/* Check for valid public address or a configured static
1939 		 * random address, but let the HCI setup proceed to
1940 		 * be able to determine if there is a public address
1943 		 * In case of user channel usage, it is not important
1944 		 * if a public address or static random address is
1947 		 * This check is only valid for BR/EDR controllers
1948 		 * since AMP controllers do not have an address.
1950 		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1951 		    hdev->dev_type == HCI_BREDR &&
1952 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1953 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1954 			ret = -EADDRNOTAVAIL;
1959 	if (test_bit(HCI_UP, &hdev->flags)) {
1964 	if (hdev->open(hdev)) {
1969 	atomic_set(&hdev->cmd_cnt, 1);
1970 	set_bit(HCI_INIT, &hdev->flags);
1972 	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1973 		ret = hdev->setup(hdev);
1976 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1977 			set_bit(HCI_RAW, &hdev->flags);
		/* Full controller init only for managed, non-raw devices */
1979 		if (!test_bit(HCI_RAW, &hdev->flags) &&
1980 		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1981 			ret = __hci_init(hdev);
1984 	clear_bit(HCI_INIT, &hdev->flags);
1988 		set_bit(HCI_UP, &hdev->flags);
1989 		hci_notify(hdev, HCI_DEV_UP);
1990 		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1991 		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1992 		    hdev->dev_type == HCI_BREDR) {
1994 			mgmt_powered(hdev, 1);
1995 			hci_dev_unlock(hdev);
1998 		/* Init failed, cleanup */
1999 		flush_work(&hdev->tx_work);
2000 		flush_work(&hdev->cmd_work);
2001 		flush_work(&hdev->rx_work);
2003 		skb_queue_purge(&hdev->cmd_q);
2004 		skb_queue_purge(&hdev->rx_q);
2009 		if (hdev->sent_cmd) {
2010 			kfree_skb(hdev->sent_cmd);
2011 			hdev->sent_cmd = NULL;
2019 	hci_req_unlock(hdev);
2023 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl backend: look up the device, cancel a pending
 * auto-power-off, flush req_workqueue so any in-flight setup has
 * finished, then call hci_dev_do_open().
 */
2025 int hci_dev_open(__u16 dev)
2027 	struct hci_dev *hdev;
2030 	hdev = hci_dev_get(dev);
2034 	/* We need to ensure that no other power on/off work is pending
2035 	 * before proceeding to call hci_dev_do_open. This is
2036 	 * particularly important if the setup procedure has not yet
2039 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2040 		cancel_delayed_work(&hdev->power_off);
2042 	/* After this call it is guaranteed that the setup procedure
2043 	 * has finished. This means that error conditions like RFKILL
2044 	 * or no valid public or static random address apply.
2046 	flush_workqueue(hdev->req_workqueue);
2048 	err = hci_dev_do_open(hdev);
/* Bring an HCI device down: cancel timers and delayed work, flush the
 * RX/TX work, drop discoverable state, flush connection and inquiry
 * state, optionally issue a controller reset (quirk-gated), purge all
 * queues and the last sent command, and notify mgmt of power-off for
 * BR/EDR devices that were not auto-off.
 */
2055 static int hci_dev_do_close(struct hci_dev *hdev)
2057 	BT_DBG("%s %p", hdev->name, hdev);
2059 	cancel_delayed_work(&hdev->power_off);
2061 	hci_req_cancel(hdev, ENODEV);
	/* Already down: just stop the command timer and return */
2064 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2065 		del_timer_sync(&hdev->cmd_timer);
2066 		hci_req_unlock(hdev);
2070 	/* Flush RX and TX works */
2071 	flush_work(&hdev->tx_work);
2072 	flush_work(&hdev->rx_work);
2074 	if (hdev->discov_timeout > 0) {
2075 		cancel_delayed_work(&hdev->discov_off);
2076 		hdev->discov_timeout = 0;
2077 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2078 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2081 	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2082 		cancel_delayed_work(&hdev->service_cache);
2084 	cancel_delayed_work_sync(&hdev->le_scan_disable);
2087 	hci_inquiry_cache_flush(hdev);
2088 	hci_conn_hash_flush(hdev);
2089 	hci_dev_unlock(hdev);
2091 	hci_notify(hdev, HCI_DEV_DOWN);
	/* Reset the controller on close when the quirk asks for it and
	 * the device is neither raw nor auto-off.
	 */
2097 	skb_queue_purge(&hdev->cmd_q);
2098 	atomic_set(&hdev->cmd_cnt, 1);
2099 	if (!test_bit(HCI_RAW, &hdev->flags) &&
2100 	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2101 	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2102 		set_bit(HCI_INIT, &hdev->flags);
2103 		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2104 		clear_bit(HCI_INIT, &hdev->flags);
2107 	/* flush cmd  work */
2108 	flush_work(&hdev->cmd_work);
2111 	skb_queue_purge(&hdev->rx_q);
2112 	skb_queue_purge(&hdev->cmd_q);
2113 	skb_queue_purge(&hdev->raw_q);
2115 	/* Drop last sent command */
2116 	if (hdev->sent_cmd) {
2117 		del_timer_sync(&hdev->cmd_timer);
2118 		kfree_skb(hdev->sent_cmd);
2119 		hdev->sent_cmd = NULL;
2122 	kfree_skb(hdev->recv_evt);
2123 	hdev->recv_evt = NULL;
2125 	/* After this point our queues are empty
2126 	 * and no tasks are scheduled. */
	/* Clear everything except persistent flags */
2131 	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2133 	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2134 		if (hdev->dev_type == HCI_BREDR) {
2136 			mgmt_powered(hdev, 0);
2137 			hci_dev_unlock(hdev);
2141 	/* Controller radio is available but is currently powered down */
2142 	hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2144 	memset(hdev->eir, 0, sizeof(hdev->eir));
2145 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2147 	hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl backend: reject user-channel devices, cancel a
 * pending auto-power-off, then call hci_dev_do_close().
 */
2153 int hci_dev_close(__u16 dev)
2155 	struct hci_dev *hdev;
2158 	hdev = hci_dev_get(dev);
2162 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2167 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2168 		cancel_delayed_work(&hdev->power_off);
2170 	err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl backend: for an up, non-user-channel device, drop
 * pending RX/command skbs, flush inquiry/connection state, reset the
 * flow-control counters and (for non-raw devices) issue an HCI reset.
 */
2177 int hci_dev_reset(__u16 dev)
2179 	struct hci_dev *hdev;
2182 	hdev = hci_dev_get(dev);
2188 	if (!test_bit(HCI_UP, &hdev->flags)) {
2193 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
	/* Drop queues */
2199 	skb_queue_purge(&hdev->rx_q);
2200 	skb_queue_purge(&hdev->cmd_q);
2203 	hci_inquiry_cache_flush(hdev);
2204 	hci_conn_hash_flush(hdev);
2205 	hci_dev_unlock(hdev);
2210 	atomic_set(&hdev->cmd_cnt, 1);
2211 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2213 	if (!test_bit(HCI_RAW, &hdev->flags))
2214 		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2217 	hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl backend: zero the device statistics counters.
 * Rejected for user-channel devices.
 */
2222 int hci_dev_reset_stat(__u16 dev)
2224 	struct hci_dev *hdev;
2227 	hdev = hci_dev_get(dev);
2231 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2236 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Generic HCISET* ioctl dispatcher: copies a hci_dev_req from
 * userspace and applies the requested setting, either by running a
 * synchronous HCI request (auth/encrypt/scan/link policy) or by
 * updating hdev fields directly (link mode, packet type, MTUs).
 * Rejects user-channel devices, AMP controllers and devices with
 * BR/EDR disabled. Case labels are partially elided in this listing.
 */
2243 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2245 	struct hci_dev *hdev;
2246 	struct hci_dev_req dr;
2249 	if (copy_from_user(&dr, arg, sizeof(dr)))
2252 	hdev = hci_dev_get(dr.dev_id);
2256 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2261 	if (hdev->dev_type != HCI_BREDR) {
2266 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2273 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2278 		if (!lmp_encrypt_capable(hdev)) {
2283 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
2284 			/* Auth must be enabled first */
2285 			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2291 		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2296 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2301 		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2305 	case HCISETLINKMODE:
2306 		hdev->link_mode = ((__u16) dr.dev_opt) &
2307 				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
2311 		hdev->pkt_type = (__u16) dr.dev_opt;
	/* dev_opt packs MTU in the high half and packet count in the low */
2315 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2316 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2320 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2321 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl backend: build a list of (dev_id, flags) pairs
 * for up to dev_num registered devices and copy it to userspace. Also
 * cancels pending auto-power-off and marks non-mgmt devices pairable
 * as a side effect of enumeration.
 */
2334 int hci_get_dev_list(void __user *arg)
2336 	struct hci_dev *hdev;
2337 	struct hci_dev_list_req *dl;
2338 	struct hci_dev_req *dr;
2339 	int n = 0, size, err;
2342 	if (get_user(dev_num, (__u16 __user *) arg))
	/* Bound the allocation to two pages' worth of entries */
2345 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2348 	size = sizeof(*dl) + dev_num * sizeof(*dr);
2350 	dl = kzalloc(size, GFP_KERNEL);
2356 	read_lock(&hci_dev_list_lock);
2357 	list_for_each_entry(hdev, &hci_dev_list, list) {
2358 		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2359 			cancel_delayed_work(&hdev->power_off);
2361 		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2362 			set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2364 		(dr + n)->dev_id  = hdev->id;
2365 		(dr + n)->dev_opt = hdev->flags;
2370 	read_unlock(&hci_dev_list_lock);
	/* Shrink the copy to the number of devices actually found */
2373 	size = sizeof(*dl) + n * sizeof(*dr);
2375 	err = copy_to_user(arg, dl, size);
2378 	return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl backend: fill a hci_dev_info snapshot (address,
 * type, flags, MTUs, stats, features) for the requested device and
 * copy it back to userspace. LE-only controllers report their LE MTU
 * values in the ACL fields.
 */
2381 int hci_get_dev_info(void __user *arg)
2383 	struct hci_dev *hdev;
2384 	struct hci_dev_info di;
2387 	if (copy_from_user(&di, arg, sizeof(di)))
2390 	hdev = hci_dev_get(di.dev_id);
2394 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2395 		cancel_delayed_work_sync(&hdev->power_off);
2397 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2398 		set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2400 	strcpy(di.name, hdev->name);
2401 	di.bdaddr   = hdev->bdaddr;
	/* Low nibble: bus type; bits 4-5: device type */
2402 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2403 	di.flags    = hdev->flags;
2404 	di.pkt_type = hdev->pkt_type;
2405 	if (lmp_bredr_capable(hdev)) {
2406 		di.acl_mtu  = hdev->acl_mtu;
2407 		di.acl_pkts = hdev->acl_pkts;
2408 		di.sco_mtu  = hdev->sco_mtu;
2409 		di.sco_pkts = hdev->sco_pkts;
2411 		di.acl_mtu  = hdev->le_mtu;
2412 		di.acl_pkts = hdev->le_pkts;
2416 	di.link_policy = hdev->link_policy;
2417 	di.link_mode   = hdev->link_mode;
2419 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2420 	memcpy(&di.features, &hdev->features, sizeof(di.features));
2422 	if (copy_to_user(arg, &di, sizeof(di)))
2430 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: track the RFKILLED flag and close the device when
 * it gets blocked — unless it is still in the HCI setup stage or
 * claimed by a user channel.
 */
2432 static int hci_rfkill_set_block(void *data, bool blocked)
2434 	struct hci_dev *hdev = data;
2436 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2438 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2442 		set_bit(HCI_RFKILLED, &hdev->dev_flags);
2443 		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2444 			hci_dev_do_close(hdev);
2446 		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations table wiring the block/unblock callback above */
2452 static const struct rfkill_ops hci_rfkill_ops = {
2453 	.set_block = hci_rfkill_set_block,
/* Deferred power-on work: open the device, report failure to mgmt,
 * re-check setup-time error conditions (rfkill, missing address) and
 * power back off if they still hold, arm the auto-off timer, and
 * announce the new index to mgmt once setup completes.
 */
2456 static void hci_power_on(struct work_struct *work)
2458 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2461 	BT_DBG("%s", hdev->name);
2463 	err = hci_dev_do_open(hdev);
2465 		mgmt_set_powered_failed(hdev, err);
2469 	/* During the HCI setup phase, a few error conditions are
2470 	 * ignored and they need to be checked now. If they are still
2471 	 * valid, it is important to turn the device back off.
2473 	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2474 	    (hdev->dev_type == HCI_BREDR &&
2475 	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2476 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2477 		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2478 		hci_dev_do_close(hdev);
2479 	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2480 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2481 				   HCI_AUTO_OFF_TIMEOUT);
2484 	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2485 		mgmt_index_added(hdev);
/* Deferred power-off work: simply closes the device. */
2488 static void hci_power_off(struct work_struct *work)
2490 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2493 	BT_DBG("%s", hdev->name);
2495 	hci_dev_do_close(hdev);
/* Delayed work: discoverable timeout expired — let mgmt turn
 * discoverable mode off.
 */
2498 static void hci_discov_off(struct work_struct *work)
2500 	struct hci_dev *hdev;
2502 	hdev = container_of(work, struct hci_dev, discov_off.work);
2504 	BT_DBG("%s", hdev->name);
2506 	mgmt_discoverable_timeout(hdev);
/* Remove and free every registered UUID on the device. */
2509 int hci_uuids_clear(struct hci_dev *hdev)
2511 	struct bt_uuid *uuid, *tmp;
2513 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2514 		list_del(&uuid->list);
/* Remove and free every stored BR/EDR link key. */
2521 int hci_link_keys_clear(struct hci_dev *hdev)
2523 	struct list_head *p, *n;
2525 	list_for_each_safe(p, n, &hdev->link_keys) {
2526 		struct link_key *key;
2528 		key = list_entry(p, struct link_key, list);
/* Remove and free every stored SMP Long Term Key. */
2537 int hci_smp_ltks_clear(struct hci_dev *hdev)
2539 	struct smp_ltk *k, *tmp;
2541 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Remove and free every stored SMP Identity Resolving Key. */
2549 void hci_smp_irks_clear(struct hci_dev *hdev)
2551 	struct smp_irk *k, *tmp;
2553 	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
/* Look up a stored BR/EDR link key by remote address; NULL if absent
 * (return elided in this listing).
 */
2559 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2563 	list_for_each_entry(k, &hdev->link_keys, list)
2564 		if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored
 * persistently, based on the key type and both sides' authentication
 * requirements (dedicated vs. general vs. no bonding).
 */
2570 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2571 			       u8 key_type, u8 old_key_type)
	/* Legacy key types (< combination) */
2574 	if (key_type < 0x03)
2577 	/* Debug keys are insecure so don't store them persistently */
2578 	if (key_type == HCI_LK_DEBUG_COMBINATION)
2581 	/* Changed combination key and there's no previous one */
2582 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2585 	/* Security mode 3 case */
2589 	/* Neither local nor remote side had no-bonding as requirement */
2590 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2593 	/* Local side had dedicated bonding as requirement */
2594 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2597 	/* Remote side had dedicated bonding as requirement */
2598 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2601 	/* If none of the above criteria match, then don't store the key
/* True when the LTK type (STK or LTK) belongs to the master role. */
2606 static bool ltk_type_master(u8 type)
2608 	if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
/* Look up a Long Term Key by EDiv/Rand pair and role (master/slave).
 * Both identifiers must match for the key to be returned.
 */
2614 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2619 	list_for_each_entry(k, &hdev->long_term_keys, list) {
2620 		if (k->ediv != ediv ||
2621 		    memcmp(rand, k->rand, sizeof(k->rand)))
2624 		if (ltk_type_master(k->type) != master)
/* Look up a Long Term Key by remote address, address type and role. */
2633 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2634 				     u8 addr_type, bool master)
2638 	list_for_each_entry(k, &hdev->long_term_keys, list)
2639 		if (addr_type == k->bdaddr_type &&
2640 		    bacmp(bdaddr, &k->bdaddr) == 0 &&
2641 		    ltk_type_master(k->type) == master)
/* Resolve a Resolvable Private Address to an IRK. First try a direct
 * match on the cached RPA, then try every stored IRK cryptographically
 * via smp_irk_matches(), caching the RPA on success.
 */
2647 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2649 	struct smp_irk *irk;
2651 	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2652 		if (!bacmp(&irk->rpa, rpa))
2656 	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2657 		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2658 			bacpy(&irk->rpa, rpa);
/* Look up an IRK by the remote identity address and address type. */
2666 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2669 	struct smp_irk *irk;
2671 	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2672 		if (addr_type == irk->addr_type &&
2673 		    bacmp(bdaddr, &irk->bdaddr) == 0)
/* Store (or update) a BR/EDR link key. Reuses an existing entry for
 * the same address, works around controllers that report a changed
 * combination key with no previous key, preserves the old type for
 * changed combination keys, and notifies mgmt for new keys with the
 * persistence decision from hci_persistent_key().
 */
2680 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2681 		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2683 	struct link_key *key, *old_key;
2687 	old_key = hci_find_link_key(hdev, bdaddr);
2689 		old_key_type = old_key->type;
2692 		old_key_type = conn ? conn->key_type : 0xff;
2693 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
2696 		list_add(&key->list, &hdev->link_keys);
2699 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2701 	/* Some buggy controller combinations generate a changed
2702 	 * combination key for legacy pairing even when there's no
2704 	if (type == HCI_LK_CHANGED_COMBINATION &&
2705 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2706 		type = HCI_LK_COMBINATION;
2708 			conn->key_type = type;
2711 	bacpy(&key->bdaddr, bdaddr);
2712 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2713 	key->pin_len = pin_len;
2715 	if (type == HCI_LK_CHANGED_COMBINATION)
2716 		key->type = old_key_type;
2723 	persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2725 	mgmt_new_link_key(hdev, key, persistent);
2728 		conn->flush_key = !persistent;
/* Store (or update) an SMP Long Term Key for an address/role pair.
 * Non-resolvable random addresses (top bits != 0b11) are treated as
 * non-persistent; new LTK/slave-LTK keys are reported to mgmt.
 */
2733 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
2734 		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
2737 	struct smp_ltk *key, *old_key;
2738 	bool master = ltk_type_master(type);
2741 	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
2745 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
2748 		list_add(&key->list, &hdev->long_term_keys);
2751 	bacpy(&key->bdaddr, bdaddr);
2752 	key->bdaddr_type = addr_type;
2753 	memcpy(key->val, tk, sizeof(key->val));
2754 	key->authenticated = authenticated;
2756 	key->enc_size = enc_size;
2758 	memcpy(key->rand, rand, sizeof(key->rand));
	/* 0xc0 mask: only static random addresses are persistent */
2763 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2768 	if (type == HCI_SMP_LTK || type == HCI_SMP_LTK_SLAVE)
2769 		mgmt_new_ltk(hdev, key, persistent);
/* Store (or update) an Identity Resolving Key for an identity
 * address, recording the last-seen RPA alongside the key value.
 */
2774 int hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type,
2775 		u8 val[16], bdaddr_t *rpa)
2777 	struct smp_irk *irk;
2779 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2781 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2785 		bacpy(&irk->bdaddr, bdaddr);
2786 		irk->addr_type = addr_type;
2788 		list_add(&irk->list, &hdev->identity_resolving_keys);
2791 	memcpy(irk->val, val, 16);
2792 	bacpy(&irk->rpa, rpa);
/* Remove a stored BR/EDR link key by remote address, if present. */
2797 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2799 	struct link_key *key;
2801 	key = hci_find_link_key(hdev, bdaddr);
2805 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2807 	list_del(&key->list);
/* Remove every stored Long Term Key matching the remote address. */
2813 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2815 	struct smp_ltk *k, *tmp;
2817 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2818 		if (bacmp(bdaddr, &k->bdaddr))
2821 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2830 /* HCI command timer function */
/* Fires when the controller fails to respond to a command in time:
 * log the stalled opcode (if the command is still held in sent_cmd),
 * then re-open the command credit window and kick cmd_work so queued
 * commands can proceed.
 */
2831 static void hci_cmd_timeout(unsigned long arg)
2833 	struct hci_dev *hdev = (void *) arg;
2835 	if (hdev->sent_cmd) {
2836 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2837 		u16 opcode = __le16_to_cpu(sent->opcode);
2839 		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2841 		BT_ERR("%s command tx timeout", hdev->name);
2844 	atomic_set(&hdev->cmd_cnt, 1);
2845 	queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote Out-Of-Band pairing data by address. */
2848 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2851 	struct oob_data *data;
2853 	list_for_each_entry(data, &hdev->remote_oob_data, list)
2854 		if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Remove stored remote OOB data for an address, if present. */
2860 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2862 	struct oob_data *data;
2864 	data = hci_find_remote_oob_data(hdev, bdaddr);
2868 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2870 	list_del(&data->list);
/* Remove and free all stored remote OOB data entries. */
2876 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2878 	struct oob_data *data, *n;
2880 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2881 		list_del(&data->list);
/* Store remote OOB data with P-192 hash/randomizer only; the P-256
 * fields are zeroed to mark them unavailable. Reuses an existing
 * entry for the same address.
 */
2888 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2889 			    u8 *hash, u8 *randomizer)
2891 	struct oob_data *data;
2893 	data = hci_find_remote_oob_data(hdev, bdaddr);
2895 		data = kmalloc(sizeof(*data), GFP_ATOMIC);
2899 		bacpy(&data->bdaddr, bdaddr);
2900 		list_add(&data->list, &hdev->remote_oob_data);
2903 	memcpy(data->hash192, hash, sizeof(data->hash192));
2904 	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2906 	memset(data->hash256, 0, sizeof(data->hash256));
2907 	memset(data->randomizer256, 0, sizeof(data->randomizer256));
2909 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Store extended remote OOB data with both P-192 and P-256
 * hash/randomizer pairs. Reuses an existing entry for the address.
 */
2914 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2915 				u8 *hash192, u8 *randomizer192,
2916 				u8 *hash256, u8 *randomizer256)
2918 	struct oob_data *data;
2920 	data = hci_find_remote_oob_data(hdev, bdaddr);
2922 		data = kmalloc(sizeof(*data), GFP_ATOMIC);
2926 		bacpy(&data->bdaddr, bdaddr);
2927 		list_add(&data->list, &hdev->remote_oob_data);
2930 	memcpy(data->hash192, hash192, sizeof(data->hash192));
2931 	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
2933 	memcpy(data->hash256, hash256, sizeof(data->hash256));
2934 	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
2936 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Look up a blacklist entry by address and address type. */
2941 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2942 					 bdaddr_t *bdaddr, u8 type)
2944 	struct bdaddr_list *b;
2946 	list_for_each_entry(b, &hdev->blacklist, list) {
2947 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Remove and free every blacklist entry. */
2954 int hci_blacklist_clear(struct hci_dev *hdev)
2956 	struct list_head *p, *n;
2958 	list_for_each_safe(p, n, &hdev->blacklist) {
2959 		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add an address to the blacklist. Rejects BDADDR_ANY and duplicate
 * entries; on success reports the block to mgmt.
 */
2968 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2970 	struct bdaddr_list *entry;
2972 	if (!bacmp(bdaddr, BDADDR_ANY))
2975 	if (hci_blacklist_lookup(hdev, bdaddr, type))
2978 	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2982 	bacpy(&entry->bdaddr, bdaddr);
2983 	entry->bdaddr_type = type;
2985 	list_add(&entry->list, &hdev->blacklist);
2987 	return mgmt_device_blocked(hdev, bdaddr, type);
/* Remove an address from the blacklist; BDADDR_ANY clears the whole
 * list. On success reports the unblock to mgmt.
 */
2990 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2992 	struct bdaddr_list *entry;
2994 	if (!bacmp(bdaddr, BDADDR_ANY))
2995 		return hci_blacklist_clear(hdev);
2997 	entry = hci_blacklist_lookup(hdev, bdaddr, type);
3001 	list_del(&entry->list);
3004 	return mgmt_device_unblocked(hdev, bdaddr, type);
3007 /* This function requires the caller holds hdev->lock */
/* Look up stored LE connection parameters by address/type. */
3008 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3009 					       bdaddr_t *addr, u8 addr_type)
3011 	struct hci_conn_params *params;
3013 	list_for_each_entry(params, &hdev->le_conn_params, list) {
3014 		if (bacmp(&params->addr, addr) == 0 &&
3015 		    params->addr_type == addr_type) {
3023 /* This function requires the caller holds hdev->lock */
/* Add or update the preferred LE connection interval range for an
 * address. Updates in place when an entry exists; otherwise allocates
 * a new one (logging and returning on allocation failure).
 */
3024 void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3025 			 u16 conn_min_interval, u16 conn_max_interval)
3027 	struct hci_conn_params *params;
3029 	params = hci_conn_params_lookup(hdev, addr, addr_type);
3031 		params->conn_min_interval = conn_min_interval;
3032 		params->conn_max_interval = conn_max_interval;
3036 	params = kzalloc(sizeof(*params), GFP_KERNEL);
3038 		BT_ERR("Out of memory");
3042 	bacpy(&params->addr, addr);
3043 	params->addr_type = addr_type;
3044 	params->conn_min_interval = conn_min_interval;
3045 	params->conn_max_interval = conn_max_interval;
3047 	list_add(&params->list, &hdev->le_conn_params);
3049 	BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
3050 	       "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
3054 /* This function requires the caller holds hdev->lock */
3055 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3057 struct hci_conn_params *params;
3059 params = hci_conn_params_lookup(hdev, addr, addr_type);
3063 list_del(¶ms->list);
3066 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3069 /* This function requires the caller holds hdev->lock */
3070 void hci_conn_params_clear(struct hci_dev *hdev)
3072 struct hci_conn_params *params, *tmp;
3074 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3075 list_del(¶ms->list);
3079 BT_DBG("All LE connection parameters were removed");
/* Discovery / LE scan teardown handlers.
 * NOTE(review): lossy extract — error paths and braces are missing
 * (original line numbering jumps).
 */
/* Request-complete callback for the follow-up inquiry: on failure,
 * log and drop discovery back to DISCOVERY_STOPPED under hdev lock. */
3082 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3085 BT_ERR("Failed to start inquiry: status %d", status);
3088 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3089 hci_dev_unlock(hdev);
/* Runs after LE scanning has been disabled.  For pure LE discovery
 * we are done; for interleaved discovery, kick off a BR/EDR inquiry
 * using the general inquiry access code. */
3094 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3096 /* General inquiry access code (GIAC) */
3097 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3098 struct hci_request req;
3099 struct hci_cp_inquiry cp;
3103 BT_ERR("Failed to disable LE scanning: status %d", status);
3107 switch (hdev->discovery.type) {
3108 case DISCOV_TYPE_LE:
3110 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3111 hci_dev_unlock(hdev);
3114 case DISCOV_TYPE_INTERLEAVED:
3115 hci_req_init(&req, hdev);
3117 memset(&cp, 0, sizeof(cp));
3118 memcpy(&cp.lap, lap, sizeof(cp.lap));
3119 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3120 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Flush stale inquiry cache entries before the new inquiry. */
3124 hci_inquiry_cache_flush(hdev);
3126 err = hci_req_run(&req, inquiry_complete);
3128 BT_ERR("Inquiry request failed: err %d", err);
3129 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3132 hci_dev_unlock(hdev);
/* Delayed-work handler (hdev->le_scan_disable): send the
 * LE_SET_SCAN_ENABLE command with scanning disabled. */
3137 static void le_scan_disable_work(struct work_struct *work)
3139 struct hci_dev *hdev = container_of(work, struct hci_dev,
3140 le_scan_disable.work);
3141 struct hci_cp_le_set_scan_enable cp;
3142 struct hci_request req;
3145 BT_DBG("%s", hdev->name);
3147 hci_req_init(&req, hdev);
3149 memset(&cp, 0, sizeof(cp));
3150 cp.enable = LE_SCAN_DISABLE;
3151 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3153 err = hci_req_run(&req, le_scan_disable_work_complete);
3155 BT_ERR("Disable LE scanning request failed: err %d", err);
/* Device lifetime: allocation and release.
 * NOTE(review): lossy extract — NULL checks and returns are missing
 * (original line numbering jumps).
 */
3158 /* Alloc HCI device */
3159 struct hci_dev *hci_alloc_dev(void)
3161 struct hci_dev *hdev;
3163 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
/* Baseline BR/EDR capabilities and defaults. */
3167 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3168 hdev->esco_type = (ESCO_HV1);
3169 hdev->link_mode = (HCI_LM_ACCEPT);
3170 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3171 hdev->io_capability = 0x03; /* No Input No Output */
3172 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3173 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
/* Sniff-mode interval bounds (slots). */
3175 hdev->sniff_max_interval = 800;
3176 hdev->sniff_min_interval = 80;
/* LE scan and connection-interval defaults (controller units). */
3178 hdev->le_scan_interval = 0x0060;
3179 hdev->le_scan_window = 0x0030;
3180 hdev->le_conn_min_interval = 0x0028;
3181 hdev->le_conn_max_interval = 0x0038;
3183 mutex_init(&hdev->lock);
3184 mutex_init(&hdev->req_lock);
/* All per-device lists start empty. */
3186 INIT_LIST_HEAD(&hdev->mgmt_pending);
3187 INIT_LIST_HEAD(&hdev->blacklist);
3188 INIT_LIST_HEAD(&hdev->uuids);
3189 INIT_LIST_HEAD(&hdev->link_keys);
3190 INIT_LIST_HEAD(&hdev->long_term_keys);
3191 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3192 INIT_LIST_HEAD(&hdev->remote_oob_data);
3193 INIT_LIST_HEAD(&hdev->le_conn_params);
3194 INIT_LIST_HEAD(&hdev->conn_hash.list);
/* Work items driving the RX/CMD/TX state machines. */
3196 INIT_WORK(&hdev->rx_work, hci_rx_work);
3197 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3198 INIT_WORK(&hdev->tx_work, hci_tx_work);
3199 INIT_WORK(&hdev->power_on, hci_power_on);
3201 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3202 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3203 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3205 skb_queue_head_init(&hdev->rx_q);
3206 skb_queue_head_init(&hdev->cmd_q);
3207 skb_queue_head_init(&hdev->raw_q);
3209 init_waitqueue_head(&hdev->req_wait_q);
/* Arms hci_cmd_timeout when a command stays unanswered. */
3211 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
3213 hci_init_sysfs(hdev);
3214 discovery_init(hdev);
3218 EXPORT_SYMBOL(hci_alloc_dev);
3220 /* Free HCI device */
3221 void hci_free_dev(struct hci_dev *hdev)
3223 /* will free via device release */
3224 put_device(&hdev->dev);
3226 EXPORT_SYMBOL(hci_free_dev);
/* Register a controller with the HCI core: assign an index, create
 * workqueues, debugfs, crypto context, sysfs device and rfkill, then
 * queue initial power-on.  NOTE(review): lossy extract — error-label
 * definitions and several returns are missing (original numbering
 * jumps); the error unwinding below corresponds to goto targets not
 * visible here.
 */
3228 /* Register HCI device */
3229 int hci_register_dev(struct hci_dev *hdev)
/* A driver must provide open/close callbacks. */
3233 if (!hdev->open || !hdev->close)
3236 /* Do not allow HCI_AMP devices to register at index 0,
3237 * so the index can be used as the AMP controller ID.
/* BR/EDR may take index 0; AMP indices start at 1. */
3239 switch (hdev->dev_type) {
3241 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3244 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3253 sprintf(hdev->name, "hci%d", id);
3256 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* Single-threaded, high-priority workqueues for this device. */
3258 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3259 WQ_MEM_RECLAIM, 1, hdev->name);
3260 if (!hdev->workqueue) {
3265 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3266 WQ_MEM_RECLAIM, 1, hdev->name);
3267 if (!hdev->req_workqueue) {
3268 destroy_workqueue(hdev->workqueue);
3273 if (!IS_ERR_OR_NULL(bt_debugfs))
3274 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3276 dev_set_name(&hdev->dev, "%s", hdev->name);
/* AES-ECB context used by SMP (e.g. RPA resolution). */
3278 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3280 if (IS_ERR(hdev->tfm_aes)) {
3281 BT_ERR("Unable to create crypto context");
3282 error = PTR_ERR(hdev->tfm_aes);
3283 hdev->tfm_aes = NULL;
3287 error = device_add(&hdev->dev);
3291 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3292 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3295 if (rfkill_register(hdev->rfkill) < 0) {
3296 rfkill_destroy(hdev->rfkill);
3297 hdev->rfkill = NULL;
/* Reflect an already-blocked rfkill switch in dev_flags. */
3301 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3302 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3304 set_bit(HCI_SETUP, &hdev->dev_flags);
3305 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3307 if (hdev->dev_type == HCI_BREDR) {
3308 /* Assume BR/EDR support until proven otherwise (such as
3309 * through reading supported features during init.
3311 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3314 write_lock(&hci_dev_list_lock);
3315 list_add(&hdev->list, &hci_dev_list);
3316 write_unlock(&hci_dev_list_lock);
3318 hci_notify(hdev, HCI_DEV_REG);
3321 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwinding (labels not visible in this extract). */
3326 crypto_free_blkcipher(hdev->tfm_aes);
3328 destroy_workqueue(hdev->workqueue);
3329 destroy_workqueue(hdev->req_workqueue);
3331 ida_simple_remove(&hci_index_ida, hdev->id);
3335 EXPORT_SYMBOL(hci_register_dev);
/* Tear down everything hci_register_dev set up, in reverse order,
 * and purge all per-device state lists under hdev lock.
 * NOTE(review): lossy extract — conditionals guarding rfkill/debugfs
 * teardown are missing (original numbering jumps).
 */
3337 /* Unregister HCI device */
3338 void hci_unregister_dev(struct hci_dev *hdev)
3342 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3344 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3348 write_lock(&hci_dev_list_lock);
3349 list_del(&hdev->list);
3350 write_unlock(&hci_dev_list_lock);
3352 hci_dev_do_close(hdev);
/* Drop any partially reassembled packets. */
3354 for (i = 0; i < NUM_REASSEMBLY; i++)
3355 kfree_skb(hdev->reassembly[i]);
3357 cancel_work_sync(&hdev->power_on);
/* Only tell mgmt about removal if setup had completed. */
3359 if (!test_bit(HCI_INIT, &hdev->flags) &&
3360 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
3362 mgmt_index_removed(hdev);
3363 hci_dev_unlock(hdev);
3366 /* mgmt_index_removed should take care of emptying the
3368 BUG_ON(!list_empty(&hdev->mgmt_pending));
3370 hci_notify(hdev, HCI_DEV_UNREG);
3373 rfkill_unregister(hdev->rfkill);
3374 rfkill_destroy(hdev->rfkill);
3378 crypto_free_blkcipher(hdev->tfm_aes);
3380 device_del(&hdev->dev);
3382 debugfs_remove_recursive(hdev->debugfs);
3384 destroy_workqueue(hdev->workqueue);
3385 destroy_workqueue(hdev->req_workqueue);
/* Flush all persistent per-device state under hdev lock. */
3388 hci_blacklist_clear(hdev);
3389 hci_uuids_clear(hdev);
3390 hci_link_keys_clear(hdev);
3391 hci_smp_ltks_clear(hdev);
3392 hci_smp_irks_clear(hdev);
3393 hci_remote_oob_data_clear(hdev);
3394 hci_conn_params_clear(hdev);
3395 hci_dev_unlock(hdev);
3399 ida_simple_remove(&hci_index_ida, id);
3401 EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend/resume notifications and the driver RX entry point.
 * NOTE(review): lossy extract — returns and the error path of
 * hci_recv_frame are missing (original numbering jumps).
 */
3403 /* Suspend HCI device */
3404 int hci_suspend_dev(struct hci_dev *hdev)
3406 hci_notify(hdev, HCI_DEV_SUSPEND);
3409 EXPORT_SYMBOL(hci_suspend_dev);
3411 /* Resume HCI device */
3412 int hci_resume_dev(struct hci_dev *hdev)
3414 hci_notify(hdev, HCI_DEV_RESUME);
3417 EXPORT_SYMBOL(hci_resume_dev);
3419 /* Receive frame from HCI drivers */
3420 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
/* Accept frames only while the device is UP or initializing. */
3422 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3423 && !test_bit(HCI_INIT, &hdev->flags))) {
3429 bt_cb(skb)->incoming = 1;
/* Timestamp at ingress for monitor/sockets. */
3432 __net_timestamp(skb);
/* Queue to rx_q; hci_rx_work drains it on the device workqueue. */
3434 skb_queue_tail(&hdev->rx_q, skb);
3435 queue_work(hdev->workqueue, &hdev->rx_work);
3439 EXPORT_SYMBOL(hci_recv_frame);
/* Packet reassembly for drivers that deliver data in fragments.
 * NOTE(review): lossy extract — several case labels, length checks
 * and returns are missing (original numbering jumps).
 */
/* Accumulate fragments of one HCI packet into
 * hdev->reassembly[index]; when scb->expect hits 0 the complete
 * frame is handed to hci_recv_frame(). */
3441 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3442 int count, __u8 index)
3447 struct sk_buff *skb;
3448 struct bt_skb_cb *scb;
/* Reject unknown packet types and out-of-range slots. */
3450 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3451 index >= NUM_REASSEMBLY)
3454 skb = hdev->reassembly[index];
/* No partial packet yet: pick max payload and header size per type
 * and allocate a fresh skb. */
3458 case HCI_ACLDATA_PKT:
3459 len = HCI_MAX_FRAME_SIZE;
3460 hlen = HCI_ACL_HDR_SIZE;
3463 len = HCI_MAX_EVENT_SIZE;
3464 hlen = HCI_EVENT_HDR_SIZE;
3466 case HCI_SCODATA_PKT:
3467 len = HCI_MAX_SCO_SIZE;
3468 hlen = HCI_SCO_HDR_SIZE;
3472 skb = bt_skb_alloc(len, GFP_ATOMIC);
3476 scb = (void *) skb->cb;
3478 scb->pkt_type = type;
3480 hdev->reassembly[index] = skb;
/* Copy as much as is expected (header first, then payload). */
3484 scb = (void *) skb->cb;
3485 len = min_t(uint, scb->expect, count);
3487 memcpy(skb_put(skb, len), data, len);
/* Once a full header is in, read the payload length from it and
 * drop the packet if it would not fit the allocated tailroom. */
3496 if (skb->len == HCI_EVENT_HDR_SIZE) {
3497 struct hci_event_hdr *h = hci_event_hdr(skb);
3498 scb->expect = h->plen;
3500 if (skb_tailroom(skb) < scb->expect) {
3502 hdev->reassembly[index] = NULL;
3508 case HCI_ACLDATA_PKT:
3509 if (skb->len == HCI_ACL_HDR_SIZE) {
3510 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3511 scb->expect = __le16_to_cpu(h->dlen);
3513 if (skb_tailroom(skb) < scb->expect) {
3515 hdev->reassembly[index] = NULL;
3521 case HCI_SCODATA_PKT:
3522 if (skb->len == HCI_SCO_HDR_SIZE) {
3523 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3524 scb->expect = h->dlen;
3526 if (skb_tailroom(skb) < scb->expect) {
3528 hdev->reassembly[index] = NULL;
3535 if (scb->expect == 0) {
3536 /* Complete frame */
3538 bt_cb(skb)->pkt_type = type;
3539 hci_recv_frame(hdev, skb);
3541 hdev->reassembly[index] = NULL;
/* Fragment API for drivers that know the packet type: each type
 * gets its own reassembly slot (type - 1). */
3549 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3553 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3557 rem = hci_reassembly(hdev, type, data, count, type - 1);
3561 data += (count - rem);
3567 EXPORT_SYMBOL(hci_recv_fragment);
3569 #define STREAM_REASSEMBLY 0
/* Stream API for byte-stream drivers: the first byte of each frame
 * carries the packet type; a dedicated slot 0 is used. */
3571 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3577 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3580 struct { char type; } *pkt;
3582 /* Start of the frame */
3589 type = bt_cb(skb)->pkt_type;
3591 rem = hci_reassembly(hdev, type, data, count,
3596 data += (count - rem);
3602 EXPORT_SYMBOL(hci_recv_stream_fragment);
3604 /* ---- Interface to upper protocols ---- */
/* Add an upper-protocol callback set (L2CAP/SCO) to the global
 * hci_cb_list, guarded by hci_cb_list_lock. */
3606 int hci_register_cb(struct hci_cb *cb)
3608 BT_DBG("%p name %s", cb, cb->name);
3610 write_lock(&hci_cb_list_lock);
3611 list_add(&cb->list, &hci_cb_list);
3612 write_unlock(&hci_cb_list_lock);
3616 EXPORT_SYMBOL(hci_register_cb);
/* Remove a previously registered callback set. */
3618 int hci_unregister_cb(struct hci_cb *cb)
3620 BT_DBG("%p name %s", cb, cb->name);
3622 write_lock(&hci_cb_list_lock);
3623 list_del(&cb->list);
3624 write_unlock(&hci_cb_list_lock);
3628 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one frame to the driver, mirroring it to the monitor and —
 * when promiscuous listeners exist — to raw sockets first.
 * NOTE(review): lossy extract (original numbering jumps). */
3630 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3632 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
/* Timestamp at egress. */
3635 __net_timestamp(skb);
3637 /* Send copy to monitor */
3638 hci_send_to_monitor(hdev, skb);
3640 if (atomic_read(&hdev->promisc)) {
3641 /* Send copy to the sockets */
3642 hci_send_to_sock(hdev, skb);
3645 /* Get rid of skb owner, prior to sending to the driver. */
3648 if (hdev->send(hdev, skb) < 0)
3649 BT_ERR("%s sending frame failed", hdev->name);
/* Asynchronous HCI request machinery: build a list of commands on
 * req->cmd_q, then splice them onto hdev->cmd_q in one atomic step.
 * NOTE(review): lossy extract — early returns and error-flag checks
 * are missing (original numbering jumps).
 */
/* Initialize an empty request bound to hdev. */
3652 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3654 skb_queue_head_init(&req->cmd_q);
/* Submit the request: tag the last command with the completion
 * callback, splice everything onto the device command queue and
 * kick cmd_work. */
3659 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3661 struct hci_dev *hdev = req->hdev;
3662 struct sk_buff *skb;
3663 unsigned long flags;
3665 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3667 /* If an error occured during request building, remove all HCI
3668 * commands queued on the HCI request queue.
3671 skb_queue_purge(&req->cmd_q);
3675 /* Do not allow empty requests */
3676 if (skb_queue_empty(&req->cmd_q))
/* Completion callback rides on the last command's control block. */
3679 skb = skb_peek_tail(&req->cmd_q);
3680 bt_cb(skb)->req.complete = complete;
3682 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3683 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3684 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3686 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Build a command skb: header (opcode + length) followed by the
 * parameter block. */
3691 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
3692 u32 plen, const void *param)
3694 int len = HCI_COMMAND_HDR_SIZE + plen;
3695 struct hci_command_hdr *hdr;
3696 struct sk_buff *skb;
3698 skb = bt_skb_alloc(len, GFP_ATOMIC);
3702 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
3703 hdr->opcode = cpu_to_le16(opcode);
3707 memcpy(skb_put(skb, plen), param, plen);
3709 BT_DBG("skb len %d", skb->len);
3711 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
3716 /* Send HCI command */
/* Queue a single stand-alone command directly on hdev->cmd_q. */
3717 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3720 struct sk_buff *skb;
3722 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3724 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3726 BT_ERR("%s no memory for command", hdev->name);
3730 /* Stand-alone HCI commands must be flaged as
3731 * single-command requests.
3733 bt_cb(skb)->req.start = true;
3735 skb_queue_tail(&hdev->cmd_q, skb);
3736 queue_work(hdev->workqueue, &hdev->cmd_work);
3741 /* Queue a command to an asynchronous HCI request */
/* Like hci_req_add() but also records the event opcode the caller
 * expects as completion (0 = default Command Complete/Status). */
3742 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3743 const void *param, u8 event)
3745 struct hci_dev *hdev = req->hdev;
3746 struct sk_buff *skb;
3748 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3750 /* If an error occured during request building, there is no point in
3751 * queueing the HCI command. We can simply return.
3756 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3758 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3759 hdev->name, opcode);
/* First command in the request marks the request start. */
3764 if (skb_queue_empty(&req->cmd_q))
3765 bt_cb(skb)->req.start = true;
3767 bt_cb(skb)->req.event = event;
3769 skb_queue_tail(&req->cmd_q, skb);
/* Convenience wrapper: no special completion event. */
3772 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3775 hci_req_add_ev(req, opcode, plen, param, 0);
3778 /* Get data from the previously sent command */
/* Return a pointer to the parameters of hdev->sent_cmd if its
 * opcode matches, otherwise (lines missing here) no match. */
3779 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3781 struct hci_command_hdr *hdr;
3783 if (!hdev->sent_cmd)
3786 hdr = (void *) hdev->sent_cmd->data;
3788 if (hdr->opcode != cpu_to_le16(opcode))
3791 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3793 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Outgoing ACL and SCO data paths.
 * NOTE(review): lossy extract — returns, loop conditions and some
 * case labels are missing (original numbering jumps).
 */
/* Prepend the 4-byte ACL header (handle+flags, data length). */
3797 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3799 struct hci_acl_hdr *hdr;
3802 skb_push(skb, HCI_ACL_HDR_SIZE);
3803 skb_reset_transport_header(skb);
3804 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3805 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3806 hdr->dlen = cpu_to_le16(len);
/* Queue one (possibly fragmented) ACL payload on the channel queue,
 * ACL-headering the head skb and every frag_list continuation. */
3809 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3810 struct sk_buff *skb, __u16 flags)
3812 struct hci_conn *conn = chan->conn;
3813 struct hci_dev *hdev = conn->hdev;
3814 struct sk_buff *list;
3816 skb->len = skb_headlen(skb);
3819 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
/* BR/EDR uses the connection handle, AMP the channel handle. */
3821 switch (hdev->dev_type) {
3823 hci_add_acl_hdr(skb, conn->handle, flags);
3826 hci_add_acl_hdr(skb, chan->handle, flags);
3829 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3833 list = skb_shinfo(skb)->frag_list;
3835 /* Non fragmented */
3836 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3838 skb_queue_tail(queue, skb);
3841 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3843 skb_shinfo(skb)->frag_list = NULL;
3845 /* Queue all fragments atomically */
3846 spin_lock(&queue->lock);
3848 __skb_queue_tail(queue, skb);
/* Continuation fragments drop the ACL_START flag. */
3850 flags &= ~ACL_START;
3853 skb = list; list = list->next;
3855 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3856 hci_add_acl_hdr(skb, conn->handle, flags);
3858 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3860 __skb_queue_tail(queue, skb);
3863 spin_unlock(&queue->lock);
/* Public ACL send: queue on the channel and kick the TX worker. */
3867 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3869 struct hci_dev *hdev = chan->conn->hdev;
3871 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3873 hci_queue_acl(chan, &chan->data_q, skb, flags);
3875 queue_work(hdev->workqueue, &hdev->tx_work);
/* Public SCO send: prepend SCO header, queue, kick TX worker. */
3879 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3881 struct hci_dev *hdev = conn->hdev;
3882 struct hci_sco_hdr hdr;
3884 BT_DBG("%s len %d", hdev->name, skb->len);
3886 hdr.handle = cpu_to_le16(conn->handle);
3887 hdr.dlen = skb->len;
3889 skb_push(skb, HCI_SCO_HDR_SIZE);
3890 skb_reset_transport_header(skb);
3891 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3893 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3895 skb_queue_tail(&conn->data_q, skb);
3896 queue_work(hdev->workqueue, &hdev->tx_work);
3899 /* ---- HCI TX task (outgoing data) ---- */
3901 /* HCI Connection scheduler */
/* Pick the connection of the given type with queued data and the
 * fewest in-flight packets; compute its fair-share quote from the
 * controller's free buffer count.  NOTE(review): lossy extract —
 * rcu_read_lock/unlock and several assignments are missing. */
3902 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3905 struct hci_conn_hash *h = &hdev->conn_hash;
3906 struct hci_conn *conn = NULL, *c;
3907 unsigned int num = 0, min = ~0;
3909 /* We don't have to lock device here. Connections are always
3910 * added and removed with TX task disabled. */
3914 list_for_each_entry_rcu(c, &h->list, list) {
3915 if (c->type != type || skb_queue_empty(&c->data_q))
3918 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the least-loaded eligible connection. */
3923 if (c->sent < min) {
3928 if (hci_conn_num(hdev, type) == num)
/* Free-buffer pool depends on link type; LE may share ACL. */
3937 switch (conn->type) {
3939 cnt = hdev->acl_cnt;
3943 cnt = hdev->sco_cnt;
3946 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3950 BT_ERR("Unknown link type");
3958 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout: forcibly disconnect every connection of this type
 * that still has unacknowledged packets. */
3962 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3964 struct hci_conn_hash *h = &hdev->conn_hash;
3967 BT_ERR("%s link tx timeout", hdev->name);
3971 /* Kill stalled connections */
3972 list_for_each_entry_rcu(c, &h->list, list) {
3973 if (c->type == type && c->sent) {
3974 BT_ERR("%s killing stalled connection %pMR",
3975 hdev->name, &c->dst);
3976 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel scheduler: like hci_low_sent() but walks each
 * connection's channel list and prefers the highest skb priority,
 * breaking ties by least-loaded connection. */
3983 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3986 struct hci_conn_hash *h = &hdev->conn_hash;
3987 struct hci_chan *chan = NULL;
3988 unsigned int num = 0, min = ~0, cur_prio = 0;
3989 struct hci_conn *conn;
3990 int cnt, q, conn_num = 0;
3992 BT_DBG("%s", hdev->name);
3996 list_for_each_entry_rcu(conn, &h->list, list) {
3997 struct hci_chan *tmp;
3999 if (conn->type != type)
4002 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4007 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4008 struct sk_buff *skb;
4010 if (skb_queue_empty(&tmp->data_q))
/* Priority of the head skb decides channel ordering. */
4013 skb = skb_peek(&tmp->data_q);
4014 if (skb->priority < cur_prio)
4017 if (skb->priority > cur_prio) {
4020 cur_prio = skb->priority;
4025 if (conn->sent < min) {
4031 if (hci_conn_num(hdev, type) == conn_num)
/* Quote derived from the relevant free-buffer counter. */
4040 switch (chan->conn->type) {
4042 cnt = hdev->acl_cnt;
4045 cnt = hdev->block_cnt;
4049 cnt = hdev->sco_cnt;
4052 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4056 BT_ERR("Unknown link type");
4061 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation: bump the priority of queued skbs (up to
 * HCI_PRIO_MAX - 1) after a scheduling round so low-priority
 * channels eventually get served. */
4065 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4067 struct hci_conn_hash *h = &hdev->conn_hash;
4068 struct hci_conn *conn;
4071 BT_DBG("%s", hdev->name);
4075 list_for_each_entry_rcu(conn, &h->list, list) {
4076 struct hci_chan *chan;
4078 if (conn->type != type)
4081 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4086 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4087 struct sk_buff *skb;
4094 if (skb_queue_empty(&chan->data_q))
4097 skb = skb_peek(&chan->data_q);
4098 if (skb->priority >= HCI_PRIO_MAX - 1)
4101 skb->priority = HCI_PRIO_MAX - 1;
4103 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4107 if (hci_conn_num(hdev, type) == num)
/* Block-based flow control: blocks consumed by one ACL payload. */
4115 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4117 /* Calculate count of blocks used by this packet */
4118 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* Shared ACL TX-timeout check, skipped for raw-mode devices. */
4121 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4123 if (!test_bit(HCI_RAW, &hdev->flags)) {
4124 /* ACL tx timeout must be longer than maximum
4125 * link supervision timeout (40.9 seconds) */
4126 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4127 HCI_ACL_TX_TIMEOUT))
4128 hci_link_tx_to(hdev, ACL_LINK);
/* Per-link-type TX schedulers, all driven from hci_tx_work().
 * NOTE(review): lossy extract — counter updates and loop bodies are
 * partially missing (original numbering jumps).
 */
/* Packet-based ACL scheduling: drain highest-priority channels
 * while controller ACL buffers (acl_cnt) remain. */
4132 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4134 unsigned int cnt = hdev->acl_cnt;
4135 struct hci_chan *chan;
4136 struct sk_buff *skb;
4139 __check_timeout(hdev, cnt);
4141 while (hdev->acl_cnt &&
4142 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4143 u32 priority = (skb_peek(&chan->data_q))->priority;
4144 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4145 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4146 skb->len, skb->priority);
4148 /* Stop if priority has changed */
4149 if (skb->priority < priority)
4152 skb = skb_dequeue(&chan->data_q);
4154 hci_conn_enter_active_mode(chan->conn,
4155 bt_cb(skb)->force_active);
4157 hci_send_frame(hdev, skb);
4158 hdev->acl_last_tx = jiffies;
/* If anything was sent, re-balance channel priorities. */
4166 if (cnt != hdev->acl_cnt)
4167 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduling (AMP controllers): account for the
 * number of data blocks each packet consumes. */
4170 static void hci_sched_acl_blk(struct hci_dev *hdev)
4172 unsigned int cnt = hdev->block_cnt;
4173 struct hci_chan *chan;
4174 struct sk_buff *skb;
4178 __check_timeout(hdev, cnt);
4180 BT_DBG("%s", hdev->name);
4182 if (hdev->dev_type == HCI_AMP)
4187 while (hdev->block_cnt > 0 &&
4188 (chan = hci_chan_sent(hdev, type, &quote))) {
4189 u32 priority = (skb_peek(&chan->data_q))->priority;
4190 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4193 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4194 skb->len, skb->priority);
4196 /* Stop if priority has changed */
4197 if (skb->priority < priority)
4200 skb = skb_dequeue(&chan->data_q);
/* Never send a packet larger than the remaining block budget. */
4202 blocks = __get_blocks(hdev, skb);
4203 if (blocks > hdev->block_cnt)
4206 hci_conn_enter_active_mode(chan->conn,
4207 bt_cb(skb)->force_active);
4209 hci_send_frame(hdev, skb);
4210 hdev->acl_last_tx = jiffies;
4212 hdev->block_cnt -= blocks;
4215 chan->sent += blocks;
4216 chan->conn->sent += blocks;
4220 if (cnt != hdev->block_cnt)
4221 hci_prio_recalculate(hdev, type);
/* Dispatch to the right ACL scheduler for this controller's flow
 * control mode; skip when no links of the matching type exist. */
4224 static void hci_sched_acl(struct hci_dev *hdev)
4226 BT_DBG("%s", hdev->name);
4228 /* No ACL link over BR/EDR controller */
4229 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4232 /* No AMP link over AMP controller */
4233 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4236 switch (hdev->flow_ctl_mode) {
4237 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4238 hci_sched_acl_pkt(hdev);
4241 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4242 hci_sched_acl_blk(hdev);
/* SCO scheduling: round-robin via hci_low_sent() while SCO buffer
 * credits (sco_cnt) remain. */
4248 static void hci_sched_sco(struct hci_dev *hdev)
4250 struct hci_conn *conn;
4251 struct sk_buff *skb;
4254 BT_DBG("%s", hdev->name);
4256 if (!hci_conn_num(hdev, SCO_LINK))
4259 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4260 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4261 BT_DBG("skb %p len %d", skb, skb->len);
4262 hci_send_frame(hdev, skb);
4265 if (conn->sent == ~0)
/* eSCO scheduling: identical shape to hci_sched_sco() but for
 * ESCO_LINK connections (still consuming sco_cnt credits). */
4271 static void hci_sched_esco(struct hci_dev *hdev)
4273 struct hci_conn *conn;
4274 struct sk_buff *skb;
4277 BT_DBG("%s", hdev->name);
4279 if (!hci_conn_num(hdev, ESCO_LINK))
4282 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4284 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4285 BT_DBG("skb %p len %d", skb, skb->len);
4286 hci_send_frame(hdev, skb);
4289 if (conn->sent == ~0)
/* LE scheduling: uses the dedicated LE buffer pool when the
 * controller advertises one (le_pkts), else shares the ACL pool. */
4295 static void hci_sched_le(struct hci_dev *hdev)
4297 struct hci_chan *chan;
4298 struct sk_buff *skb;
4299 int quote, cnt, tmp;
4301 BT_DBG("%s", hdev->name);
4303 if (!hci_conn_num(hdev, LE_LINK))
4306 if (!test_bit(HCI_RAW, &hdev->flags)) {
4307 /* LE tx timeout must be longer than maximum
4308 * link supervision timeout (40.9 seconds) */
4309 if (!hdev->le_cnt && hdev->le_pkts &&
4310 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4311 hci_link_tx_to(hdev, LE_LINK);
4314 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4316 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4317 u32 priority = (skb_peek(&chan->data_q))->priority;
4318 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4319 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4320 skb->len, skb->priority);
4322 /* Stop if priority has changed */
4323 if (skb->priority < priority)
4326 skb = skb_dequeue(&chan->data_q);
4328 hci_send_frame(hdev, skb);
4329 hdev->le_last_tx = jiffies;
/* When sharing the ACL pool, write the remainder back. */
4340 hdev->acl_cnt = cnt;
4343 hci_prio_recalculate(hdev, LE_LINK);
/* TX worker plus incoming-data dispatch and request completion.
 * NOTE(review): lossy extract — returns, gotos and some labels are
 * missing (original numbering jumps).
 */
/* Workqueue handler: run every scheduler unless the device is in
 * user-channel mode, then flush any raw-queued packets. */
4346 static void hci_tx_work(struct work_struct *work)
4348 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4349 struct sk_buff *skb;
4351 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4352 hdev->sco_cnt, hdev->le_cnt);
4354 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4355 /* Schedule queues and send stuff to HCI driver */
4356 hci_sched_acl(hdev);
4357 hci_sched_sco(hdev);
4358 hci_sched_esco(hdev);
4362 /* Send next queued raw (unknown type) packet */
4363 while ((skb = skb_dequeue(&hdev->raw_q)))
4364 hci_send_frame(hdev, skb);
4367 /* ----- HCI RX task (incoming data processing) ----- */
4369 /* ACL data packet */
/* Strip the ACL header, resolve the connection by handle and pass
 * the payload up to L2CAP; unknown handles are logged and dropped. */
4370 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4372 struct hci_acl_hdr *hdr = (void *) skb->data;
4373 struct hci_conn *conn;
4374 __u16 handle, flags;
4376 skb_pull(skb, HCI_ACL_HDR_SIZE);
/* Handle field packs the 12-bit handle plus PB/BC flags. */
4378 handle = __le16_to_cpu(hdr->handle);
4379 flags = hci_flags(handle);
4380 handle = hci_handle(handle);
4382 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4385 hdev->stat.acl_rx++;
4388 conn = hci_conn_hash_lookup_handle(hdev, handle);
4389 hci_dev_unlock(hdev);
4392 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4394 /* Send to upper protocol */
4395 l2cap_recv_acldata(conn, skb, flags);
4398 BT_ERR("%s ACL packet for unknown connection handle %d",
4399 hdev->name, handle);
4405 /* SCO data packet */
/* Same shape as ACL RX but hands the payload to the SCO layer. */
4406 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4408 struct hci_sco_hdr *hdr = (void *) skb->data;
4409 struct hci_conn *conn;
4412 skb_pull(skb, HCI_SCO_HDR_SIZE);
4414 handle = __le16_to_cpu(hdr->handle);
4416 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4418 hdev->stat.sco_rx++;
4421 conn = hci_conn_hash_lookup_handle(hdev, handle);
4422 hci_dev_unlock(hdev);
4425 /* Send to upper protocol */
4426 sco_recv_scodata(conn, skb);
4429 BT_ERR("%s SCO packet for unknown connection handle %d",
4430 hdev->name, handle);
/* A request is complete when the head of cmd_q is a new request
 * start marker (bt_cb()->req.start). */
4436 static bool hci_req_is_complete(struct hci_dev *hdev)
4438 struct sk_buff *skb;
4440 skb = skb_peek(&hdev->cmd_q);
4444 return bt_cb(skb)->req.start;
/* Re-queue a clone of the last sent command at the head of cmd_q —
 * used when buggy controllers emit a spontaneous reset-complete. */
4447 static void hci_resend_last(struct hci_dev *hdev)
4449 struct hci_command_hdr *sent;
4450 struct sk_buff *skb;
4453 if (!hdev->sent_cmd)
4456 sent = (void *) hdev->sent_cmd->data;
4457 opcode = __le16_to_cpu(sent->opcode);
/* Never resend a reset itself. */
4458 if (opcode == HCI_OP_RESET)
4461 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4465 skb_queue_head(&hdev->cmd_q, skb);
4466 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Called when a command's completion event arrives: find and fire
 * the request's completion callback, flushing the rest of the
 * request's queued commands on failure. */
4469 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4471 hci_req_complete_t req_complete = NULL;
4472 struct sk_buff *skb;
4473 unsigned long flags;
4475 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4477 /* If the completed command doesn't match the last one that was
4478 * sent we need to do special handling of it.
4480 if (!hci_sent_cmd_data(hdev, opcode)) {
4481 /* Some CSR based controllers generate a spontaneous
4482 * reset complete event during init and any pending
4483 * command will never be completed. In such a case we
4484 * need to resend whatever was the last sent
4487 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4488 hci_resend_last(hdev);
4493 /* If the command succeeded and there's still more commands in
4494 * this request the request is not yet complete.
4496 if (!status && !hci_req_is_complete(hdev))
4499 /* If this was the last command in a request the complete
4500 * callback would be found in hdev->sent_cmd instead of the
4501 * command queue (hdev->cmd_q).
4503 if (hdev->sent_cmd) {
4504 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4507 /* We must set the complete callback to NULL to
4508 * avoid calling the callback more than once if
4509 * this function gets called again.
4511 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4517 /* Remove all pending commands belonging to this request */
4518 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4519 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* Stop at the start of the NEXT request and put it back. */
4520 if (bt_cb(skb)->req.start) {
4521 __skb_queue_head(&hdev->cmd_q, skb);
4525 req_complete = bt_cb(skb)->req.complete;
4528 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4532 req_complete(hdev, status);
/* Workqueue handler draining hdev->rx_q: mirror each frame to the
 * monitor (and raw sockets in promiscuous mode), then dispatch by
 * packet type.  NOTE(review): lossy extract — the raw/user-channel
 * delivery path, the data-drop during init, and the final default/
 * kfree_skb are missing (original numbering jumps).
 */
4535 static void hci_rx_work(struct work_struct *work)
4537 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4538 struct sk_buff *skb;
4540 BT_DBG("%s", hdev->name);
4542 while ((skb = skb_dequeue(&hdev->rx_q))) {
4543 /* Send copy to monitor */
4544 hci_send_to_monitor(hdev, skb);
4546 if (atomic_read(&hdev->promisc)) {
4547 /* Send copy to the sockets */
4548 hci_send_to_sock(hdev, skb);
/* Raw mode / user channel: the stack does not process frames. */
4551 if (test_bit(HCI_RAW, &hdev->flags) ||
4552 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4557 if (test_bit(HCI_INIT, &hdev->flags)) {
4558 /* Don't process data packets in this states. */
4559 switch (bt_cb(skb)->pkt_type) {
4560 case HCI_ACLDATA_PKT:
4561 case HCI_SCODATA_PKT:
/* Normal dispatch: events, then ACL, then SCO data. */
4568 switch (bt_cb(skb)->pkt_type) {
4570 BT_DBG("%s Event packet", hdev->name);
4571 hci_event_packet(hdev, skb);
4574 case HCI_ACLDATA_PKT:
4575 BT_DBG("%s ACL data packet", hdev->name);
4576 hci_acldata_packet(hdev, skb);
4579 case HCI_SCODATA_PKT:
4580 BT_DBG("%s SCO data packet", hdev->name);
4581 hci_scodata_packet(hdev, skb);
4591 static void hci_cmd_work(struct work_struct *work)
4593 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4594 struct sk_buff *skb;
4596 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4597 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4599 /* Send queued commands */
4600 if (atomic_read(&hdev->cmd_cnt)) {
4601 skb = skb_dequeue(&hdev->cmd_q);
4605 kfree_skb(hdev->sent_cmd);
4607 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4608 if (hdev->sent_cmd) {
4609 atomic_dec(&hdev->cmd_cnt);
4610 hci_send_frame(hdev, skb);
4611 if (test_bit(HCI_RESET, &hdev->flags))
4612 del_timer(&hdev->cmd_timer);
4614 mod_timer(&hdev->cmd_timer,
4615 jiffies + HCI_CMD_TIMEOUT);
4617 skb_queue_head(&hdev->cmd_q, skb);
4618 queue_work(hdev->workqueue, &hdev->cmd_work);