/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */
#include <linux/module.h>

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
/* Management interface version advertised via MGMT_OP_READ_VERSION */
#define MGMT_VERSION	1
#define MGMT_REVISION	14
43 static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
51 MGMT_OP_SET_LINK_SECURITY,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
80 MGMT_OP_UNBLOCK_DEVICE,
81 MGMT_OP_SET_DEVICE_ID,
82 MGMT_OP_SET_ADVERTISING,
84 MGMT_OP_SET_STATIC_ADDRESS,
85 MGMT_OP_SET_SCAN_PARAMS,
86 MGMT_OP_SET_SECURE_CONN,
87 MGMT_OP_SET_DEBUG_KEYS,
90 MGMT_OP_GET_CONN_INFO,
91 MGMT_OP_GET_CLOCK_INFO,
93 MGMT_OP_REMOVE_DEVICE,
94 MGMT_OP_LOAD_CONN_PARAM,
95 MGMT_OP_READ_UNCONF_INDEX_LIST,
96 MGMT_OP_READ_CONFIG_INFO,
97 MGMT_OP_SET_EXTERNAL_CONFIG,
98 MGMT_OP_SET_PUBLIC_ADDRESS,
99 MGMT_OP_START_SERVICE_DISCOVERY,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 MGMT_OP_READ_EXT_INDEX_LIST,
102 MGMT_OP_READ_ADV_FEATURES,
103 MGMT_OP_ADD_ADVERTISING,
104 MGMT_OP_REMOVE_ADVERTISING,
105 MGMT_OP_GET_ADV_SIZE_INFO,
106 MGMT_OP_START_LIMITED_DISCOVERY,
107 MGMT_OP_READ_EXT_INFO,
108 MGMT_OP_SET_APPEARANCE,
111 static const u16 mgmt_events[] = {
112 MGMT_EV_CONTROLLER_ERROR,
114 MGMT_EV_INDEX_REMOVED,
115 MGMT_EV_NEW_SETTINGS,
116 MGMT_EV_CLASS_OF_DEV_CHANGED,
117 MGMT_EV_LOCAL_NAME_CHANGED,
118 MGMT_EV_NEW_LINK_KEY,
119 MGMT_EV_NEW_LONG_TERM_KEY,
120 MGMT_EV_DEVICE_CONNECTED,
121 MGMT_EV_DEVICE_DISCONNECTED,
122 MGMT_EV_CONNECT_FAILED,
123 MGMT_EV_PIN_CODE_REQUEST,
124 MGMT_EV_USER_CONFIRM_REQUEST,
125 MGMT_EV_USER_PASSKEY_REQUEST,
127 MGMT_EV_DEVICE_FOUND,
129 MGMT_EV_DEVICE_BLOCKED,
130 MGMT_EV_DEVICE_UNBLOCKED,
131 MGMT_EV_DEVICE_UNPAIRED,
132 MGMT_EV_PASSKEY_NOTIFY,
135 MGMT_EV_DEVICE_ADDED,
136 MGMT_EV_DEVICE_REMOVED,
137 MGMT_EV_NEW_CONN_PARAM,
138 MGMT_EV_UNCONF_INDEX_ADDED,
139 MGMT_EV_UNCONF_INDEX_REMOVED,
140 MGMT_EV_NEW_CONFIG_OPTIONS,
141 MGMT_EV_EXT_INDEX_ADDED,
142 MGMT_EV_EXT_INDEX_REMOVED,
143 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
144 MGMT_EV_ADVERTISING_ADDED,
145 MGMT_EV_ADVERTISING_REMOVED,
146 MGMT_EV_EXT_INFO_CHANGED,
149 static const u16 mgmt_untrusted_commands[] = {
150 MGMT_OP_READ_INDEX_LIST,
152 MGMT_OP_READ_UNCONF_INDEX_LIST,
153 MGMT_OP_READ_CONFIG_INFO,
154 MGMT_OP_READ_EXT_INDEX_LIST,
155 MGMT_OP_READ_EXT_INFO,
158 static const u16 mgmt_untrusted_events[] = {
160 MGMT_EV_INDEX_REMOVED,
161 MGMT_EV_NEW_SETTINGS,
162 MGMT_EV_CLASS_OF_DEV_CHANGED,
163 MGMT_EV_LOCAL_NAME_CHANGED,
164 MGMT_EV_UNCONF_INDEX_ADDED,
165 MGMT_EV_UNCONF_INDEX_REMOVED,
166 MGMT_EV_NEW_CONFIG_OPTIONS,
167 MGMT_EV_EXT_INDEX_ADDED,
168 MGMT_EV_EXT_INDEX_REMOVED,
169 MGMT_EV_EXT_INFO_CHANGED,
/* How long a resolved service cache stays valid before being flushed */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* 128-bit all-zero key used to detect blank link/LTK values */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
177 /* HCI to MGMT error code conversion table */
178 static u8 mgmt_status_table[] = {
180 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
181 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
182 MGMT_STATUS_FAILED, /* Hardware Failure */
183 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
184 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
185 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
186 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
187 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
188 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
189 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
190 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
191 MGMT_STATUS_BUSY, /* Command Disallowed */
192 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
193 MGMT_STATUS_REJECTED, /* Rejected Security */
194 MGMT_STATUS_REJECTED, /* Rejected Personal */
195 MGMT_STATUS_TIMEOUT, /* Host Timeout */
196 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
197 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
198 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
199 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
200 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
201 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
202 MGMT_STATUS_BUSY, /* Repeated Attempts */
203 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
204 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
205 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
206 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
207 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
208 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
209 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
210 MGMT_STATUS_FAILED, /* Unspecified Error */
211 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
212 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
213 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
214 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
215 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
216 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
217 MGMT_STATUS_FAILED, /* Unit Link Key Used */
218 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
219 MGMT_STATUS_TIMEOUT, /* Instant Passed */
220 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
221 MGMT_STATUS_FAILED, /* Transaction Collision */
222 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
223 MGMT_STATUS_REJECTED, /* QoS Rejected */
224 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
225 MGMT_STATUS_REJECTED, /* Insufficient Security */
226 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
227 MGMT_STATUS_BUSY, /* Role Switch Pending */
228 MGMT_STATUS_FAILED, /* Slot Violation */
229 MGMT_STATUS_FAILED, /* Role Switch Failed */
230 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
231 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
232 MGMT_STATUS_BUSY, /* Host Busy Pairing */
233 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
234 MGMT_STATUS_BUSY, /* Controller Busy */
235 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
236 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
237 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
238 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
239 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
242 static u8 mgmt_status(u8 hci_status)
244 if (hci_status < ARRAY_SIZE(mgmt_status_table))
245 return mgmt_status_table[hci_status];
247 return MGMT_STATUS_FAILED;
250 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
253 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
257 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
258 u16 len, int flag, struct sock *skip_sk)
260 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
264 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
265 struct sock *skip_sk)
267 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
268 HCI_SOCK_TRUSTED, skip_sk);
271 static u8 le_addr_type(u8 mgmt_addr_type)
273 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
274 return ADDR_LE_DEV_PUBLIC;
276 return ADDR_LE_DEV_RANDOM;
279 void mgmt_fill_version_info(void *ver)
281 struct mgmt_rp_read_version *rp = ver;
283 rp->version = MGMT_VERSION;
284 rp->revision = cpu_to_le16(MGMT_REVISION);
287 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
290 struct mgmt_rp_read_version rp;
292 BT_DBG("sock %p", sk);
294 mgmt_fill_version_info(&rp);
296 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
300 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
303 struct mgmt_rp_read_commands *rp;
304 u16 num_commands, num_events;
308 BT_DBG("sock %p", sk);
310 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
311 num_commands = ARRAY_SIZE(mgmt_commands);
312 num_events = ARRAY_SIZE(mgmt_events);
314 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
315 num_events = ARRAY_SIZE(mgmt_untrusted_events);
318 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
320 rp = kmalloc(rp_size, GFP_KERNEL);
324 rp->num_commands = cpu_to_le16(num_commands);
325 rp->num_events = cpu_to_le16(num_events);
327 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
328 __le16 *opcode = rp->opcodes;
330 for (i = 0; i < num_commands; i++, opcode++)
331 put_unaligned_le16(mgmt_commands[i], opcode);
333 for (i = 0; i < num_events; i++, opcode++)
334 put_unaligned_le16(mgmt_events[i], opcode);
336 __le16 *opcode = rp->opcodes;
338 for (i = 0; i < num_commands; i++, opcode++)
339 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
341 for (i = 0; i < num_events; i++, opcode++)
342 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
345 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
352 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
355 struct mgmt_rp_read_index_list *rp;
361 BT_DBG("sock %p", sk);
363 read_lock(&hci_dev_list_lock);
366 list_for_each_entry(d, &hci_dev_list, list) {
367 if (d->dev_type == HCI_PRIMARY &&
368 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
372 rp_len = sizeof(*rp) + (2 * count);
373 rp = kmalloc(rp_len, GFP_ATOMIC);
375 read_unlock(&hci_dev_list_lock);
380 list_for_each_entry(d, &hci_dev_list, list) {
381 if (hci_dev_test_flag(d, HCI_SETUP) ||
382 hci_dev_test_flag(d, HCI_CONFIG) ||
383 hci_dev_test_flag(d, HCI_USER_CHANNEL))
386 /* Devices marked as raw-only are neither configured
387 * nor unconfigured controllers.
389 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
392 if (d->dev_type == HCI_PRIMARY &&
393 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
394 rp->index[count++] = cpu_to_le16(d->id);
395 BT_DBG("Added hci%u", d->id);
399 rp->num_controllers = cpu_to_le16(count);
400 rp_len = sizeof(*rp) + (2 * count);
402 read_unlock(&hci_dev_list_lock);
404 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
412 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
413 void *data, u16 data_len)
415 struct mgmt_rp_read_unconf_index_list *rp;
421 BT_DBG("sock %p", sk);
423 read_lock(&hci_dev_list_lock);
426 list_for_each_entry(d, &hci_dev_list, list) {
427 if (d->dev_type == HCI_PRIMARY &&
428 hci_dev_test_flag(d, HCI_UNCONFIGURED))
432 rp_len = sizeof(*rp) + (2 * count);
433 rp = kmalloc(rp_len, GFP_ATOMIC);
435 read_unlock(&hci_dev_list_lock);
440 list_for_each_entry(d, &hci_dev_list, list) {
441 if (hci_dev_test_flag(d, HCI_SETUP) ||
442 hci_dev_test_flag(d, HCI_CONFIG) ||
443 hci_dev_test_flag(d, HCI_USER_CHANNEL))
446 /* Devices marked as raw-only are neither configured
447 * nor unconfigured controllers.
449 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
452 if (d->dev_type == HCI_PRIMARY &&
453 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
454 rp->index[count++] = cpu_to_le16(d->id);
455 BT_DBG("Added hci%u", d->id);
459 rp->num_controllers = cpu_to_le16(count);
460 rp_len = sizeof(*rp) + (2 * count);
462 read_unlock(&hci_dev_list_lock);
464 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
465 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
472 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
473 void *data, u16 data_len)
475 struct mgmt_rp_read_ext_index_list *rp;
481 BT_DBG("sock %p", sk);
483 read_lock(&hci_dev_list_lock);
486 list_for_each_entry(d, &hci_dev_list, list) {
487 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
491 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
492 rp = kmalloc(rp_len, GFP_ATOMIC);
494 read_unlock(&hci_dev_list_lock);
499 list_for_each_entry(d, &hci_dev_list, list) {
500 if (hci_dev_test_flag(d, HCI_SETUP) ||
501 hci_dev_test_flag(d, HCI_CONFIG) ||
502 hci_dev_test_flag(d, HCI_USER_CHANNEL))
505 /* Devices marked as raw-only are neither configured
506 * nor unconfigured controllers.
508 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
511 if (d->dev_type == HCI_PRIMARY) {
512 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
513 rp->entry[count].type = 0x01;
515 rp->entry[count].type = 0x00;
516 } else if (d->dev_type == HCI_AMP) {
517 rp->entry[count].type = 0x02;
522 rp->entry[count].bus = d->bus;
523 rp->entry[count++].index = cpu_to_le16(d->id);
524 BT_DBG("Added hci%u", d->id);
527 rp->num_controllers = cpu_to_le16(count);
528 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
530 read_unlock(&hci_dev_list_lock);
532 /* If this command is called at least once, then all the
533 * default index and unconfigured index events are disabled
534 * and from now on only extended index events are used.
536 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
537 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
538 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
540 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
541 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
548 static bool is_configured(struct hci_dev *hdev)
550 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
554 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
555 !bacmp(&hdev->public_addr, BDADDR_ANY))
561 static __le32 get_missing_options(struct hci_dev *hdev)
565 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
566 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
567 options |= MGMT_OPTION_EXTERNAL_CONFIG;
569 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
570 !bacmp(&hdev->public_addr, BDADDR_ANY))
571 options |= MGMT_OPTION_PUBLIC_ADDRESS;
573 return cpu_to_le32(options);
576 static int new_options(struct hci_dev *hdev, struct sock *skip)
578 __le32 options = get_missing_options(hdev);
580 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
581 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
584 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
586 __le32 options = get_missing_options(hdev);
588 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
592 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
593 void *data, u16 data_len)
595 struct mgmt_rp_read_config_info rp;
598 BT_DBG("sock %p %s", sk, hdev->name);
602 memset(&rp, 0, sizeof(rp));
603 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
605 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
606 options |= MGMT_OPTION_EXTERNAL_CONFIG;
608 if (hdev->set_bdaddr)
609 options |= MGMT_OPTION_PUBLIC_ADDRESS;
611 rp.supported_options = cpu_to_le32(options);
612 rp.missing_options = get_missing_options(hdev);
614 hci_dev_unlock(hdev);
616 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
620 static u32 get_supported_settings(struct hci_dev *hdev)
624 settings |= MGMT_SETTING_POWERED;
625 settings |= MGMT_SETTING_BONDABLE;
626 settings |= MGMT_SETTING_DEBUG_KEYS;
627 settings |= MGMT_SETTING_CONNECTABLE;
628 settings |= MGMT_SETTING_DISCOVERABLE;
630 if (lmp_bredr_capable(hdev)) {
631 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
632 settings |= MGMT_SETTING_FAST_CONNECTABLE;
633 settings |= MGMT_SETTING_BREDR;
634 settings |= MGMT_SETTING_LINK_SECURITY;
636 if (lmp_ssp_capable(hdev)) {
637 settings |= MGMT_SETTING_SSP;
638 settings |= MGMT_SETTING_HS;
641 if (lmp_sc_capable(hdev))
642 settings |= MGMT_SETTING_SECURE_CONN;
645 if (lmp_le_capable(hdev)) {
646 settings |= MGMT_SETTING_LE;
647 settings |= MGMT_SETTING_ADVERTISING;
648 settings |= MGMT_SETTING_SECURE_CONN;
649 settings |= MGMT_SETTING_PRIVACY;
650 settings |= MGMT_SETTING_STATIC_ADDRESS;
653 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
655 settings |= MGMT_SETTING_CONFIGURATION;
660 static u32 get_current_settings(struct hci_dev *hdev)
664 if (hdev_is_powered(hdev))
665 settings |= MGMT_SETTING_POWERED;
667 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
668 settings |= MGMT_SETTING_CONNECTABLE;
670 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
671 settings |= MGMT_SETTING_FAST_CONNECTABLE;
673 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
674 settings |= MGMT_SETTING_DISCOVERABLE;
676 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
677 settings |= MGMT_SETTING_BONDABLE;
679 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
680 settings |= MGMT_SETTING_BREDR;
682 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
683 settings |= MGMT_SETTING_LE;
685 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
686 settings |= MGMT_SETTING_LINK_SECURITY;
688 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
689 settings |= MGMT_SETTING_SSP;
691 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
692 settings |= MGMT_SETTING_HS;
694 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
695 settings |= MGMT_SETTING_ADVERTISING;
697 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
698 settings |= MGMT_SETTING_SECURE_CONN;
700 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
701 settings |= MGMT_SETTING_DEBUG_KEYS;
703 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
704 settings |= MGMT_SETTING_PRIVACY;
706 /* The current setting for static address has two purposes. The
707 * first is to indicate if the static address will be used and
708 * the second is to indicate if it is actually set.
710 * This means if the static address is not configured, this flag
711 * will never be set. If the address is configured, then if the
712 * address is actually used decides if the flag is set or not.
714 * For single mode LE only controllers and dual-mode controllers
715 * with BR/EDR disabled, the existence of the static address will
718 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
719 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
720 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
721 if (bacmp(&hdev->static_addr, BDADDR_ANY))
722 settings |= MGMT_SETTING_STATIC_ADDRESS;
728 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
730 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
733 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
734 struct hci_dev *hdev,
737 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
740 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
742 struct mgmt_pending_cmd *cmd;
744 /* If there's a pending mgmt command the flags will not yet have
745 * their final values, so check for this first.
747 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
749 struct mgmt_mode *cp = cmd->param;
751 return LE_AD_GENERAL;
752 else if (cp->val == 0x02)
753 return LE_AD_LIMITED;
755 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
756 return LE_AD_LIMITED;
757 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
758 return LE_AD_GENERAL;
764 bool mgmt_get_connectable(struct hci_dev *hdev)
766 struct mgmt_pending_cmd *cmd;
768 /* If there's a pending mgmt command the flag will not yet have
769 * it's final value, so check for this first.
771 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
773 struct mgmt_mode *cp = cmd->param;
778 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
781 static void service_cache_off(struct work_struct *work)
783 struct hci_dev *hdev = container_of(work, struct hci_dev,
785 struct hci_request req;
787 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
790 hci_req_init(&req, hdev);
794 __hci_req_update_eir(&req);
795 __hci_req_update_class(&req);
797 hci_dev_unlock(hdev);
799 hci_req_run(&req, NULL);
802 static void rpa_expired(struct work_struct *work)
804 struct hci_dev *hdev = container_of(work, struct hci_dev,
806 struct hci_request req;
810 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
812 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
815 /* The generation of a new RPA and programming it into the
816 * controller happens in the hci_req_enable_advertising()
819 hci_req_init(&req, hdev);
820 __hci_req_enable_advertising(&req);
821 hci_req_run(&req, NULL);
824 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
826 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
829 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
830 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
832 /* Non-mgmt controlled devices get this bit set
833 * implicitly so that pairing works for them, however
834 * for mgmt we require user-space to explicitly enable
837 hci_dev_clear_flag(hdev, HCI_BONDABLE);
840 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
841 void *data, u16 data_len)
843 struct mgmt_rp_read_info rp;
845 BT_DBG("sock %p %s", sk, hdev->name);
849 memset(&rp, 0, sizeof(rp));
851 bacpy(&rp.bdaddr, &hdev->bdaddr);
853 rp.version = hdev->hci_ver;
854 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
856 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
857 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
859 memcpy(rp.dev_class, hdev->dev_class, 3);
861 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
862 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
864 hci_dev_unlock(hdev);
866 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
870 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
873 eir[eir_len++] = sizeof(type) + data_len;
874 eir[eir_len++] = type;
875 memcpy(&eir[eir_len], data, data_len);
881 static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data)
883 eir[eir_len++] = sizeof(type) + sizeof(data);
884 eir[eir_len++] = type;
885 put_unaligned_le16(data, &eir[eir_len]);
886 eir_len += sizeof(data);
891 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
896 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
897 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
900 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
901 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
904 name_len = strlen(hdev->dev_name);
905 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
906 hdev->dev_name, name_len);
908 name_len = strlen(hdev->short_name);
909 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
910 hdev->short_name, name_len);
915 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
916 void *data, u16 data_len)
919 struct mgmt_rp_read_ext_info *rp = (void *)buf;
922 BT_DBG("sock %p %s", sk, hdev->name);
924 memset(&buf, 0, sizeof(buf));
928 bacpy(&rp->bdaddr, &hdev->bdaddr);
930 rp->version = hdev->hci_ver;
931 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
933 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
934 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
937 eir_len = append_eir_data_to_buf(hdev, rp->eir);
938 rp->eir_len = cpu_to_le16(eir_len);
940 hci_dev_unlock(hdev);
942 /* If this command is called at least once, then the events
943 * for class of device and local name changes are disabled
944 * and only the new extended controller information event
947 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
948 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
949 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
951 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
952 sizeof(*rp) + eir_len);
955 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
957 struct mgmt_ev_ext_info_changed ev;
959 ev.eir_len = cpu_to_le16(0);
961 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, &ev,
962 sizeof(ev), HCI_MGMT_EXT_INFO_EVENTS, skip);
965 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
967 __le32 settings = cpu_to_le32(get_current_settings(hdev));
969 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
973 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
975 BT_DBG("%s status 0x%02x", hdev->name, status);
977 if (hci_conn_count(hdev) == 0) {
978 cancel_delayed_work(&hdev->power_off);
979 queue_work(hdev->req_workqueue, &hdev->power_off.work);
983 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
985 struct mgmt_ev_advertising_added ev;
987 ev.instance = instance;
989 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
992 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
995 struct mgmt_ev_advertising_removed ev;
997 ev.instance = instance;
999 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1002 static void cancel_adv_timeout(struct hci_dev *hdev)
1004 if (hdev->adv_instance_timeout) {
1005 hdev->adv_instance_timeout = 0;
1006 cancel_delayed_work(&hdev->adv_instance_expire);
1010 static int clean_up_hci_state(struct hci_dev *hdev)
1012 struct hci_request req;
1013 struct hci_conn *conn;
1014 bool discov_stopped;
1017 hci_req_init(&req, hdev);
1019 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1020 test_bit(HCI_PSCAN, &hdev->flags)) {
1022 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1025 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1027 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1028 __hci_req_disable_advertising(&req);
1030 discov_stopped = hci_req_stop_discovery(&req);
1032 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1033 /* 0x15 == Terminated due to Power Off */
1034 __hci_abort_conn(&req, conn, 0x15);
1037 err = hci_req_run(&req, clean_up_hci_complete);
1038 if (!err && discov_stopped)
1039 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1044 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1047 struct mgmt_mode *cp = data;
1048 struct mgmt_pending_cmd *cmd;
1051 BT_DBG("request for %s", hdev->name);
1053 if (cp->val != 0x00 && cp->val != 0x01)
1054 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1055 MGMT_STATUS_INVALID_PARAMS);
1059 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1060 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1065 if (!!cp->val == hdev_is_powered(hdev)) {
1066 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1070 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1077 queue_work(hdev->req_workqueue, &hdev->power_on);
1080 /* Disconnect connections, stop scans, etc */
1081 err = clean_up_hci_state(hdev);
1083 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1084 HCI_POWER_OFF_TIMEOUT);
1086 /* ENODATA means there were no HCI commands queued */
1087 if (err == -ENODATA) {
1088 cancel_delayed_work(&hdev->power_off);
1089 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1095 hci_dev_unlock(hdev);
1099 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1101 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1103 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1104 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1107 int mgmt_new_settings(struct hci_dev *hdev)
1109 return new_settings(hdev, NULL);
1114 struct hci_dev *hdev;
1118 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1120 struct cmd_lookup *match = data;
1122 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1124 list_del(&cmd->list);
1126 if (match->sk == NULL) {
1127 match->sk = cmd->sk;
1128 sock_hold(match->sk);
1131 mgmt_pending_free(cmd);
1134 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1138 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1139 mgmt_pending_remove(cmd);
1142 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1144 if (cmd->cmd_complete) {
1147 cmd->cmd_complete(cmd, *status);
1148 mgmt_pending_remove(cmd);
1153 cmd_status_rsp(cmd, data);
1156 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1158 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1159 cmd->param, cmd->param_len);
1162 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1164 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1165 cmd->param, sizeof(struct mgmt_addr_info));
1168 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1170 if (!lmp_bredr_capable(hdev))
1171 return MGMT_STATUS_NOT_SUPPORTED;
1172 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1173 return MGMT_STATUS_REJECTED;
1175 return MGMT_STATUS_SUCCESS;
1178 static u8 mgmt_le_support(struct hci_dev *hdev)
1180 if (!lmp_le_capable(hdev))
1181 return MGMT_STATUS_NOT_SUPPORTED;
1182 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1183 return MGMT_STATUS_REJECTED;
1185 return MGMT_STATUS_SUCCESS;
1188 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1190 struct mgmt_pending_cmd *cmd;
1192 BT_DBG("status 0x%02x", status);
1196 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1201 u8 mgmt_err = mgmt_status(status);
1202 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1203 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1207 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1208 hdev->discov_timeout > 0) {
1209 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1210 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1213 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1214 new_settings(hdev, cmd->sk);
1217 mgmt_pending_remove(cmd);
1220 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DISCOVERABLE: 0x00 off, 0x01 general,
 * 0x02 limited discoverable. A timeout is forbidden when disabling
 * and mandatory for limited mode.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Only one discoverable/connectable transition may be in flight */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	/* The actual HCI traffic is driven from the request workqueue */
	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Completion handler for the connectable update work: answers the
 * pending MGMT_OP_SET_CONNECTABLE command with either an error status
 * or the new settings, then drops the pending entry.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Update the CONNECTABLE setting without touching the controller
 * (used when the device is powered off). Disabling connectable also
 * clears discoverable. Scan state is refreshed only when the flag
 * actually changed.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
/* Handler for MGMT_OP_SET_CONNECTABLE. When powered off this only
 * flips the stored setting; when powered the HCI work is queued and
 * the reply is sent from mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Only one discoverable/connectable transition may be in flight */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also tears down discoverable
		 * mode and any pending discoverable timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handler for MGMT_OP_SET_BONDABLE: purely a host-side flag, no HCI
 * command needed. A change may require refreshing the advertising
 * address when limited privacy is in use.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handler for MGMT_OP_SET_LINK_SECURITY. When powered, this maps to
 * the HCI Write Authentication Enable command; when powered off only
 * the stored flag is toggled.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested state: nothing to do */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handler for MGMT_OP_SET_SSP (Secure Simple Pairing). Disabling SSP
 * also drops High Speed support since HS depends on SSP.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			/* HS requires SSP, so it goes down with it */
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Debug mode must be switched off before SSP itself is disabled */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1654 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1656 struct mgmt_mode *cp = data;
1661 BT_DBG("request for %s", hdev->name);
1663 status = mgmt_bredr_support(hdev);
1665 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1667 if (!lmp_ssp_capable(hdev))
1668 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1669 MGMT_STATUS_NOT_SUPPORTED);
1671 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1672 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1673 MGMT_STATUS_REJECTED);
1675 if (cp->val != 0x00 && cp->val != 0x01)
1676 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1677 MGMT_STATUS_INVALID_PARAMS);
1681 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1682 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1688 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1690 if (hdev_is_powered(hdev)) {
1691 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1692 MGMT_STATUS_REJECTED);
1696 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1699 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1704 err = new_settings(hdev, sk);
1707 hci_dev_unlock(hdev);
/* HCI request callback for set_le(): answers all pending SET_LE
 * commands and, when LE just got enabled, seeds default advertising
 * and scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Handler for MGMT_OP_SET_LE: toggles LE host support via the HCI
 * Write LE Host Supported command. LE-only configurations refuse to
 * switch LE off.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE removes all advertising instances first */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* Called with hdev locked; walks the per-device pending list */
	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}
1878 static const u8 bluetooth_base_uuid[] = {
1879 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1880 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1883 static u8 get_uuid_size(const u8 *uuid)
1887 if (memcmp(uuid, bluetooth_base_uuid, 12))
1890 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for the UUID/class commands: replies to the
 * pending command of type @mgmt_op with the current device class.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	/* dev_class is always exactly 3 bytes on the wire */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* HCI request callback for add_uuid() */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
/* Handler for MGMT_OP_ADD_UUID: stores the UUID and refreshes the
 * controller's class of device and EIR data. If no HCI traffic is
 * needed (-ENODATA), the reply is sent immediately.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		/* -ENODATA means the request was empty: nothing for the
		 * controller to do, so reply with success right away.
		 */
		if (err != -ENODATA)
			goto failed;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Arm the service cache timer (powered devices only). Returns true
 * when the cache was newly enabled, i.e. the caller can skip the
 * immediate class/EIR update because it will happen on expiry.
 */
static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}
/* HCI request callback for remove_uuid() */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
/* Handler for MGMT_OP_REMOVE_UUID. An all-zero UUID removes every
 * stored UUID; otherwise all entries matching the given UUID are
 * dropped and the class/EIR data is refreshed.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The wildcard (all-zero) UUID clears the whole list */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA: nothing to send, answer immediately */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* HCI request callback for set_dev_class() */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
/* Handler for MGMT_OP_SET_DEV_CLASS: stores major/minor device class
 * and pushes the resulting CoD (and possibly EIR) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Lower two minor bits and upper three major bits are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* cancel_delayed_work_sync() may sleep and the work item
		 * itself takes hdev->lock, so drop the lock around it.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: nothing to send, answer immediately */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handler for MGMT_OP_LOAD_LINK_KEYS: replaces the whole BR/EDR link
 * key store with the keys supplied by userspace. The length checks
 * guard against a truncated or oversized variable-length payload.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound so expected_len below cannot overflow u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before mutating any state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
/* Emit the MGMT Device Unpaired event to all mgmt sockets except
 * @skip_sk (normally the socket that issued the unpair command).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
/* Handler for MGMT_OP_UNPAIR_DEVICE: removes stored keys (link key
 * for BR/EDR, IRK/LTK for LE) and optionally terminates an existing
 * connection to the device.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

	err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Abort any ongoing SMP pairing */
	/* NOTE(review): cancelling SMP via the looked-up conn can race
	 * with connection teardown; upstream later replaced this with
	 * smp_cancel_and_remove_pairing() which takes the SMP channel
	 * locks itself — confirm whether that helper exists in this
	 * tree before relying on this path under stress.
	 */
	smp_cancel_pairing(conn);

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handler for MGMT_OP_DISCONNECT: terminates the ACL or LE link to
 * the given address. The reply is sent from the disconnect complete
 * path via the pending command.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only a single disconnect may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2450 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2452 switch (link_type) {
2454 switch (addr_type) {
2455 case ADDR_LE_DEV_PUBLIC:
2456 return BDADDR_LE_PUBLIC;
2459 /* Fallback to LE Random address type */
2460 return BDADDR_LE_RANDOM;
2464 /* Fallback to BR/EDR type */
2465 return BDADDR_BREDR;
/* Handler for MGMT_OP_GET_CONNECTIONS: returns the addresses of all
 * mgmt-visible connections. SCO/eSCO links are counted for the
 * allocation but filtered from the final list, so the response
 * length is recalculated afterwards.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count entries to size the response buffer */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		/* SCO/eSCO links are not exposed over mgmt */
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Queue a PIN Code Negative Reply HCI command for @cp and register a
 * pending mgmt command so the HCI completion answers userspace.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
/* Handler for MGMT_OP_PIN_CODE_REPLY. A connection that negotiated
 * high security requires a full 16-byte PIN; anything shorter is
 * answered with a negative reply to the controller.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handler for MGMT_OP_SET_IO_CAPABILITY: stores the IO capability
 * used for subsequent pairings. Host-side state only, no HCI traffic.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data points
 * at @conn, or NULL if no pairing is in progress for this connection.
 */
static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}
/* Finish a Pair Device command: send the reply, detach the pairing
 * callbacks from the connection and drop the references taken when
 * the pairing was started.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
/* Called by the SMP layer when pairing over LE finishes; completes
 * any mgmt Pair Device command tied to this connection.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct mgmt_pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
/* Connection callback used for BR/EDR pairings: any connect/security/
 * disconnect event finishes the pending Pair Device command.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
/* LE variant of pairing_complete_cb(): success is reported later via
 * mgmt_smp_complete(), so only failures complete the command here.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	/* For LE a successful event is not proof that pairing finished;
	 * wait for the SMP layer instead.
	 */
	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2726 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2729 struct mgmt_cp_pair_device *cp = data;
2730 struct mgmt_rp_pair_device rp;
2731 struct mgmt_pending_cmd *cmd;
2732 u8 sec_level, auth_type;
2733 struct hci_conn *conn;
2738 memset(&rp, 0, sizeof(rp));
2739 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2740 rp.addr.type = cp->addr.type;
2742 if (!bdaddr_type_is_valid(cp->addr.type))
2743 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2744 MGMT_STATUS_INVALID_PARAMS,
2747 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2748 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2749 MGMT_STATUS_INVALID_PARAMS,
2754 if (!hdev_is_powered(hdev)) {
2755 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2756 MGMT_STATUS_NOT_POWERED, &rp,
2761 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2762 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2763 MGMT_STATUS_ALREADY_PAIRED, &rp,
2768 sec_level = BT_SECURITY_MEDIUM;
2769 auth_type = HCI_AT_DEDICATED_BONDING;
2771 if (cp->addr.type == BDADDR_BREDR) {
2772 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2775 u8 addr_type = le_addr_type(cp->addr.type);
2776 struct hci_conn_params *p;
2778 /* When pairing a new device, it is expected to remember
2779 * this device for future connections. Adding the connection
2780 * parameter information ahead of time allows tracking
2781 * of the slave preferred values and will speed up any
2782 * further connection establishment.
2784 * If connection parameters already exist, then they
2785 * will be kept and this function does nothing.
2787 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2789 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2790 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2792 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
2793 addr_type, sec_level,
2794 HCI_LE_CONN_TIMEOUT);
2800 if (PTR_ERR(conn) == -EBUSY)
2801 status = MGMT_STATUS_BUSY;
2802 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2803 status = MGMT_STATUS_NOT_SUPPORTED;
2804 else if (PTR_ERR(conn) == -ECONNREFUSED)
2805 status = MGMT_STATUS_REJECTED;
2807 status = MGMT_STATUS_CONNECT_FAILED;
2809 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2810 status, &rp, sizeof(rp));
2814 if (conn->connect_cfm_cb) {
2815 hci_conn_drop(conn);
2816 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2817 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2821 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2824 hci_conn_drop(conn);
2828 cmd->cmd_complete = pairing_complete;
2830 /* For LE, just connecting isn't a proof that the pairing finished */
2831 if (cp->addr.type == BDADDR_BREDR) {
2832 conn->connect_cfm_cb = pairing_complete_cb;
2833 conn->security_cfm_cb = pairing_complete_cb;
2834 conn->disconn_cfm_cb = pairing_complete_cb;
2836 conn->connect_cfm_cb = le_pairing_complete_cb;
2837 conn->security_cfm_cb = le_pairing_complete_cb;
2838 conn->disconn_cfm_cb = le_pairing_complete_cb;
2841 conn->io_capability = cp->io_cap;
2842 cmd->user_data = hci_conn_get(conn);
2844 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
2845 hci_conn_security(conn, sec_level, auth_type, true)) {
2846 cmd->cmd_complete(cmd, 0);
2847 mgmt_pending_remove(cmd);
2853 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE: abort an in-progress Pair
 * Device request.  Fails with NOT_POWERED if the adapter is down and
 * with INVALID_PARAMS if no pairing is pending or the supplied address
 * does not match the pending pairing target.  On success the pending
 * PAIR_DEVICE command completes with MGMT_STATUS_CANCELLED and this
 * command replies with status 0 echoing the address.
 * NOTE(review): excerpt is missing intermediate lines (braces/gotos);
 * comments describe only what the visible statements show.
 */
2857 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2860 struct mgmt_addr_info *addr = data;
2861 struct mgmt_pending_cmd *cmd;
2862 struct hci_conn *conn;
2869 if (!hdev_is_powered(hdev)) {
2870 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2871 MGMT_STATUS_NOT_POWERED);
/* Look up the pending Pair Device command for this adapter */
2875 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2877 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2878 MGMT_STATUS_INVALID_PARAMS);
2882 conn = cmd->user_data;
/* Address supplied must match the connection being paired */
2884 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2885 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2886 MGMT_STATUS_INVALID_PARAMS);
/* Complete the pending pairing as cancelled, then ack the cancel */
2890 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
2891 mgmt_pending_remove(cmd);
2893 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2894 addr, sizeof(*addr));
2896 hci_dev_unlock(hdev);
/* Common helper for all user pairing responses (PIN code, user confirm,
 * user passkey and their negative variants).  @mgmt_op is the mgmt
 * command being answered, @hci_op the HCI command to forward for BR/EDR
 * links, and @passkey is only meaningful for HCI_OP_USER_PASSKEY_REPLY.
 * LE responses are routed to SMP instead of HCI.
 * NOTE(review): excerpt is missing intermediate lines; comments cover
 * only the visible statements.
 */
2900 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2901 struct mgmt_addr_info *addr, u16 mgmt_op,
2902 u16 hci_op, __le32 passkey)
2904 struct mgmt_pending_cmd *cmd;
2905 struct hci_conn *conn;
2910 if (!hdev_is_powered(hdev)) {
2911 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
2912 MGMT_STATUS_NOT_POWERED, addr,
/* Find the connection this response refers to: ACL for BR/EDR,
 * otherwise an LE lookup using the translated address type.
 */
2917 if (addr->type == BDADDR_BREDR)
2918 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2920 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
2921 le_addr_type(addr->type));
2924 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
2925 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled entirely by SMP; reply to
 * userspace immediately based on the SMP result.
 */
2930 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2931 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2933 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
2934 MGMT_STATUS_SUCCESS, addr,
2937 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
2938 MGMT_STATUS_FAILED, addr,
/* BR/EDR: queue a pending command and forward the reply over HCI */
2944 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2950 cmd->cmd_complete = addr_cmd_complete;
2952 /* Continue with pairing via HCI */
/* Passkey replies carry bdaddr + passkey; all other replies only
 * carry the bdaddr.
 */
2953 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2954 struct hci_cp_user_passkey_reply cp;
2956 bacpy(&cp.bdaddr, &addr->bdaddr);
2957 cp.passkey = passkey;
2958 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2960 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
/* Undo the pending entry if the HCI send failed */
2964 mgmt_pending_remove(cmd);
2967 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper that forwards a
 * negative PIN code reply through the common user_pairing_resp() path
 * (passkey argument unused, hence 0).
 */
2971 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2972 void *data, u16 len)
2974 struct mgmt_cp_pin_code_neg_reply *cp = data;
2978 return user_pairing_resp(sk, hdev, &cp->addr,
2979 MGMT_OP_PIN_CODE_NEG_REPLY,
2980 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_REPLY: validates the parameter
 * length, then forwards the positive confirmation through
 * user_pairing_resp() (passkey argument unused, hence 0).
 */
2983 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2986 struct mgmt_cp_user_confirm_reply *cp = data;
2990 if (len != sizeof(*cp))
2991 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2992 MGMT_STATUS_INVALID_PARAMS);
2994 return user_pairing_resp(sk, hdev, &cp->addr,
2995 MGMT_OP_USER_CONFIRM_REPLY,
2996 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_NEG_REPLY: thin wrapper forwarding
 * the negative confirmation through user_pairing_resp().
 */
2999 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3000 void *data, u16 len)
3002 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3006 return user_pairing_resp(sk, hdev, &cp->addr,
3007 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3008 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Handler for MGMT_OP_USER_PASSKEY_REPLY: forwards the user-entered
 * passkey through user_pairing_resp(); the passkey is consumed by the
 * HCI_OP_USER_PASSKEY_REPLY branch there.
 */
3011 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3014 struct mgmt_cp_user_passkey_reply *cp = data;
3018 return user_pairing_resp(sk, hdev, &cp->addr,
3019 MGMT_OP_USER_PASSKEY_REPLY,
3020 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Handler for MGMT_OP_USER_PASSKEY_NEG_REPLY: thin wrapper forwarding
 * the passkey rejection through user_pairing_resp() (no passkey value).
 */
3023 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3024 void *data, u16 len)
3026 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3030 return user_pairing_resp(sk, hdev, &cp->addr,
3031 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3032 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Expire the current advertising instance if it carries any of the
 * given @flags (e.g. local name or appearance changed): cancel the
 * pending advertising timeout and re-schedule the next instance so the
 * advertised data is refreshed.
 * NOTE(review): excerpt is missing intermediate lines (early returns on
 * lookup failure are not visible here).
 */
3035 static void adv_expire(struct hci_dev *hdev, u32 flags)
3037 struct adv_info *adv_instance;
3038 struct hci_request req;
3041 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3045 /* stop if current instance doesn't need to be changed */
3046 if (!(adv_instance->flags & flags))
3049 cancel_adv_timeout(hdev);
3051 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3055 hci_req_init(&req, hdev);
3056 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3061 hci_req_run(&req, NULL);
/* HCI request completion callback for Set Local Name: resolves the
 * pending MGMT_OP_SET_LOCAL_NAME command with the mapped HCI status,
 * and on success refreshes advertising if the name is being advertised.
 */
3064 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3066 struct mgmt_cp_set_local_name *cp;
3067 struct mgmt_pending_cmd *cmd;
3069 BT_DBG("status 0x%02x", status);
3073 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3080 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3081 mgmt_status(status));
3083 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Name is part of the advertising/scan-response payload, so an
 * active advertising instance using it must be refreshed.
 */
3086 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3087 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3090 mgmt_pending_remove(cmd);
3093 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LOCAL_NAME: updates the device name and short
 * name.  Unchanged values or a powered-off adapter are answered
 * immediately; otherwise an HCI request is queued to write the name,
 * update EIR and refresh LE scan response data, completing via
 * set_name_complete().
 * NOTE(review): excerpt is missing intermediate lines (gotos/braces).
 */
3096 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3099 struct mgmt_cp_set_local_name *cp = data;
3100 struct mgmt_pending_cmd *cmd;
3101 struct hci_request req;
3108 /* If the old values are the same as the new ones just return a
3109 * direct command complete event.
3111 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3112 !memcmp(hdev->short_name, cp->short_name,
3113 sizeof(hdev->short_name))) {
3114 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name needs no controller interaction; store it right away */
3119 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3121 if (!hdev_is_powered(hdev)) {
3122 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3124 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Powered off: notify listeners directly, no HCI traffic needed */
3129 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3130 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3131 ext_info_changed(hdev, sk);
3136 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3142 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3144 hci_req_init(&req, hdev);
3146 if (lmp_bredr_capable(hdev)) {
3147 __hci_req_update_name(&req);
3148 __hci_req_update_eir(&req);
3151 /* The name is stored in the scan response data and so
3152 * no need to udpate the advertising data here.
3154 if (lmp_le_capable(hdev))
3155 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3157 err = hci_req_run(&req, set_name_complete);
3159 mgmt_pending_remove(cmd);
3162 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_APPEARANCE: stores the (LE) appearance value
 * and, if it changed while advertising is active, expires advertising
 * instances that include the appearance so they get refreshed.
 * NOTE(review): the local is spelled "apperance" (sic); its declaration
 * lies outside this excerpt, so the spelling is kept here.
 */
3166 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3169 struct mgmt_cp_set_appearance *cp = data;
3175 apperance = le16_to_cpu(cp->appearance);
3179 if (hdev->appearance != apperance) {
3180 hdev->appearance = apperance;
3182 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3183 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3186 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3189 hci_dev_unlock(hdev);
/* HCI request completion callback for Read Local OOB Data: translates
 * the HCI response (legacy P-192-only or extended P-192+P-256 form,
 * selected by @opcode) into a mgmt_rp_read_local_oob_data reply for the
 * pending MGMT_OP_READ_LOCAL_OOB_DATA command.  Short or failed HCI
 * responses are reported as MGMT_STATUS_FAILED.
 */
3194 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
3195 u16 opcode, struct sk_buff *skb)
3197 struct mgmt_rp_read_local_oob_data mgmt_rp;
3198 size_t rp_size = sizeof(mgmt_rp);
3199 struct mgmt_pending_cmd *cmd;
3201 BT_DBG("%s status %u", hdev->name, status);
3203 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3207 if (status || !skb) {
3208 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3209 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
3213 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
3215 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
3216 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a truncated HCI response before copying */
3218 if (skb->len < sizeof(*rp)) {
3219 mgmt_cmd_status(cmd->sk, hdev->id,
3220 MGMT_OP_READ_LOCAL_OOB_DATA,
3221 MGMT_STATUS_FAILED);
3225 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
3226 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Legacy response has no P-256 values; shrink the reply so the
 * 256-bit fields are omitted entirely.
 */
3228 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
3230 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
3232 if (skb->len < sizeof(*rp)) {
3233 mgmt_cmd_status(cmd->sk, hdev->id,
3234 MGMT_OP_READ_LOCAL_OOB_DATA,
3235 MGMT_STATUS_FAILED);
3239 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
3240 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
3242 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
3243 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
3246 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3247 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
3250 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA: validates power/SSP state
 * and rejects duplicates, then issues the extended OOB read when Secure
 * Connections over BR/EDR is enabled, or the legacy read otherwise.
 * Completion is handled by read_local_oob_data_complete().
 */
3253 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3254 void *data, u16 data_len)
3256 struct mgmt_pending_cmd *cmd;
3257 struct hci_request req;
3260 BT_DBG("%s", hdev->name);
3264 if (!hdev_is_powered(hdev)) {
3265 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3266 MGMT_STATUS_NOT_POWERED);
3270 if (!lmp_ssp_capable(hdev)) {
3271 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3272 MGMT_STATUS_NOT_SUPPORTED);
/* Only one OOB read may be in flight at a time */
3276 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3277 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3282 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3288 hci_req_init(&req, hdev);
/* Prefer the extended command (P-192 + P-256) when BR/EDR Secure
 * Connections is available.
 */
3290 if (bredr_sc_enabled(hdev))
3291 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3293 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3295 err = hci_req_run_skb(&req, read_local_oob_data_complete);
3297 mgmt_pending_remove(cmd);
3300 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA: stores remote out-of-band
 * pairing data.  Two wire formats are accepted, distinguished by @len:
 * the legacy form (P-192 hash/rand, BR/EDR only) and the extended form
 * (P-192 + P-256).  All-zero hash/rand pairs disable the corresponding
 * key strength; any other length is rejected as INVALID_PARAMS.
 * NOTE(review): excerpt is missing intermediate lines (NULL-assignment
 * branches for disabled P-192/P-256 values are not visible here).
 */
3304 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3305 void *data, u16 len)
3307 struct mgmt_addr_info *addr = data;
3310 BT_DBG("%s ", hdev->name);
3312 if (!bdaddr_type_is_valid(addr->type))
3313 return mgmt_cmd_complete(sk, hdev->id,
3314 MGMT_OP_ADD_REMOTE_OOB_DATA,
3315 MGMT_STATUS_INVALID_PARAMS,
3316 addr, sizeof(*addr));
/* Legacy (P-192 only) variant: BR/EDR addresses only */
3320 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3321 struct mgmt_cp_add_remote_oob_data *cp = data;
3324 if (cp->addr.type != BDADDR_BREDR) {
3325 err = mgmt_cmd_complete(sk, hdev->id,
3326 MGMT_OP_ADD_REMOTE_OOB_DATA,
3327 MGMT_STATUS_INVALID_PARAMS,
3328 &cp->addr, sizeof(cp->addr));
3332 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3333 cp->addr.type, cp->hash,
3334 cp->rand, NULL, NULL);
3336 status = MGMT_STATUS_FAILED;
3338 status = MGMT_STATUS_SUCCESS;
3340 err = mgmt_cmd_complete(sk, hdev->id,
3341 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3342 &cp->addr, sizeof(cp->addr));
/* Extended (P-192 + P-256) variant */
3343 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3344 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3345 u8 *rand192, *hash192, *rand256, *hash256;
3348 if (bdaddr_type_is_le(cp->addr.type)) {
3349 /* Enforce zero-valued 192-bit parameters as
3350 * long as legacy SMP OOB isn't implemented.
3352 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3353 memcmp(cp->hash192, ZERO_KEY, 16)) {
3354 err = mgmt_cmd_complete(sk, hdev->id,
3355 MGMT_OP_ADD_REMOTE_OOB_DATA,
3356 MGMT_STATUS_INVALID_PARAMS,
3357 addr, sizeof(*addr));
3364 /* In case one of the P-192 values is set to zero,
3365 * then just disable OOB data for P-192.
3367 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3368 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3372 rand192 = cp->rand192;
3373 hash192 = cp->hash192;
3377 /* In case one of the P-256 values is set to zero, then just
3378 * disable OOB data for P-256.
3380 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3381 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3385 rand256 = cp->rand256;
3386 hash256 = cp->hash256;
3389 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3390 cp->addr.type, hash192, rand192,
3393 status = MGMT_STATUS_FAILED;
3395 status = MGMT_STATUS_SUCCESS;
3397 err = mgmt_cmd_complete(sk, hdev->id,
3398 MGMT_OP_ADD_REMOTE_OOB_DATA,
3399 status, &cp->addr, sizeof(cp->addr));
3401 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3402 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3403 MGMT_STATUS_INVALID_PARAMS);
3407 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_REMOTE_OOB_DATA: removes stored remote OOB
 * data for one BR/EDR address, or clears all stored entries when the
 * address is BDADDR_ANY.  A failed per-address removal is reported as
 * INVALID_PARAMS (no such entry).
 */
3411 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3412 void *data, u16 len)
3414 struct mgmt_cp_remove_remote_oob_data *cp = data;
3418 BT_DBG("%s", hdev->name);
3420 if (cp->addr.type != BDADDR_BREDR)
3421 return mgmt_cmd_complete(sk, hdev->id,
3422 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3423 MGMT_STATUS_INVALID_PARAMS,
3424 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY acts as a wildcard: wipe the entire OOB store */
3428 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3429 hci_remote_oob_data_clear(hdev);
3430 status = MGMT_STATUS_SUCCESS;
3434 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3436 status = MGMT_STATUS_INVALID_PARAMS;
3438 status = MGMT_STATUS_SUCCESS;
3441 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3442 status, &cp->addr, sizeof(cp->addr));
3444 hci_dev_unlock(hdev);
/* Called when the discovery start sequence finishes: resolves whichever
 * start-discovery variant is pending (plain, service, or limited) with
 * the mapped HCI status.
 */
3448 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3450 struct mgmt_pending_cmd *cmd;
3452 BT_DBG("status %d", status);
/* Try each start-discovery opcode in turn; only one can be pending */
3456 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3458 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3461 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
3464 cmd->cmd_complete(cmd, mgmt_status(status));
3465 mgmt_pending_remove(cmd);
3468 hci_dev_unlock(hdev);
/* Validate a discovery type against the controller's capabilities.
 * Writes the mgmt status to report on failure into *mgmt_status:
 * LE-only needs LE support, interleaved needs both LE and (via the
 * fall-through) BR/EDR support, and unknown types are INVALID_PARAMS.
 * NOTE(review): the return statements are outside this excerpt.
 */
3471 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
3472 uint8_t *mgmt_status)
3475 case DISCOV_TYPE_LE:
3476 *mgmt_status = mgmt_le_support(hdev);
3480 case DISCOV_TYPE_INTERLEAVED:
3481 *mgmt_status = mgmt_le_support(hdev);
3484 /* Intentional fall-through */
3485 case DISCOV_TYPE_BREDR:
3486 *mgmt_status = mgmt_bredr_support(hdev);
3491 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects which).  Validates
 * power state, that no discovery/periodic inquiry is already running,
 * and the discovery type; then resets the filter, records the
 * parameters and kicks the discov_update work to start scanning.
 */
3498 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
3499 u16 op, void *data, u16 len)
3501 struct mgmt_cp_start_discovery *cp = data;
3502 struct mgmt_pending_cmd *cmd;
3506 BT_DBG("%s", hdev->name);
3510 if (!hdev_is_powered(hdev)) {
3511 err = mgmt_cmd_complete(sk, hdev->id, op,
3512 MGMT_STATUS_NOT_POWERED,
3513 &cp->type, sizeof(cp->type));
3517 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3518 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3519 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
3520 &cp->type, sizeof(cp->type));
3524 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3525 err = mgmt_cmd_complete(sk, hdev->id, op, status,
3526 &cp->type, sizeof(cp->type));
3530 /* Clear the discovery filter first to free any previously
3531 * allocated memory for the UUID list.
3533 hci_discovery_filter_clear(hdev);
3535 hdev->discovery.type = cp->type;
3536 hdev->discovery.report_invalid_rssi = false;
/* Limited discovery restricts results to limited-discoverable peers */
3537 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
3538 hdev->discovery.limited = true;
3540 hdev->discovery.limited = false;
3542 cmd = mgmt_pending_add(sk, op, hdev, data, len);
3548 cmd->cmd_complete = generic_cmd_complete;
/* Actual scanning is started asynchronously from the workqueue */
3550 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3551 queue_work(hdev->req_workqueue, &hdev->discov_update);
3555 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_START_DISCOVERY: delegates to the shared
 * start_discovery_internal() with the plain-discovery opcode.
 */
3559 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3560 void *data, u16 len)
3562 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* Handler for MGMT_OP_START_LIMITED_DISCOVERY: delegates to the shared
 * start_discovery_internal() with the limited-discovery opcode.
 */
3566 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
3567 void *data, u16 len)
3569 return start_discovery_internal(sk, hdev,
3570 MGMT_OP_START_LIMITED_DISCOVERY,
/* cmd_complete callback for Start Service Discovery: replies with the
 * given status, echoing data from the pending command.
 */
3574 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
3577 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY: like start discovery,
 * but with result filtering by RSSI threshold and an optional list of
 * service UUIDs appended to the command.  Validates the UUID count
 * against both an overflow-safe maximum and the actual payload length
 * before copying the list and kicking off discovery.
 */
3581 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
3582 void *data, u16 len)
3584 struct mgmt_cp_start_service_discovery *cp = data;
3585 struct mgmt_pending_cmd *cmd;
/* Upper bound chosen so sizeof(*cp) + uuid_count * 16 cannot
 * overflow the u16 length arithmetic below.
 */
3586 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
3587 u16 uuid_count, expected_len;
3591 BT_DBG("%s", hdev->name);
3595 if (!hdev_is_powered(hdev)) {
3596 err = mgmt_cmd_complete(sk, hdev->id,
3597 MGMT_OP_START_SERVICE_DISCOVERY,
3598 MGMT_STATUS_NOT_POWERED,
3599 &cp->type, sizeof(cp->type));
3603 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3604 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3605 err = mgmt_cmd_complete(sk, hdev->id,
3606 MGMT_OP_START_SERVICE_DISCOVERY,
3607 MGMT_STATUS_BUSY, &cp->type,
3612 uuid_count = __le16_to_cpu(cp->uuid_count);
3613 if (uuid_count > max_uuid_count) {
3614 BT_ERR("service_discovery: too big uuid_count value %u",
3616 err = mgmt_cmd_complete(sk, hdev->id,
3617 MGMT_OP_START_SERVICE_DISCOVERY,
3618 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The payload length must exactly match the declared UUID count */
3623 expected_len = sizeof(*cp) + uuid_count * 16;
3624 if (expected_len != len) {
3625 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
3627 err = mgmt_cmd_complete(sk, hdev->id,
3628 MGMT_OP_START_SERVICE_DISCOVERY,
3629 MGMT_STATUS_INVALID_PARAMS, &cp->type,
3634 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3635 err = mgmt_cmd_complete(sk, hdev->id,
3636 MGMT_OP_START_SERVICE_DISCOVERY,
3637 status, &cp->type, sizeof(cp->type));
3641 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
3648 cmd->cmd_complete = service_discovery_cmd_complete;
3650 /* Clear the discovery filter first to free any previously
3651 * allocated memory for the UUID list.
3653 hci_discovery_filter_clear(hdev);
3655 hdev->discovery.result_filtering = true;
3656 hdev->discovery.type = cp->type;
3657 hdev->discovery.rssi = cp->rssi;
3658 hdev->discovery.uuid_count = uuid_count;
3660 if (uuid_count > 0) {
/* Copy the UUID filter list; freed via hci_discovery_filter_clear */
3661 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
3663 if (!hdev->discovery.uuids) {
3664 err = mgmt_cmd_complete(sk, hdev->id,
3665 MGMT_OP_START_SERVICE_DISCOVERY,
3667 &cp->type, sizeof(cp->type));
3668 mgmt_pending_remove(cmd);
3673 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3674 queue_work(hdev->req_workqueue, &hdev->discov_update);
3678 hci_dev_unlock(hdev);
/* Called when the discovery stop sequence finishes: resolves the
 * pending MGMT_OP_STOP_DISCOVERY command with the mapped HCI status.
 */
3682 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
3684 struct mgmt_pending_cmd *cmd;
3686 BT_DBG("status %d", status);
3690 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3692 cmd->cmd_complete(cmd, mgmt_status(status));
3693 mgmt_pending_remove(cmd);
3696 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_STOP_DISCOVERY: rejects when no discovery is
 * active or the requested type does not match the running one, then
 * transitions to DISCOVERY_STOPPING and lets the discov_update work
 * perform the actual stop (completed via mgmt_stop_discovery_complete).
 */
3699 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3702 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3703 struct mgmt_pending_cmd *cmd;
3706 BT_DBG("%s", hdev->name);
3710 if (!hci_discovery_active(hdev)) {
3711 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3712 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3713 sizeof(mgmt_cp->type));
3717 if (hdev->discovery.type != mgmt_cp->type) {
3718 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3719 MGMT_STATUS_INVALID_PARAMS,
3720 &mgmt_cp->type, sizeof(mgmt_cp->type));
3724 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
3730 cmd->cmd_complete = generic_cmd_complete;
3732 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3733 queue_work(hdev->req_workqueue, &hdev->discov_update);
3737 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CONFIRM_NAME: userspace answers whether the name
 * of a discovered device is already known.  Requires active discovery
 * and an inquiry-cache entry with unknown name; marks the entry
 * NAME_KNOWN or NAME_NEEDED (the latter triggering name resolution).
 */
3741 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3744 struct mgmt_cp_confirm_name *cp = data;
3745 struct inquiry_entry *e;
3748 BT_DBG("%s", hdev->name);
3752 if (!hci_discovery_active(hdev)) {
3753 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3754 MGMT_STATUS_FAILED, &cp->addr,
3759 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3761 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3762 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3767 if (cp->name_known) {
3768 e->name_state = NAME_KNOWN;
/* Name still needed: queue the entry for name resolution */
3771 e->name_state = NAME_NEEDED;
3772 hci_inquiry_cache_update_resolve(hdev, e);
3775 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
3776 &cp->addr, sizeof(cp->addr));
3779 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_BLOCK_DEVICE: adds the address to the adapter's
 * blacklist and, on success, broadcasts MGMT_EV_DEVICE_BLOCKED to other
 * mgmt listeners.  A failed list insert is reported as FAILED.
 */
3783 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3786 struct mgmt_cp_block_device *cp = data;
3790 BT_DBG("%s", hdev->name);
3792 if (!bdaddr_type_is_valid(cp->addr.type))
3793 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3794 MGMT_STATUS_INVALID_PARAMS,
3795 &cp->addr, sizeof(cp->addr));
3799 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
3802 status = MGMT_STATUS_FAILED;
3806 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3808 status = MGMT_STATUS_SUCCESS;
3811 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3812 &cp->addr, sizeof(cp->addr));
3814 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_UNBLOCK_DEVICE: removes the address from the
 * blacklist and broadcasts MGMT_EV_DEVICE_UNBLOCKED on success.  A
 * failed delete (entry not present) is reported as INVALID_PARAMS.
 */
3819 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3822 struct mgmt_cp_unblock_device *cp = data;
3826 BT_DBG("%s", hdev->name);
3828 if (!bdaddr_type_is_valid(cp->addr.type))
3829 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3830 MGMT_STATUS_INVALID_PARAMS,
3831 &cp->addr, sizeof(cp->addr));
3835 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
3838 status = MGMT_STATUS_INVALID_PARAMS;
3842 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3844 status = MGMT_STATUS_SUCCESS;
3847 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3848 &cp->addr, sizeof(cp->addr));
3850 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEVICE_ID: stores the Device ID record
 * (source, vendor, product, version) used in the EIR data.  Source
 * values above 0x0002 are invalid per the DI profile.  Queues an EIR
 * update request after replying.
 */
3855 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3858 struct mgmt_cp_set_device_id *cp = data;
3859 struct hci_request req;
3863 BT_DBG("%s", hdev->name);
3865 source = __le16_to_cpu(cp->source);
3867 if (source > 0x0002)
3868 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3869 MGMT_STATUS_INVALID_PARAMS);
3873 hdev->devid_source = source;
3874 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3875 hdev->devid_product = __le16_to_cpu(cp->product);
3876 hdev->devid_version = __le16_to_cpu(cp->version);
3878 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
/* Device ID is advertised in EIR; refresh it asynchronously */
3881 hci_req_init(&req, hdev);
3882 __hci_req_update_eir(&req);
3883 hci_req_run(&req, NULL);
3885 hci_dev_unlock(hdev);
/* HCI request completion callback used when re-enabling instance
 * advertising; only logs the status.
 */
3890 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
3893 BT_DBG("status %d", status);
/* HCI request completion callback for Set Advertising: on failure sends
 * a status to all pending SET_ADVERTISING commands; on success syncs
 * the HCI_ADVERTISING flag with the controller state, answers pending
 * commands and emits New Settings.  If Set Advertising was just
 * disabled and advertising instances exist, re-enables multi-instance
 * advertising starting from the current (or first) instance.
 */
3896 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
3899 struct cmd_lookup match = { NULL, hdev };
3900 struct hci_request req;
3902 struct adv_info *adv_instance;
3908 u8 mgmt_err = mgmt_status(status);
3910 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3911 cmd_status_rsp, &mgmt_err);
/* Mirror the controller's actual LE advertising state into the
 * mgmt-visible HCI_ADVERTISING setting.
 */
3915 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3916 hci_dev_set_flag(hdev, HCI_ADVERTISING);
3918 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
3920 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3923 new_settings(hdev, match.sk);
3928 /* If "Set Advertising" was just disabled and instance advertising was
3929 * set up earlier, then re-enable multi-instance advertising.
3931 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3932 list_empty(&hdev->adv_instances))
3935 instance = hdev->cur_adv_instance;
/* No current instance: fall back to the first configured one */
3937 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
3938 struct adv_info, list);
3942 instance = adv_instance->instance;
3945 hci_req_init(&req, hdev);
3947 err = __hci_req_schedule_adv_instance(&req, instance, true);
3950 err = hci_req_run(&req, enable_advertising_instance);
3953 BT_ERR("Failed to re-configure advertising");
3956 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_ADVERTISING: val 0x00 = off, 0x01 = on,
 * 0x02 = on and connectable.  When no HCI traffic is needed (powered
 * off, no state change, an LE connection exists, or active LE scanning
 * is running) only the flags are toggled and settings are sent
 * directly; otherwise an HCI request is queued to switch advertising,
 * completing via set_advertising_complete().
 */
3959 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3962 struct mgmt_mode *cp = data;
3963 struct mgmt_pending_cmd *cmd;
3964 struct hci_request req;
3968 BT_DBG("request for %s", hdev->name);
3970 status = mgmt_le_support(hdev);
3972 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3975 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
3976 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3977 MGMT_STATUS_INVALID_PARAMS);
3983 /* The following conditions are ones which mean that we should
3984 * not do any HCI communication but directly send a mgmt
3985 * response to user space (after toggling the flag if
3988 if (!hdev_is_powered(hdev) ||
3989 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
3990 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
3991 hci_conn_num(hdev, LE_LINK) > 0 ||
3992 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
3993 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
/* Flag-only path: update HCI_ADVERTISING[_CONNECTABLE] without
 * touching the controller.
 */
3997 hdev->cur_adv_instance = 0x00;
3998 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
3999 if (cp->val == 0x02)
4000 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4002 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4004 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4005 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4008 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4013 err = new_settings(hdev, sk);
/* Serialize against other advertising/LE state changes */
4018 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4019 pending_find(MGMT_OP_SET_LE, hdev)) {
4020 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4025 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4031 hci_req_init(&req, hdev);
4033 if (cp->val == 0x02)
4034 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4036 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4038 cancel_adv_timeout(hdev);
4041 /* Switch to instance "0" for the Set Advertising setting.
4042 * We cannot use update_[adv|scan_rsp]_data() here as the
4043 * HCI_ADVERTISING flag is not yet set.
4045 hdev->cur_adv_instance = 0x00;
4046 __hci_req_update_adv_data(&req, 0x00);
4047 __hci_req_update_scan_rsp_data(&req, 0x00);
4048 __hci_req_enable_advertising(&req);
4050 __hci_req_disable_advertising(&req);
4053 err = hci_req_run(&req, set_advertising_complete);
4055 mgmt_pending_remove(cmd);
4058 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_STATIC_ADDRESS: configures the LE static
 * random address.  Requires LE support and a powered-off adapter.  A
 * non-ANY address must not be BDADDR_NONE and must have its two most
 * significant bits set, as required for static random addresses.
 */
4062 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4063 void *data, u16 len)
4065 struct mgmt_cp_set_static_address *cp = data;
4068 BT_DBG("%s", hdev->name);
4070 if (!lmp_le_capable(hdev))
4071 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4072 MGMT_STATUS_NOT_SUPPORTED);
4074 if (hdev_is_powered(hdev))
4075 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4076 MGMT_STATUS_REJECTED);
4078 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4079 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4080 return mgmt_cmd_status(sk, hdev->id,
4081 MGMT_OP_SET_STATIC_ADDRESS,
4082 MGMT_STATUS_INVALID_PARAMS);
4084 /* Two most significant bits shall be set */
4085 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4086 return mgmt_cmd_status(sk, hdev->id,
4087 MGMT_OP_SET_STATIC_ADDRESS,
4088 MGMT_STATUS_INVALID_PARAMS);
4093 bacpy(&hdev->static_addr, &cp->bdaddr);
4095 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4099 err = new_settings(hdev, sk);
4102 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SCAN_PARAMS: sets the LE scan interval and
 * window.  Both must lie in the HCI-valid range 0x0004-0x4000 and the
 * window must not exceed the interval.  If passive background scanning
 * is running (no discovery active), it is restarted so the new
 * parameters take effect immediately.
 */
4106 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4107 void *data, u16 len)
4109 struct mgmt_cp_set_scan_params *cp = data;
4110 __u16 interval, window;
4113 BT_DBG("%s", hdev->name);
4115 if (!lmp_le_capable(hdev))
4116 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4117 MGMT_STATUS_NOT_SUPPORTED);
4119 interval = __le16_to_cpu(cp->interval);
4121 if (interval < 0x0004 || interval > 0x4000)
4122 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4123 MGMT_STATUS_INVALID_PARAMS);
4125 window = __le16_to_cpu(cp->window);
4127 if (window < 0x0004 || window > 0x4000)
4128 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4129 MGMT_STATUS_INVALID_PARAMS);
4131 if (window > interval)
4132 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4133 MGMT_STATUS_INVALID_PARAMS);
4137 hdev->le_scan_interval = interval;
4138 hdev->le_scan_window = window;
4140 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4143 /* If background scan is running, restart it so new parameters are
4146 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4147 hdev->discovery.state == DISCOVERY_STOPPED) {
4148 struct hci_request req;
4150 hci_req_init(&req, hdev);
4152 hci_req_add_le_scan_disable(&req);
4153 hci_req_add_le_passive_scan(&req);
4155 hci_req_run(&req, NULL);
4158 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Fast Connectable: on failure
 * sends the mapped status; on success syncs HCI_FAST_CONNECTABLE with
 * the requested value, answers with the settings and emits New
 * Settings.
 */
4163 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4166 struct mgmt_pending_cmd *cmd;
4168 BT_DBG("status 0x%02x", status);
4172 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4177 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4178 mgmt_status(status));
4180 struct mgmt_mode *cp = cmd->param;
4183 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4185 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4187 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4188 new_settings(hdev, cmd->sk);
4191 mgmt_pending_remove(cmd);
4194 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE: toggles fast-connectable
 * page scan parameters.  Requires BR/EDR enabled and controller version
 * >= 1.2.  Unchanged state or a powered-off adapter is answered by
 * flag manipulation only; otherwise an HCI request writes the page
 * scan settings, completing via fast_connectable_complete().
 */
4197 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4198 void *data, u16 len)
4200 struct mgmt_mode *cp = data;
4201 struct mgmt_pending_cmd *cmd;
4202 struct hci_request req;
4205 BT_DBG("%s", hdev->name);
4207 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4208 hdev->hci_ver < BLUETOOTH_VER_1_2)
4209 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4210 MGMT_STATUS_NOT_SUPPORTED);
4212 if (cp->val != 0x00 && cp->val != 0x01)
4213 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4214 MGMT_STATUS_INVALID_PARAMS);
4218 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4219 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op request: already in the requested state */
4224 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4225 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4230 if (!hdev_is_powered(hdev)) {
4231 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4232 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4234 new_settings(hdev, sk);
4238 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4245 hci_req_init(&req, hdev);
4247 __hci_req_write_fast_connectable(&req, cp->val);
4249 err = hci_req_run(&req, fast_connectable_complete);
4251 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4252 MGMT_STATUS_FAILED);
4253 mgmt_pending_remove(cmd);
4257 hci_dev_unlock(hdev);
/* HCI request completion callback for Set BR/EDR: on failure restores
 * (clears) the HCI_BREDR_ENABLED flag that set_bredr() had flipped in
 * advance and sends the mapped status; on success answers with the
 * settings and emits New Settings.
 */
4262 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4264 struct mgmt_pending_cmd *cmd;
4266 BT_DBG("status 0x%02x", status);
4270 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4275 u8 mgmt_err = mgmt_status(status);
4277 /* We need to restore the flag if related HCI commands
4280 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4282 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4284 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4285 new_settings(hdev, cmd->sk);
4288 mgmt_pending_remove(cmd);
4291 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller.
 *
 * Preconditions checked up front:
 *  - controller must be dual-mode capable (both BR/EDR and LE LMP),
 *  - LE must currently be enabled (BR/EDR-only config is managed via
 *    the LE setting instead),
 *  - cp->val must be a strict boolean (0x00/0x01).
 *
 * NOTE(review): extraction has dropped lines (gotos, braces, some
 * status codes); comments below describe only the visible logic.
 */
4294 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4296 struct mgmt_mode *cp = data;
4297 struct mgmt_pending_cmd *cmd;
4298 struct hci_request req;
4301 BT_DBG("request for %s", hdev->name);
4303 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4304 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4305 MGMT_STATUS_NOT_SUPPORTED);
4307 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4308 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4309 MGMT_STATUS_REJECTED);
4311 if (cp->val != 0x00 && cp->val != 0x01)
4312 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4313 MGMT_STATUS_INVALID_PARAMS);
/* No-op request: setting already matches, just acknowledge. */
4317 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4318 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered off: only flags are touched; disabling BR/EDR also clears
 * every BR/EDR-dependent setting so they don't survive the transport
 * being switched off.
 */
4322 if (!hdev_is_powered(hdev)) {
4324 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
4325 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
4326 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
4327 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4328 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
4331 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
4333 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4337 err = new_settings(hdev, sk);
4341 /* Reject disabling when powered on */
4343 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4344 MGMT_STATUS_REJECTED);
4347 /* When configuring a dual-mode controller to operate
4348 * with LE only and using a static address, then switching
4349 * BR/EDR back on is not allowed.
4351 * Dual-mode controllers shall operate with the public
4352 * address as its identity address for BR/EDR and LE. So
4353 * reject the attempt to create an invalid configuration.
4355 * The same restrictions applies when secure connections
4356 * has been enabled. For BR/EDR this is a controller feature
4357 * while for LE it is a host stack feature. This means that
4358 * switching BR/EDR back on when secure connections has been
4359 * enabled is not a supported transaction.
4361 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4362 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4363 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
4364 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4365 MGMT_STATUS_REJECTED);
/* Only one Set BR/EDR operation may be in flight at a time. */
4370 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
4371 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4376 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4382 /* We need to flip the bit already here so that
4383 * hci_req_update_adv_data generates the correct flags.
4385 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
4387 hci_req_init(&req, hdev);
4389 __hci_req_write_fast_connectable(&req, false);
4390 __hci_req_update_scan(&req);
4392 /* Since only the advertising data flags will change, there
4393 * is no need to update the scan response data.
4395 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
/* set_bredr_complete() undoes the optimistic flag flip on failure. */
4397 err = hci_req_run(&req, set_bredr_complete);
4399 mgmt_pending_remove(cmd);
4402 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_SECURE_CONN.
 * On HCI failure, reports the mapped error to the requester. On
 * success, translates the requested mode stored in the pending
 * command's param into the SC flag pair:
 *   0x00 -> SC_ENABLED clear, SC_ONLY clear
 *   0x01 -> SC_ENABLED set,   SC_ONLY clear
 *   0x02 -> SC_ENABLED set,   SC_ONLY set (Secure Connections Only)
 * NOTE(review): extraction dropped the switch/case scaffolding and the
 * failure-path flag restore comment; mapping above is inferred from the
 * three visible flag-set groups — confirm against full source.
 */
4406 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4408 struct mgmt_pending_cmd *cmd;
4409 struct mgmt_mode *cp;
4411 BT_DBG("%s status %u", hdev->name, status);
4415 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4420 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
4421 mgmt_status(status));
/* val == 0x00: Secure Connections fully disabled. */
4429 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
4430 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* val == 0x01: SC enabled, but legacy pairing still allowed. */
4433 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4434 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* val == 0x02: SC-only mode. */
4437 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4438 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4442 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4443 new_settings(hdev, cmd->sk);
4446 mgmt_pending_remove(cmd);
4448 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: configure Secure Connections.
 * cp->val: 0x00 = off, 0x01 = on, 0x02 = SC-only mode.
 *
 * Supported if the controller is SC capable (BR/EDR) or LE is enabled
 * (LE SC is a host feature). On a BR/EDR-enabled, SC-capable controller
 * SSP must be enabled first, otherwise the request is rejected.
 * NOTE(review): extraction dropped lines (val assignment, gotos,
 * braces); comments describe only the visible logic.
 */
4451 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4452 void *data, u16 len)
4454 struct mgmt_mode *cp = data;
4455 struct mgmt_pending_cmd *cmd;
4456 struct hci_request req;
4460 BT_DBG("request for %s", hdev->name);
4462 if (!lmp_sc_capable(hdev) &&
4463 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4464 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4465 MGMT_STATUS_NOT_SUPPORTED);
4467 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4468 lmp_sc_capable(hdev) &&
4469 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
4470 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4471 MGMT_STATUS_REJECTED);
4473 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4474 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4475 MGMT_STATUS_INVALID_PARAMS);
/* Flags-only path: powered off, or no BR/EDR SC controller support to
 * program — just record the setting in the dev flags.
 */
4479 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4480 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4484 changed = !hci_dev_test_and_set_flag(hdev,
4486 if (cp->val == 0x02)
4487 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4489 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4491 changed = hci_dev_test_and_clear_flag(hdev,
4493 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4496 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4501 err = new_settings(hdev, sk);
/* Only one Set Secure Connections operation in flight at a time. */
4506 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4507 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state (both enable and SC-only bits match):
 * acknowledge without issuing an HCI command.
 */
4514 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
4515 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4516 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4520 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
/* Program the controller; sc_enable_complete() finishes the command. */
4526 hci_req_init(&req, hdev);
4527 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4528 err = hci_req_run(&req, sc_enable_complete);
4530 mgmt_pending_remove(cmd);
4535 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler.
 * cp->val: 0x00 = discard debug keys, 0x01 = keep stored debug keys,
 * 0x02 = additionally use SSP debug mode on the controller.
 * Tracks two independent flags: HCI_KEEP_DEBUG_KEYS (val >= 0x01) and
 * HCI_USE_DEBUG_KEYS (val == 0x02). When the use-flag actually changes
 * on a powered, SSP-enabled controller, the SSP debug mode is
 * programmed via HCI_OP_WRITE_SSP_DEBUG_MODE (fire-and-forget; the
 * command status is not awaited here).
 */
4539 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4540 void *data, u16 len)
4542 struct mgmt_mode *cp = data;
4543 bool changed, use_changed;
4546 BT_DBG("request for %s", hdev->name);
4548 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4549 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4550 MGMT_STATUS_INVALID_PARAMS);
/* changed reflects whether the keep-debug-keys flag flipped. */
4555 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
4557 changed = hci_dev_test_and_clear_flag(hdev,
4558 HCI_KEEP_DEBUG_KEYS);
4560 if (cp->val == 0x02)
4561 use_changed = !hci_dev_test_and_set_flag(hdev,
4562 HCI_USE_DEBUG_KEYS);
4564 use_changed = hci_dev_test_and_clear_flag(hdev,
4565 HCI_USE_DEBUG_KEYS);
/* Only touch the controller when the use-flag changed and SSP is up. */
4567 if (hdev_is_powered(hdev) && use_changed &&
4568 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
4569 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4570 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4571 sizeof(mode), &mode);
4574 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
/* Broadcast New Settings only if something actually changed. */
4579 err = new_settings(hdev, sk);
4582 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy (RPA use).
 * cp->privacy: 0x00 = off, 0x01 = on, 0x02 = limited privacy.
 * Only allowed while the controller is powered off (REJECTED
 * otherwise), so the new IRK/flags take effect on next power-on.
 * Enabling stores the caller-supplied IRK in hdev->irk and marks the
 * RPA as expired so a fresh one is generated; disabling wipes the IRK.
 */
4586 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4589 struct mgmt_cp_set_privacy *cp = cp_data;
4593 BT_DBG("request for %s", hdev->name);
4595 if (!lmp_le_capable(hdev))
4596 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4597 MGMT_STATUS_NOT_SUPPORTED);
4599 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
4600 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4601 MGMT_STATUS_INVALID_PARAMS);
4603 if (hdev_is_powered(hdev))
4604 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4605 MGMT_STATUS_REJECTED);
4609 /* If user space supports this command it is also expected to
4610 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4612 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enable path: record IRK, force RPA regeneration, track limited mode. */
4615 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
4616 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4617 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
4618 if (cp->privacy == 0x02)
4619 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
4621 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disable path: clear privacy state and zeroize the stored IRK. */
4623 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
4624 memset(hdev->irk, 0, sizeof(hdev->irk));
4625 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
4626 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
4629 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4634 err = new_settings(hdev, sk);
4637 hci_dev_unlock(hdev);
/* Validate the address in a mgmt IRK entry.
 * LE public addresses are always acceptable; LE random addresses must
 * be static random, i.e. the two most significant bits of the MSB
 * (b[5]) set to 0b11. (Return statements fall outside this extract.)
 */
4641 static bool irk_is_valid(struct mgmt_irk_info *irk)
4643 switch (irk->addr.type) {
4644 case BDADDR_LE_PUBLIC:
4647 case BDADDR_LE_RANDOM:
4648 /* Two most significant bits shall be set */
4649 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: replace the kernel's IRK store with the
 * list supplied by user space.
 * Validates: LE capability, irk_count against the arithmetic maximum
 * that fits in a u16 length (overflow guard), exact payload length,
 * and each entry's address via irk_is_valid() — all before touching
 * state, so the operation is all-or-nothing.
 */
4657 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4660 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound prevents expected_len from overflowing u16. */
4661 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4662 sizeof(struct mgmt_irk_info));
4663 u16 irk_count, expected_len;
4666 BT_DBG("request for %s", hdev->name);
4668 if (!lmp_le_capable(hdev))
4669 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4670 MGMT_STATUS_NOT_SUPPORTED);
4672 irk_count = __le16_to_cpu(cp->irk_count);
4673 if (irk_count > max_irk_count) {
4674 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4675 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4676 MGMT_STATUS_INVALID_PARAMS);
/* Payload length must match the declared count exactly. */
4679 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4680 if (expected_len != len) {
4681 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4683 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4684 MGMT_STATUS_INVALID_PARAMS);
4687 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate every entry before clearing the existing store. */
4689 for (i = 0; i < irk_count; i++) {
4690 struct mgmt_irk_info *key = &cp->irks[i];
4692 if (!irk_is_valid(key))
4693 return mgmt_cmd_status(sk, hdev->id,
4695 MGMT_STATUS_INVALID_PARAMS);
4700 hci_smp_irks_clear(hdev);
4702 for (i = 0; i < irk_count; i++) {
4703 struct mgmt_irk_info *irk = &cp->irks[i];
4705 hci_add_irk(hdev, &irk->addr.bdaddr,
4706 le_addr_type(irk->addr.type), irk->val,
/* User space handles IRKs, so enable RPA resolving. */
4710 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
4712 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4714 hci_dev_unlock(hdev);
/* Validate a mgmt LTK entry: master must be a strict boolean, and the
 * address must be LE public or static random (top two bits of b[5]
 * set). (Return statements fall outside this extract.)
 */
4719 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4721 if (key->master != 0x00 && key->master != 0x01)
4724 switch (key->addr.type) {
4725 case BDADDR_LE_PUBLIC:
4728 case BDADDR_LE_RANDOM:
4729 /* Two most significant bits shall be set */
4730 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the SMP LTK store with
 * the list supplied by user space.
 * Same validation pattern as load_irks(): LE capability, overflow-safe
 * key_count bound, exact payload length, then per-entry validation via
 * ltk_is_valid() before the existing store is cleared.
 */
4738 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4739 void *cp_data, u16 len)
4741 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound prevents expected_len from overflowing u16. */
4742 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4743 sizeof(struct mgmt_ltk_info));
4744 u16 key_count, expected_len;
4747 BT_DBG("request for %s", hdev->name);
4749 if (!lmp_le_capable(hdev))
4750 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4751 MGMT_STATUS_NOT_SUPPORTED);
4753 key_count = __le16_to_cpu(cp->key_count);
4754 if (key_count > max_key_count) {
4755 BT_ERR("load_ltks: too big key_count value %u", key_count);
4756 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4757 MGMT_STATUS_INVALID_PARAMS);
4760 expected_len = sizeof(*cp) + key_count *
4761 sizeof(struct mgmt_ltk_info);
4762 if (expected_len != len) {
4763 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4765 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4766 MGMT_STATUS_INVALID_PARAMS);
4769 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate every entry before clearing the existing store. */
4771 for (i = 0; i < key_count; i++) {
4772 struct mgmt_ltk_info *key = &cp->keys[i];
4774 if (!ltk_is_valid(key))
4775 return mgmt_cmd_status(sk, hdev->id,
4776 MGMT_OP_LOAD_LONG_TERM_KEYS,
4777 MGMT_STATUS_INVALID_PARAMS);
4782 hci_smp_ltks_clear(hdev);
4784 for (i = 0; i < key_count; i++) {
4785 struct mgmt_ltk_info *key = &cp->keys[i];
4786 u8 type, authenticated;
/* Map the mgmt key type to SMP key type + authentication level.
 * Legacy (non-P256) keys are master/slave distinguished; P256 keys
 * are role-agnostic. (break statements fall outside this extract.)
 */
4788 switch (key->type) {
4789 case MGMT_LTK_UNAUTHENTICATED:
4790 authenticated = 0x00;
4791 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
4793 case MGMT_LTK_AUTHENTICATED:
4794 authenticated = 0x01;
4795 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
4797 case MGMT_LTK_P256_UNAUTH:
4798 authenticated = 0x00;
4799 type = SMP_LTK_P256;
4801 case MGMT_LTK_P256_AUTH:
4802 authenticated = 0x01;
4803 type = SMP_LTK_P256;
4805 case MGMT_LTK_P256_DEBUG:
4806 authenticated = 0x00;
4807 type = SMP_LTK_P256_DEBUG;
4812 hci_add_ltk(hdev, &key->addr.bdaddr,
4813 le_addr_type(key->addr.type), type, authenticated,
4814 key->val, key->enc_size, key->ediv, key->rand);
4817 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4820 hci_dev_unlock(hdev);
/* cmd_complete hook for MGMT_OP_GET_CONN_INFO.
 * Builds the reply from the connection's cached RSSI/TX-power values
 * on success, or the "invalid" sentinels on error, then sends it and
 * drops the hci_conn reference taken in get_conn_info().
 */
4825 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
4827 struct hci_conn *conn = cmd->user_data;
4828 struct mgmt_rp_get_conn_info rp;
/* Echo back the address the command was issued for. */
4831 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
4833 if (status == MGMT_STATUS_SUCCESS) {
4834 rp.rssi = conn->rssi;
4835 rp.tx_power = conn->tx_power;
4836 rp.max_tx_power = conn->max_tx_power;
4838 rp.rssi = HCI_RSSI_INVALID;
4839 rp.tx_power = HCI_TX_POWER_INVALID;
4840 rp.max_tx_power = HCI_TX_POWER_INVALID;
4843 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4844 status, &rp, sizeof(rp));
/* Balances hci_conn_hold() done when the command was queued. */
4846 hci_conn_drop(conn);
/* HCI request completion callback for the RSSI/TX-power refresh issued
 * by get_conn_info(). Recovers the connection handle from the last
 * sent HCI command, locates the matching pending GET_CONN_INFO command
 * and completes it.
 */
4852 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
4855 struct hci_cp_read_rssi *cp;
4856 struct mgmt_pending_cmd *cmd;
4857 struct hci_conn *conn;
4861 BT_DBG("status 0x%02x", hci_status);
4865 /* Commands sent in request are either Read RSSI or Read Transmit Power
4866 * Level so we check which one was last sent to retrieve connection
4867 * handle. Both commands have handle as first parameter so it's safe to
4868 * cast data on the same command struct.
4870 * First command sent is always Read RSSI and we fail only if it fails.
4871 * In other case we simply override error to indicate success as we
4872 * already remembered if TX power value is actually valid.
4874 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
/* Fallback: last command was Read TX Power; per the comment above its
 * failure is deliberately masked as success.
 */
4876 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
4877 status = MGMT_STATUS_SUCCESS;
4879 status = mgmt_status(hci_status);
/* Neither command found in sent-cmd data: shouldn't happen. */
4883 BT_ERR("invalid sent_cmd in conn_info response");
4887 handle = __le16_to_cpu(cp->handle);
4888 conn = hci_conn_hash_lookup_handle(hdev, handle);
4890 BT_ERR("unknown handle (%d) in conn_info response", handle);
/* Match the pending command by its conn (stored as user_data). */
4894 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
4898 cmd->cmd_complete(cmd, status);
4899 mgmt_pending_remove(cmd);
4902 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: report RSSI and TX power for an
 * existing connection.
 * Values are cached on the hci_conn with a randomized validity window
 * (conn_info_min_age..conn_info_max_age ms) so clients cannot infer a
 * fixed polling interval. When the cache is stale, an HCI request is
 * built to refresh RSSI (always) and TX power (only when still
 * unknown), completed asynchronously via conn_info_refresh_complete().
 * Fresh cache is answered synchronously.
 */
4905 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
4908 struct mgmt_cp_get_conn_info *cp = data;
4909 struct mgmt_rp_get_conn_info rp;
4910 struct hci_conn *conn;
4911 unsigned long conn_info_age;
4914 BT_DBG("%s", hdev->name);
/* Reply always echoes the requested address. */
4916 memset(&rp, 0, sizeof(rp));
4917 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4918 rp.addr.type = cp->addr.type;
4920 if (!bdaddr_type_is_valid(cp->addr.type))
4921 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4922 MGMT_STATUS_INVALID_PARAMS,
4927 if (!hdev_is_powered(hdev)) {
4928 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4929 MGMT_STATUS_NOT_POWERED, &rp,
/* Look up the connection on the transport matching the address type. */
4934 if (cp->addr.type == BDADDR_BREDR)
4935 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4938 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
4940 if (!conn || conn->state != BT_CONNECTED) {
4941 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4942 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one refresh per connection may be in flight. */
4947 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
4948 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4949 MGMT_STATUS_BUSY, &rp, sizeof(rp));
4953 /* To avoid client trying to guess when to poll again for information we
4954 * calculate conn info age as random value between min/max set in hdev.
4956 conn_info_age = hdev->conn_info_min_age +
4957 prandom_u32_max(hdev->conn_info_max_age -
4958 hdev->conn_info_min_age);
4960 /* Query controller to refresh cached values if they are too old or were
4963 if (time_after(jiffies, conn->conn_info_timestamp +
4964 msecs_to_jiffies(conn_info_age)) ||
4965 !conn->conn_info_timestamp) {
4966 struct hci_request req;
4967 struct hci_cp_read_tx_power req_txp_cp;
4968 struct hci_cp_read_rssi req_rssi_cp;
4969 struct mgmt_pending_cmd *cmd;
4971 hci_req_init(&req, hdev);
4972 req_rssi_cp.handle = cpu_to_le16(conn->handle);
4973 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
4976 /* For LE links TX power does not change thus we don't need to
4977 * query for it once value is known.
4979 if (!bdaddr_type_is_le(cp->addr.type) ||
4980 conn->tx_power == HCI_TX_POWER_INVALID) {
4981 req_txp_cp.handle = cpu_to_le16(conn->handle);
/* type 0x00 = current TX power level. */
4982 req_txp_cp.type = 0x00;
4983 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4984 sizeof(req_txp_cp), &req_txp_cp);
4987 /* Max TX power needs to be read only once per connection */
4988 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
4989 req_txp_cp.handle = cpu_to_le16(conn->handle);
/* type 0x01 = maximum TX power level. */
4990 req_txp_cp.type = 0x01;
4991 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4992 sizeof(req_txp_cp), &req_txp_cp);
4995 err = hci_req_run(&req, conn_info_refresh_complete);
4999 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Pin the connection until conn_info_cmd_complete() drops it. */
5006 hci_conn_hold(conn);
5007 cmd->user_data = hci_conn_get(conn);
5008 cmd->cmd_complete = conn_info_cmd_complete;
5010 conn->conn_info_timestamp = jiffies;
5012 /* Cache is valid, just reply with values cached in hci_conn */
5013 rp.rssi = conn->rssi;
5014 rp.tx_power = conn->tx_power;
5015 rp.max_tx_power = conn->max_tx_power;
5017 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5018 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5022 hci_dev_unlock(hdev);
/* cmd_complete hook for MGMT_OP_GET_CLOCK_INFO.
 * Fills in the local clock (from hdev) and, when a piconet connection
 * was queried, its clock and accuracy, then sends the reply and drops
 * the connection reference taken in get_clock_info().
 * NOTE(review): the matching hci_dev_put() for hci_dev_get() is
 * outside this extract — confirm in full source.
 */
5026 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5028 struct hci_conn *conn = cmd->user_data;
5029 struct mgmt_rp_get_clock_info rp;
5030 struct hci_dev *hdev;
5033 memset(&rp, 0, sizeof(rp));
/* Echo back the address the command was issued for. */
5034 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5039 hdev = hci_dev_get(cmd->index);
5041 rp.local_clock = cpu_to_le32(hdev->clock);
/* Per-connection (piconet) clock values when a conn was supplied. */
5046 rp.piconet_clock = cpu_to_le32(conn->clock);
5047 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5051 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
/* Balances hci_conn_hold() done when the command was queued. */
5055 hci_conn_drop(conn);
/* HCI request completion callback for MGMT_OP_GET_CLOCK_INFO.
 * Recovers which clock was read from the sent HCI_OP_READ_CLOCK
 * parameters ("which" != 0 means piconet clock of a specific handle),
 * finds the matching pending command and completes it.
 */
5062 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5064 struct hci_cp_read_clock *hci_cp;
5065 struct mgmt_pending_cmd *cmd;
5066 struct hci_conn *conn;
5068 BT_DBG("%s status %u", hdev->name, status);
5072 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which == 0x01: piconet clock — resolve the connection by handle. */
5076 if (hci_cp->which) {
5077 u16 handle = __le16_to_cpu(hci_cp->handle);
5078 conn = hci_conn_hash_lookup_handle(hdev, handle);
/* Match the pending command by conn (NULL for local-clock-only). */
5083 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5087 cmd->cmd_complete(cmd, mgmt_status(status));
5088 mgmt_pending_remove(cmd);
5091 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler: read the local Bluetooth clock and,
 * when a non-BDADDR_ANY BR/EDR address is given, the piconet clock of
 * that connection. Only BR/EDR addresses are valid for this command.
 * The reply is delivered asynchronously via get_clock_info_complete()
 * -> clock_info_cmd_complete().
 */
5094 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5097 struct mgmt_cp_get_clock_info *cp = data;
5098 struct mgmt_rp_get_clock_info rp;
5099 struct hci_cp_read_clock hci_cp;
5100 struct mgmt_pending_cmd *cmd;
5101 struct hci_request req;
5102 struct hci_conn *conn;
5105 BT_DBG("%s", hdev->name);
/* Reply always echoes the requested address. */
5107 memset(&rp, 0, sizeof(rp));
5108 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5109 rp.addr.type = cp->addr.type;
5111 if (cp->addr.type != BDADDR_BREDR)
5112 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5113 MGMT_STATUS_INVALID_PARAMS,
5118 if (!hdev_is_powered(hdev)) {
5119 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5120 MGMT_STATUS_NOT_POWERED, &rp,
/* Non-wildcard address: the piconet clock of that ACL is wanted too. */
5125 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5126 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5128 if (!conn || conn->state != BT_CONNECTED) {
5129 err = mgmt_cmd_complete(sk, hdev->id,
5130 MGMT_OP_GET_CLOCK_INFO,
5131 MGMT_STATUS_NOT_CONNECTED,
5139 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5145 cmd->cmd_complete = clock_info_cmd_complete;
5147 hci_req_init(&req, hdev);
/* First Read Clock with zeroed params = local clock (which 0x00). */
5149 memset(&hci_cp, 0, sizeof(hci_cp));
5150 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Pin the connection until clock_info_cmd_complete() drops it. */
5153 hci_conn_hold(conn);
5154 cmd->user_data = hci_conn_get(conn);
5156 hci_cp.handle = cpu_to_le16(conn->handle);
5157 hci_cp.which = 0x01; /* Piconet clock */
5158 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5161 err = hci_req_run(&req, get_clock_info_complete);
5163 mgmt_pending_remove(cmd);
5166 hci_dev_unlock(hdev);
/* Check whether an LE connection to the given address/type exists and
 * is in BT_CONNECTED state. (Return statements fall outside this
 * extract.)
 */
5170 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5172 struct hci_conn *conn;
5174 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5178 if (conn->dst_type != type)
5181 if (conn->state != BT_CONNECTED)
5187 /* This function requires the caller holds hdev->lock */
/* Create (or reuse) conn params for addr/addr_type and set its
 * auto-connect policy, moving the entry onto the matching action list:
 *  - DISABLED/LINK_LOSS: no list (kept on pend_le_conns only if an
 *    explicit connect is in progress),
 *  - REPORT: pend_le_reports (or pend_le_conns for explicit connect),
 *  - DIRECT/ALWAYS: pend_le_conns unless already connected.
 * (break statements and the return fall outside this extract.)
 */
5188 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
5189 u8 addr_type, u8 auto_connect)
5191 struct hci_conn_params *params;
5193 params = hci_conn_params_add(hdev, addr, addr_type);
5197 if (params->auto_connect == auto_connect)
/* Detach from any previous action list before re-filing. */
5200 list_del_init(&params->action);
5202 switch (auto_connect) {
5203 case HCI_AUTO_CONN_DISABLED:
5204 case HCI_AUTO_CONN_LINK_LOSS:
5205 /* If auto connect is being disabled when we're trying to
5206 * connect to device, keep connecting.
5208 if (params->explicit_connect)
5209 list_add(&params->action, &hdev->pend_le_conns);
5211 case HCI_AUTO_CONN_REPORT:
5212 if (params->explicit_connect)
5213 list_add(&params->action, &hdev->pend_le_conns);
5215 list_add(&params->action, &hdev->pend_le_reports);
5217 case HCI_AUTO_CONN_DIRECT:
5218 case HCI_AUTO_CONN_ALWAYS:
5219 if (!is_connected(hdev, addr, addr_type))
5220 list_add(&params->action, &hdev->pend_le_conns);
5224 params->auto_connect = auto_connect;
5226 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emit the MGMT_EV_DEVICE_ADDED event to all mgmt sockets except the
 * originating one (sk passed as the skip socket).
 */
5232 static void device_added(struct sock *sk, struct hci_dev *hdev,
5233 bdaddr_t *bdaddr, u8 type, u8 action)
5235 struct mgmt_ev_device_added ev;
5237 bacpy(&ev.addr.bdaddr, bdaddr);
5238 ev.addr.type = type;
5241 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_ADD_DEVICE handler.
 * cp->action: 0x00 = background scan report, 0x01 = allow incoming
 * connection (BR/EDR) / direct connect (LE), 0x02 = auto-connect.
 * BR/EDR addresses go on the whitelist (only action 0x01 supported);
 * LE identity addresses get conn params with the mapped auto-connect
 * policy via hci_conn_params_set().
 */
5244 static int add_device(struct sock *sk, struct hci_dev *hdev,
5245 void *data, u16 len)
5247 struct mgmt_cp_add_device *cp = data;
5248 u8 auto_conn, addr_type;
5251 BT_DBG("%s", hdev->name);
5253 if (!bdaddr_type_is_valid(cp->addr.type) ||
5254 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5255 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5256 MGMT_STATUS_INVALID_PARAMS,
5257 &cp->addr, sizeof(cp->addr));
5259 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5260 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5261 MGMT_STATUS_INVALID_PARAMS,
5262 &cp->addr, sizeof(cp->addr));
/* BR/EDR path: whitelist entry + page-scan update. */
5266 if (cp->addr.type == BDADDR_BREDR) {
5267 /* Only incoming connections action is supported for now */
5268 if (cp->action != 0x01) {
5269 err = mgmt_cmd_complete(sk, hdev->id,
5271 MGMT_STATUS_INVALID_PARAMS,
5272 &cp->addr, sizeof(cp->addr));
5276 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5281 hci_req_update_scan(hdev);
/* LE path below. */
5286 addr_type = le_addr_type(cp->addr.type);
5288 if (cp->action == 0x02)
5289 auto_conn = HCI_AUTO_CONN_ALWAYS;
5290 else if (cp->action == 0x01)
5291 auto_conn = HCI_AUTO_CONN_DIRECT;
5293 auto_conn = HCI_AUTO_CONN_REPORT;
5295 /* Kernel internally uses conn_params with resolvable private
5296 * address, but Add Device allows only identity addresses.
5297 * Make sure it is enforced before calling
5298 * hci_conn_params_lookup.
5300 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
5301 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5302 MGMT_STATUS_INVALID_PARAMS,
5303 &cp->addr, sizeof(cp->addr));
5307 /* If the connection parameters don't exist for this device,
5308 * they will be created and configured with defaults.
5310 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5312 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5313 MGMT_STATUS_FAILED, &cp->addr,
/* Re-evaluate passive scanning with the new pend lists. */
5318 hci_update_background_scan(hdev);
5321 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5323 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5324 MGMT_STATUS_SUCCESS, &cp->addr,
5328 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_REMOVED event to all mgmt sockets except the
 * originating one (sk passed as the skip socket).
 */
5332 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5333 bdaddr_t *bdaddr, u8 type)
5335 struct mgmt_ev_device_removed ev;
5337 bacpy(&ev.addr.bdaddr, bdaddr);
5338 ev.addr.type = type;
5340 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_REMOVE_DEVICE handler.
 * A specific address removes that single entry: BR/EDR -> whitelist
 * entry; LE identity address -> its conn params (rejected for entries
 * in DISABLED/EXPLICIT auto-connect state, which are kernel-internal).
 * BDADDR_ANY (with type 0) wipes the whole whitelist and all
 * user-added LE conn params, preserving explicit-connect entries by
 * demoting them to HCI_AUTO_CONN_EXPLICIT instead of deleting.
 */
5343 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5344 void *data, u16 len)
5346 struct mgmt_cp_remove_device *cp = data;
5349 BT_DBG("%s", hdev->name);
/* Single-device removal path. */
5353 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5354 struct hci_conn_params *params;
5357 if (!bdaddr_type_is_valid(cp->addr.type)) {
5358 err = mgmt_cmd_complete(sk, hdev->id,
5359 MGMT_OP_REMOVE_DEVICE,
5360 MGMT_STATUS_INVALID_PARAMS,
5361 &cp->addr, sizeof(cp->addr));
5365 if (cp->addr.type == BDADDR_BREDR) {
5366 err = hci_bdaddr_list_del(&hdev->whitelist,
/* Delete failure means the entry never existed. */
5370 err = mgmt_cmd_complete(sk, hdev->id,
5371 MGMT_OP_REMOVE_DEVICE,
5372 MGMT_STATUS_INVALID_PARAMS,
5378 hci_req_update_scan(hdev);
5380 device_removed(sk, hdev, &cp->addr.bdaddr,
/* LE single-device path. */
5385 addr_type = le_addr_type(cp->addr.type);
5387 /* Kernel internally uses conn_params with resolvable private
5388 * address, but Remove Device allows only identity addresses.
5389 * Make sure it is enforced before calling
5390 * hci_conn_params_lookup.
5392 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
5393 err = mgmt_cmd_complete(sk, hdev->id,
5394 MGMT_OP_REMOVE_DEVICE,
5395 MGMT_STATUS_INVALID_PARAMS,
5396 &cp->addr, sizeof(cp->addr));
5400 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5403 err = mgmt_cmd_complete(sk, hdev->id,
5404 MGMT_OP_REMOVE_DEVICE,
5405 MGMT_STATUS_INVALID_PARAMS,
5406 &cp->addr, sizeof(cp->addr));
/* DISABLED/EXPLICIT entries are kernel-managed, not removable via
 * mgmt.
 */
5410 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
5411 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
5412 err = mgmt_cmd_complete(sk, hdev->id,
5413 MGMT_OP_REMOVE_DEVICE,
5414 MGMT_STATUS_INVALID_PARAMS,
5415 &cp->addr, sizeof(cp->addr));
5419 list_del(&params->action);
5420 list_del(&params->list);
5422 hci_update_background_scan(hdev);
5424 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* Remove-all path (BDADDR_ANY). */
5426 struct hci_conn_params *p, *tmp;
5427 struct bdaddr_list *b, *btmp;
/* For BDADDR_ANY the type must be 0. */
5429 if (cp->addr.type) {
5430 err = mgmt_cmd_complete(sk, hdev->id,
5431 MGMT_OP_REMOVE_DEVICE,
5432 MGMT_STATUS_INVALID_PARAMS,
5433 &cp->addr, sizeof(cp->addr));
5437 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5438 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5443 hci_req_update_scan(hdev);
5445 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5446 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5448 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Keep explicit-connect entries alive, demoted to EXPLICIT. */
5449 if (p->explicit_connect) {
5450 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
5453 list_del(&p->action);
5458 BT_DBG("All LE connection parameters were removed");
5460 hci_update_background_scan(hdev);
5464 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5465 MGMT_STATUS_SUCCESS, &cp->addr,
5468 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler: replace stored LE connection
 * parameters with the supplied list.
 * Uses the same overflow-safe count bound and exact-length check as
 * the other load_* handlers. Unlike those, invalid individual entries
 * are logged and skipped rather than failing the whole command.
 */
5472 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5475 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound prevents expected_len from overflowing u16. */
5476 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5477 sizeof(struct mgmt_conn_param));
5478 u16 param_count, expected_len;
5481 if (!lmp_le_capable(hdev))
5482 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5483 MGMT_STATUS_NOT_SUPPORTED);
5485 param_count = __le16_to_cpu(cp->param_count);
5486 if (param_count > max_param_count) {
5487 BT_ERR("load_conn_param: too big param_count value %u",
5489 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5490 MGMT_STATUS_INVALID_PARAMS);
5493 expected_len = sizeof(*cp) + param_count *
5494 sizeof(struct mgmt_conn_param);
5495 if (expected_len != len) {
5496 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5498 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5499 MGMT_STATUS_INVALID_PARAMS);
5502 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Drop old disabled entries before loading the new set. */
5506 hci_conn_params_clear_disabled(hdev);
5508 for (i = 0; i < param_count; i++) {
5509 struct mgmt_conn_param *param = &cp->params[i];
5510 struct hci_conn_params *hci_param;
5511 u16 min, max, latency, timeout;
5514 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5517 if (param->addr.type == BDADDR_LE_PUBLIC) {
5518 addr_type = ADDR_LE_DEV_PUBLIC;
5519 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5520 addr_type = ADDR_LE_DEV_RANDOM;
/* Non-LE address types are skipped, not fatal. */
5522 BT_ERR("Ignoring invalid connection parameters");
5526 min = le16_to_cpu(param->min_interval);
5527 max = le16_to_cpu(param->max_interval);
5528 latency = le16_to_cpu(param->latency);
5529 timeout = le16_to_cpu(param->timeout);
5531 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5532 min, max, latency, timeout);
/* Range-check against the Bluetooth spec limits; skip bad entries. */
5534 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5535 BT_ERR("Ignoring invalid connection parameters");
5539 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5542 BT_ERR("Failed to add connection parameters");
5546 hci_param->conn_min_interval = min;
5547 hci_param->conn_max_interval = max;
5548 hci_param->conn_latency = latency;
5549 hci_param->supervision_timeout = timeout;
5552 hci_dev_unlock(hdev);
5554 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: mark a controller as externally
 * configured (cp->config 0x01) or not (0x00).
 * Only valid while powered off and only for controllers with the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. If the change flips the
 * configured/unconfigured state, the controller's mgmt index is
 * re-announced: moving to configured schedules power-on through the
 * normal HCI_CONFIG/HCI_AUTO_OFF path, moving to unconfigured raises
 * HCI_RAW.
 */
5558 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5559 void *data, u16 len)
5561 struct mgmt_cp_set_external_config *cp = data;
5565 BT_DBG("%s", hdev->name);
5567 if (hdev_is_powered(hdev))
5568 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5569 MGMT_STATUS_REJECTED);
5571 if (cp->config != 0x00 && cp->config != 0x01)
5572 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5573 MGMT_STATUS_INVALID_PARAMS);
5575 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5576 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5577 MGMT_STATUS_NOT_SUPPORTED);
5582 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
5584 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
5586 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5593 err = new_options(hdev, sk);
/* When the configured-ness actually flipped, move the device between
 * the configured and unconfigured mgmt index lists.
 */
5595 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
5596 mgmt_index_removed(hdev);
5598 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
5599 hci_dev_set_flag(hdev, HCI_CONFIG);
5600 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
5602 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Became unconfigured: run in raw mode until configured. */
5604 set_bit(HCI_RAW, &hdev->flags);
5605 mgmt_index_added(hdev);
5610 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: record a public BD address for a
 * controller whose driver provides a set_bdaddr callback.
 * Only valid while powered off; BDADDR_ANY is not a valid address.
 * If the device is unconfigured and providing the address completes
 * its configuration, power-on is scheduled via HCI_CONFIG/HCI_AUTO_OFF
 * and the mgmt index is re-announced as configured.
 */
5614 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5615 void *data, u16 len)
5617 struct mgmt_cp_set_public_address *cp = data;
5621 BT_DBG("%s", hdev->name);
5623 if (hdev_is_powered(hdev))
5624 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5625 MGMT_STATUS_REJECTED);
5627 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5628 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5629 MGMT_STATUS_INVALID_PARAMS);
/* Without driver support there is no way to program the address. */
5631 if (!hdev->set_bdaddr)
5632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5633 MGMT_STATUS_NOT_SUPPORTED);
5637 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5638 bacpy(&hdev->public_addr, &cp->bdaddr);
5640 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5647 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5648 err = new_options(hdev, sk);
/* Address completed the configuration: re-announce as configured and
 * schedule power-on.
 */
5650 if (is_configured(hdev)) {
5651 mgmt_index_removed(hdev);
5653 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
5655 hci_dev_set_flag(hdev, HCI_CONFIG);
5656 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
5658 queue_work(hdev->req_workqueue, &hdev->power_on);
5662 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_READ_LOCAL_OOB_EXT_DATA
 * (BR/EDR case). Parses the controller's OOB reply (legacy P-192-only
 * or extended P-192+P-256 form depending on which HCI command was
 * run), packs class-of-device plus the hash/randomizer pairs into EIR
 * format, answers the requester and — on success — broadcasts
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED to other sockets that opted into OOB
 * data events.
 * NOTE(review): the h192/r192/h256/r256 assignments and kfree/skb
 * handling fall outside this extract.
 */
5666 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
5667 u16 opcode, struct sk_buff *skb)
5669 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
5670 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
5671 u8 *h192, *r192, *h256, *r256;
5672 struct mgmt_pending_cmd *cmd;
5676 BT_DBG("%s status %u", hdev->name, status);
5678 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
5682 mgmt_cp = cmd->param;
5685 status = mgmt_status(status);
/* Legacy reply: P-192 hash + randomizer only. */
5692 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
5693 struct hci_rp_read_local_oob_data *rp;
/* Sanity-check the reply length before trusting its contents. */
5695 if (skb->len != sizeof(*rp)) {
5696 status = MGMT_STATUS_FAILED;
5699 status = MGMT_STATUS_SUCCESS;
5700 rp = (void *)skb->data;
/* EIR: CoD (5) + one 18-byte hash field + one 18-byte randomizer. */
5702 eir_len = 5 + 18 + 18;
/* Extended reply: P-192 and P-256 pairs. */
5709 struct hci_rp_read_local_oob_ext_data *rp;
5711 if (skb->len != sizeof(*rp)) {
5712 status = MGMT_STATUS_FAILED;
5715 status = MGMT_STATUS_SUCCESS;
5716 rp = (void *)skb->data;
/* SC-only mode: P-192 values are omitted from the EIR. */
5718 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5719 eir_len = 5 + 18 + 18;
5723 eir_len = 5 + 18 + 18 + 18 + 18;
5733 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
5740 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
5741 hdev->dev_class, 3);
5744 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
5745 EIR_SSP_HASH_C192, h192, 16);
5746 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
5747 EIR_SSP_RAND_R192, r192, 16);
5751 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
5752 EIR_SSP_HASH_C256, h256, 16);
5753 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
5754 EIR_SSP_RAND_R256, r256, 16);
5758 mgmt_rp->type = mgmt_cp->type;
5759 mgmt_rp->eir_len = cpu_to_le16(eir_len);
5761 err = mgmt_cmd_complete(cmd->sk, hdev->id,
5762 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
5763 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
5764 if (err < 0 || status)
/* Requester implicitly opts into future OOB data update events. */
5767 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
5769 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
5770 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
5771 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
5774 mgmt_pending_remove(cmd);
/* Queue an HCI Read Local OOB Data request (extended variant when the
 * controller supports BR/EDR Secure Connections) and register a pending
 * MGMT command that read_local_oob_ext_data_complete() will answer.
 * Returns 0 on success or a negative errno; on failure the pending
 * command is removed again.
 */
5777 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
5778 				  struct mgmt_cp_read_local_oob_ext_data *cp)
5780 	struct mgmt_pending_cmd *cmd;
5781 	struct hci_request req;
5784 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
5789 	hci_req_init(&req, hdev);
	/* Prefer the extended command when SC over BR/EDR is available */
5791 	if (bredr_sc_enabled(hdev))
5792 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
5794 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
5796 	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
	/* Request could not be submitted: drop the pending entry */
5798 		mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler.
 * For BR/EDR the OOB values must come from the controller, so the
 * request is forwarded via read_local_ssp_oob_req(). For LE the kernel
 * can generate the OOB data itself (SMP) and replies synchronously with
 * an EIR blob containing address, role, optional confirm/random values
 * and flags. Errors are reported with an empty (eir_len == 0) reply.
 * NOTE(review): line-sampled view; some branch/cleanup lines are elided.
 */
5805 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
5806 				   void *data, u16 data_len)
5808 	struct mgmt_cp_read_local_oob_ext_data *cp = data;
5809 	struct mgmt_rp_read_local_oob_ext_data *rp;
5812 	u8 status, flags, role, addr[7], hash[16], rand[16];
5815 	BT_DBG("%s", hdev->name);
5817 	if (hdev_is_powered(hdev)) {
	/* cp->type is a bitmask of requested transports */
5819 		case BIT(BDADDR_BREDR):
5820 			status = mgmt_bredr_support(hdev);
5826 		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
5827 			status = mgmt_le_support(hdev);
	/* LE EIR: addr(9) + role(3) + confirm(18) + random(18) + flags(3) */
5831 			eir_len = 9 + 3 + 18 + 18 + 3;
	/* Any other transport combination is invalid */
5834 			status = MGMT_STATUS_INVALID_PARAMS;
5839 		status = MGMT_STATUS_NOT_POWERED;
5843 	rp_len = sizeof(*rp) + eir_len;
5844 	rp = kmalloc(rp_len, GFP_ATOMIC);
5855 	case BIT(BDADDR_BREDR):
5856 		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
	/* Controller must produce BR/EDR OOB data; reply comes later */
5857 			err = read_local_ssp_oob_req(hdev, sk, cp);
5858 			hci_dev_unlock(hdev);
5862 			status = MGMT_STATUS_FAILED;
5865 		eir_len = eir_append_data(rp->eir, eir_len,
5867 					  hdev->dev_class, 3);
5870 	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
5871 		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5872 		    smp_generate_oob(hdev, hash, rand) < 0) {
5873 			hci_dev_unlock(hdev);
5874 			status = MGMT_STATUS_FAILED;
5878 		/* This should return the active RPA, but since the RPA
5879 		 * is only programmed on demand, it is really hard to fill
5880 		 * this in at the moment. For now disallow retrieving
5881 		 * local out-of-band data when privacy is in use.
5883 		 * Returning the identity address will not help here since
5884 		 * pairing happens before the identity resolving key is
5885 		 * known and thus the connection establishment happens
5886 		 * based on the RPA and not the identity address.
5888 		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5889 			hci_dev_unlock(hdev);
5890 			status = MGMT_STATUS_REJECTED;
	/* Pick static random address when forced, when no public address
	 * exists, or when LE-only with a configured static address.
	 */
5894 		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
5895 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
5896 		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5897 		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
5898 			memcpy(addr, &hdev->static_addr, 6);
5901 			memcpy(addr, &hdev->bdaddr, 6);
	/* addr[6] (the type byte) is set in elided lines above */
5905 		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
5906 					  addr, sizeof(addr));
	/* Role byte depends on whether we are currently advertising */
5908 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
5913 		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
5914 					  &role, sizeof(role));
5916 		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
5917 			eir_len = eir_append_data(rp->eir, eir_len,
5919 						  hash, sizeof(hash));
5921 			eir_len = eir_append_data(rp->eir, eir_len,
5923 						  rand, sizeof(rand));
5926 		flags = mgmt_get_adv_discov_flags(hdev);
5928 		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
5929 			flags |= LE_AD_NO_BREDR;
5931 		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
5932 					  &flags, sizeof(flags));
5936 	hci_dev_unlock(hdev);
	/* Requester opts in to future OOB data update events */
5938 	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
5940 	status = MGMT_STATUS_SUCCESS;
5943 	rp->type = cp->type;
5944 	rp->eir_len = cpu_to_le16(eir_len);
5946 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
5947 				status, rp, sizeof(*rp) + eir_len);
5948 	if (err < 0 || status)
	/* Notify other sockets, excluding the requester itself */
5951 	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
5952 				 rp, sizeof(*rp) + eir_len,
5953 				 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Build the bitmask of MGMT advertising flags this kernel/controller
 * combination supports. TX power is only advertised as supported when
 * the controller reported a valid advertising TX power level.
 */
5961 static u32 get_supported_adv_flags(struct hci_dev *hdev)
5965 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
5966 	flags |= MGMT_ADV_FLAG_DISCOV;
5967 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
5968 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
5969 	flags |= MGMT_ADV_FLAG_APPEARANCE;
5970 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
5972 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
5973 		flags |= MGMT_ADV_FLAG_TX_POWER;
/* MGMT_OP_READ_ADV_FEATURES handler: report supported advertising
 * flags, data size limits, the maximum instance count and the list of
 * currently configured instance IDs (one byte each, appended after the
 * fixed reply structure).
 */
5978 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
5979 			     void *data, u16 data_len)
5981 	struct mgmt_rp_read_adv_features *rp;
5984 	struct adv_info *adv_instance;
5985 	u32 supported_flags;
5988 	BT_DBG("%s", hdev->name);
	/* Advertising features require LE support */
5990 	if (!lmp_le_capable(hdev))
5991 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
5992 				       MGMT_STATUS_REJECTED);
	/* One trailing byte per configured instance */
5996 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
5997 	rp = kmalloc(rp_len, GFP_ATOMIC);
5999 		hci_dev_unlock(hdev);
6003 	supported_flags = get_supported_adv_flags(hdev);
6005 	rp->supported_flags = cpu_to_le32(supported_flags);
6006 	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6007 	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6008 	rp->max_instances = HCI_MAX_ADV_INSTANCES;
6009 	rp->num_instances = hdev->adv_instance_cnt;
	/* Fill the variable-length instance ID array */
6011 	instance = rp->instance;
6012 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
6013 		*instance = adv_instance->instance;
6017 	hci_dev_unlock(hdev);
6019 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6020 				MGMT_STATUS_SUCCESS, rp, rp_len);
/* Compute how many bytes of user-supplied TLV data fit in an
 * advertising PDU (or scan response), after reserving room for the
 * fields the kernel appends itself: flags, TX power, local name and
 * appearance. Reductions for each reserved field are in elided lines.
 */
6028 static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
6029 	u8 max_len = HCI_MAX_AD_LENGTH;
	/* Kernel-managed Flags field consumes 3 bytes */
6032 	if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
6033 			 MGMT_ADV_FLAG_LIMITED_DISCOV |
6034 			 MGMT_ADV_FLAG_MANAGED_FLAGS))
6037 	if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
6040 	/* at least 1 byte of name should fit in */
6041 	if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
6044 	if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True if the kernel manages the Flags AD field for this instance, in
 * which case userspace must not supply its own EIR_FLAGS element.
 */
6051 static bool flags_managed(u32 adv_flags)
6053 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
6054 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
6055 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True if the kernel appends the TX power AD field itself. */
6058 static bool tx_power_managed(u32 adv_flags)
6060 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True if the kernel appends the local name AD field itself. */
6063 static bool name_managed(u32 adv_flags)
6065 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True if the kernel appends the appearance AD field itself. */
6068 static bool appearance_managed(u32 adv_flags)
6070 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied advertising/scan-response TLV data: it must
 * fit within the allowed length, be well-formed (each field's length
 * byte must not run past the buffer) and must not contain AD fields
 * that the kernel manages itself for the given flags.
 */
6073 static bool tlv_data_is_valid(u32 adv_flags, u8 *data, u8 len, bool is_adv_data)
6078 	max_len = tlv_data_max_len(adv_flags, is_adv_data);
6083 	/* Make sure that the data is correctly formatted. */
6084 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
	/* data[i] is the field length, data[i + 1] the AD type */
	/* Flags may never appear in adv data, nor when kernel-managed */
6087 		if (data[i + 1] == EIR_FLAGS &&
6088 		    (!is_adv_data || flags_managed(adv_flags)))
6091 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
6094 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
6097 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
6100 		if (data[i + 1] == EIR_APPEARANCE &&
6101 		    appearance_managed(adv_flags))
6104 		/* If the current field length would exceed the total data
6105 		 * length, then it's invalid.
6107 		if (i + cur_len >= len)
/* HCI request completion handler for MGMT_OP_ADD_ADVERTISING.
 * On failure, remove all instances still marked pending (they were
 * never activated) and cancel the advertising timeout if the current
 * instance is among them; then answer the pending MGMT command.
 */
6114 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
6117 	struct mgmt_pending_cmd *cmd;
6118 	struct mgmt_cp_add_advertising *cp;
6119 	struct mgmt_rp_add_advertising rp;
6120 	struct adv_info *adv_instance, *n;
6123 	BT_DBG("status %d", status);
6127 	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
	/* Walk with _safe since failing instances are removed in-loop */
6129 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
6130 		if (!adv_instance->pending)
	/* Success path: instance is now committed */
6134 			adv_instance->pending = false;
	/* Failure path: tear the pending instance down again */
6138 		instance = adv_instance->instance;
6140 		if (hdev->cur_adv_instance == instance)
6141 			cancel_adv_timeout(hdev);
6143 		hci_remove_adv_instance(hdev, instance);
6144 		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
6151 	rp.instance = cp->instance;
6154 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
6155 				mgmt_status(status));
6157 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
6158 				  mgmt_status(status), &rp, sizeof(rp));
6160 	mgmt_pending_remove(cmd);
6163 	hci_dev_unlock(hdev);
/* MGMT_OP_ADD_ADVERTISING handler.
 * Validates the instance number, flags and TLV payload, stores the new
 * (or replacement) advertising instance, and schedules it on the
 * controller when possible. If no HCI traffic is needed (powered off,
 * userspace-controlled advertising, or nothing to schedule) the command
 * completes immediately; otherwise add_advertising_complete() replies.
 */
6166 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
6167 			   void *data, u16 data_len)
6169 	struct mgmt_cp_add_advertising *cp = data;
6170 	struct mgmt_rp_add_advertising rp;
6172 	u32 supported_flags;
6174 	u16 timeout, duration;
6175 	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
6176 	u8 schedule_instance = 0;
6177 	struct adv_info *next_instance;
6179 	struct mgmt_pending_cmd *cmd;
6180 	struct hci_request req;
6182 	BT_DBG("%s", hdev->name);
6184 	status = mgmt_le_support(hdev);
6186 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
	/* Instance IDs are 1..HCI_MAX_ADV_INSTANCES; 0 is reserved */
6189 	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6190 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6191 				       MGMT_STATUS_INVALID_PARAMS);
	/* Total length must exactly match header + adv + scan rsp data */
6193 	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
6194 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6195 				       MGMT_STATUS_INVALID_PARAMS);
6197 	flags = __le32_to_cpu(cp->flags);
6198 	timeout = __le16_to_cpu(cp->timeout);
6199 	duration = __le16_to_cpu(cp->duration);
6201 	/* The current implementation only supports a subset of the specified
6204 	supported_flags = get_supported_adv_flags(hdev);
6205 	if (flags & ~supported_flags)
6206 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6207 				       MGMT_STATUS_INVALID_PARAMS);
	/* A timeout needs a running clock, i.e. a powered controller */
6211 	if (timeout && !hdev_is_powered(hdev)) {
6212 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6213 				      MGMT_STATUS_REJECTED);
	/* Serialize against other advertising/LE state changes */
6217 	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6218 	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6219 	    pending_find(MGMT_OP_SET_LE, hdev)) {
6220 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6225 	if (!tlv_data_is_valid(flags, cp->data, cp->adv_data_len, true) ||
6226 	    !tlv_data_is_valid(flags, cp->data + cp->adv_data_len,
6227 			       cp->scan_rsp_len, false)) {
6228 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6229 				      MGMT_STATUS_INVALID_PARAMS);
6233 	err = hci_add_adv_instance(hdev, cp->instance, flags,
6234 				   cp->adv_data_len, cp->data,
6236 				   cp->data + cp->adv_data_len,
6239 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6240 				      MGMT_STATUS_FAILED);
6244 	/* Only trigger an advertising added event if a new instance was
6247 	if (hdev->adv_instance_cnt > prev_instance_cnt)
6248 		mgmt_advertising_added(sk, hdev, cp->instance);
6250 	if (hdev->cur_adv_instance == cp->instance) {
6251 		/* If the currently advertised instance is being changed then
6252 		 * cancel the current advertising and schedule the next
6253 		 * instance. If there is only one instance then the overridden
6254 		 * advertising data will be visible right away.
6256 		cancel_adv_timeout(hdev);
6258 		next_instance = hci_get_next_instance(hdev, cp->instance);
6260 			schedule_instance = next_instance->instance;
6261 	} else if (!hdev->adv_instance_timeout) {
6262 		/* Immediately advertise the new instance if no other
6263 		 * instance is currently being advertised.
6265 		schedule_instance = cp->instance;
6268 	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
6269 	 * there is no instance to be advertised then we have no HCI
6270 	 * communication to make. Simply return.
6272 	if (!hdev_is_powered(hdev) ||
6273 	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6274 	    !schedule_instance) {
6275 		rp.instance = cp->instance;
6276 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6277 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6281 	/* We're good to go, update advertising data, parameters, and start
6284 	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
6291 	hci_req_init(&req, hdev);
6293 	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
6296 		err = hci_req_run(&req, add_advertising_complete);
6299 		mgmt_pending_remove(cmd);
6302 	hci_dev_unlock(hdev);
/* HCI request completion handler for MGMT_OP_REMOVE_ADVERTISING.
 * Always reports success to the caller: the instance removal itself
 * already happened; a failure here only means advertising could not be
 * disabled on the controller.
 */
6307 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
6310 	struct mgmt_pending_cmd *cmd;
6311 	struct mgmt_cp_remove_advertising *cp;
6312 	struct mgmt_rp_remove_advertising rp;
6314 	BT_DBG("status %d", status);
6318 	/* A failure status here only means that we failed to disable
6319 	 * advertising. Otherwise, the advertising instance has been removed,
6320 	 * so report success.
6322 	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
6327 	rp.instance = cp->instance;
6329 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
6331 	mgmt_pending_remove(cmd);
6334 	hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_ADVERTISING handler.
 * Removes one instance (or all instances when cp->instance == 0),
 * disables controller advertising if no instances remain, and either
 * completes immediately when no HCI traffic is needed or defers the
 * reply to remove_advertising_complete().
 */
6337 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
6338 			      void *data, u16 data_len)
6340 	struct mgmt_cp_remove_advertising *cp = data;
6341 	struct mgmt_rp_remove_advertising rp;
6342 	struct mgmt_pending_cmd *cmd;
6343 	struct hci_request req;
6346 	BT_DBG("%s", hdev->name);
	/* A nonzero instance must actually exist; 0 means "all" */
6350 	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
6351 		err = mgmt_cmd_status(sk, hdev->id,
6352 				      MGMT_OP_REMOVE_ADVERTISING,
6353 				      MGMT_STATUS_INVALID_PARAMS);
	/* Serialize against other advertising/LE state changes */
6357 	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6358 	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6359 	    pending_find(MGMT_OP_SET_LE, hdev)) {
6360 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
	/* Nothing configured at all -> nothing to remove */
6365 	if (list_empty(&hdev->adv_instances)) {
6366 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6367 				      MGMT_STATUS_INVALID_PARAMS);
6371 	hci_req_init(&req, hdev);
6373 	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
6375 	if (list_empty(&hdev->adv_instances))
6376 		__hci_req_disable_advertising(&req);
6378 	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
6379 	 * flag is set or the device isn't powered then we have no HCI
6380 	 * communication to make. Simply return.
6382 	if (skb_queue_empty(&req.cmd_q) ||
6383 	    !hdev_is_powered(hdev) ||
6384 	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6385 		rp.instance = cp->instance;
6386 		err = mgmt_cmd_complete(sk, hdev->id,
6387 					MGMT_OP_REMOVE_ADVERTISING,
6388 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6392 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
6399 	err = hci_req_run(&req, remove_advertising_complete);
6401 		mgmt_pending_remove(cmd);
6404 	hci_dev_unlock(hdev);
/* MGMT_OP_GET_ADV_SIZE_INFO handler: a purely informational query that
 * reports how many bytes of adv/scan-response data are available for a
 * given instance and flag combination, without touching any state.
 */
6409 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
6410 			     void *data, u16 data_len)
6412 	struct mgmt_cp_get_adv_size_info *cp = data;
6413 	struct mgmt_rp_get_adv_size_info rp;
6414 	u32 flags, supported_flags;
6417 	BT_DBG("%s", hdev->name);
6419 	if (!lmp_le_capable(hdev))
6420 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6421 				       MGMT_STATUS_REJECTED);
6423 	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6424 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6425 				       MGMT_STATUS_INVALID_PARAMS);
6427 	flags = __le32_to_cpu(cp->flags);
6429 	/* The current implementation only supports a subset of the specified
6432 	supported_flags = get_supported_adv_flags(hdev);
6433 	if (flags & ~supported_flags)
6434 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6435 				       MGMT_STATUS_INVALID_PARAMS);
6437 	rp.instance = cp->instance;
6438 	rp.flags = cp->flags;
6439 	rp.max_adv_data_len = tlv_data_max_len(flags, true);
6440 	rp.max_scan_rsp_len = tlv_data_max_len(flags, false);
6442 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6443 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for MGMT commands: entry index == MGMT opcode.
 * Each entry holds the handler, the minimum (fixed) parameter size and
 * optional flags (HCI_MGMT_UNTRUSTED allows non-privileged sockets,
 * HCI_MGMT_UNCONFIGURED allows use on unconfigured controllers).
 * Do not reorder: the position of each entry is its wire opcode.
 */
6448 static const struct hci_mgmt_handler mgmt_handlers[] = {
6449 	{ NULL }, /* 0x0000 (no command) */
6450 	{ read_version,            MGMT_READ_VERSION_SIZE,
6452 						HCI_MGMT_UNTRUSTED },
6453 	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
6455 						HCI_MGMT_UNTRUSTED },
6456 	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
6458 						HCI_MGMT_UNTRUSTED },
6459 	{ read_controller_info,    MGMT_READ_INFO_SIZE,
6460 						HCI_MGMT_UNTRUSTED },
6461 	{ set_powered,             MGMT_SETTING_SIZE },
6462 	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
6463 	{ set_connectable,         MGMT_SETTING_SIZE },
6464 	{ set_fast_connectable,    MGMT_SETTING_SIZE },
6465 	{ set_bondable,            MGMT_SETTING_SIZE },
6466 	{ set_link_security,       MGMT_SETTING_SIZE },
6467 	{ set_ssp,                 MGMT_SETTING_SIZE },
6468 	{ set_hs,                  MGMT_SETTING_SIZE },
6469 	{ set_le,                  MGMT_SETTING_SIZE },
6470 	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
6471 	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
6472 	{ add_uuid,                MGMT_ADD_UUID_SIZE },
6473 	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
6474 	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
6476 	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
6478 	{ disconnect,              MGMT_DISCONNECT_SIZE },
6479 	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
6480 	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
6481 	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
6482 	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
6483 	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
6484 	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
6485 	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
6486 	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
6487 	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6488 	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
6489 	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6490 	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
6491 	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
6493 	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6494 	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
6495 	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
6496 	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
6497 	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
6498 	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
6499 	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
6500 	{ set_advertising,         MGMT_SETTING_SIZE },
6501 	{ set_bredr,               MGMT_SETTING_SIZE },
6502 	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
6503 	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
6504 	{ set_secure_conn,         MGMT_SETTING_SIZE },
6505 	{ set_debug_keys,          MGMT_SETTING_SIZE },
6506 	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
6507 	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
6509 	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
6510 	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
6511 	{ add_device,              MGMT_ADD_DEVICE_SIZE },
6512 	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
6513 	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
6515 	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
6517 						HCI_MGMT_UNTRUSTED },
6518 	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
6519 						HCI_MGMT_UNCONFIGURED |
6520 						HCI_MGMT_UNTRUSTED },
6521 	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
6522 						HCI_MGMT_UNCONFIGURED },
6523 	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
6524 						HCI_MGMT_UNCONFIGURED },
6525 	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
6527 	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
6528 	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
6530 						HCI_MGMT_UNTRUSTED },
6531 	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
6532 	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
6534 	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
6535 	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
6536 	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
6537 	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
6538 						HCI_MGMT_UNTRUSTED },
6539 	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
/* Announce a newly registered controller to userspace: legacy
 * (UNCONF_)INDEX_ADDED for old clients plus the extended
 * EXT_INDEX_ADDED event carrying type/bus information. Raw-only
 * (HCI_QUIRK_RAW_DEVICE) controllers are never exposed over MGMT.
 */
6542 void mgmt_index_added(struct hci_dev *hdev)
6544 	struct mgmt_ev_ext_index ev;
6546 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6549 	switch (hdev->dev_type) {
6551 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6552 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
6553 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6556 			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
6557 					 HCI_MGMT_INDEX_EVENTS);
	/* ev.type/ev.bus are filled in elided lines above */
6570 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
6571 			 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce controller removal to userspace. All still-pending MGMT
 * commands for this controller are failed with INVALID_INDEX before the
 * legacy and extended index-removed events are emitted.
 */
6574 void mgmt_index_removed(struct hci_dev *hdev)
6576 	struct mgmt_ev_ext_index ev;
6577 	u8 status = MGMT_STATUS_INVALID_INDEX;
6579 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6582 	switch (hdev->dev_type) {
	/* Fail every outstanding command (opcode 0 matches all) */
6584 		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6586 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6587 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
6588 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6591 			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
6592 					 HCI_MGMT_INDEX_EVENTS);
6605 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
6606 			 HCI_MGMT_EXT_INDEX_EVENTS);
/* This function requires the caller holds hdev->lock */
/* Re-sort every known LE connection parameter entry into the correct
 * pending action list (auto-connect vs. report) after a power cycle,
 * since AUTO_OFF may have left the lists stale.
 */
6610 static void restart_le_actions(struct hci_dev *hdev)
6612 	struct hci_conn_params *p;
6614 	list_for_each_entry(p, &hdev->le_conn_params, list) {
6615 		/* Needed for AUTO_OFF case where might not "really"
6616 		 * have been powered off.
6618 		list_del_init(&p->action);
6620 		switch (p->auto_connect) {
6621 		case HCI_AUTO_CONN_DIRECT:
6622 		case HCI_AUTO_CONN_ALWAYS:
6623 			list_add(&p->action, &hdev->pend_le_conns);
6625 		case HCI_AUTO_CONN_REPORT:
6626 			list_add(&p->action, &hdev->pend_le_reports);
/* Called when a power-on attempt finishes. On success, restore LE
 * auto-connect actions and background scanning, then answer any pending
 * SET_POWERED commands and broadcast the new settings.
 */
6634 void mgmt_power_on(struct hci_dev *hdev, int err)
6636 	struct cmd_lookup match = { NULL, hdev };
6638 	BT_DBG("err %d", err);
6643 		restart_le_actions(hdev);
6644 		hci_update_background_scan(hdev);
6647 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6649 	new_settings(hdev, match.sk);
6654 	hci_dev_unlock(hdev);
/* Power-off notification: answer pending SET_POWERED commands, fail
 * all other outstanding commands with an appropriate status, reset the
 * advertised class of device to zero and broadcast the new settings.
 */
6657 void __mgmt_power_off(struct hci_dev *hdev)
6659 	struct cmd_lookup match = { NULL, hdev };
6660 	u8 status, zero_cod[] = { 0, 0, 0 };
6662 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6664 	/* If the power off is because of hdev unregistration let
6665 	 * use the appropriate INVALID_INDEX status. Otherwise use
6666 	 * NOT_POWERED. We cover both scenarios here since later in
6667 	 * mgmt_index_removed() any hci_conn callbacks will have already
6668 	 * been triggered, potentially causing misleading DISCONNECTED
6671 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
6672 		status = MGMT_STATUS_INVALID_INDEX;
6674 		status = MGMT_STATUS_NOT_POWERED;
6676 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
	/* Only announce a class change if it was nonzero before */
6678 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
6679 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6680 				   zero_cod, sizeof(zero_cod),
6681 				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
6682 		ext_info_changed(hdev, NULL);
6685 	new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command; -ERFKILL maps to the dedicated
 * RFKILLED status so userspace can distinguish it from generic failure.
 */
6691 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6693 	struct mgmt_pending_cmd *cmd;
6696 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
6700 	if (err == -ERFKILL)
6701 		status = MGMT_STATUS_RFKILLED;
6703 		status = MGMT_STATUS_FAILED;
6705 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6707 	mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key so
 * userspace can persist it (store_hint tells it whether to do so).
 */
6710 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6713 	struct mgmt_ev_new_link_key ev;
6715 	memset(&ev, 0, sizeof(ev));
6717 	ev.store_hint = persistent;
6718 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6719 	ev.key.addr.type = BDADDR_BREDR;
6720 	ev.key.type = key->type;
6721 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6722 	ev.key.pin_len = key->pin_len;
6724 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long term key type + authentication state to the MGMT
 * LTK type constant. Unknown SMP types fall back to UNAUTHENTICATED.
 */
6727 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6729 	switch (ltk->type) {
	/* Legacy (pre-SC) pairing LTKs */
6732 		if (ltk->authenticated)
6733 			return MGMT_LTK_AUTHENTICATED;
6734 		return MGMT_LTK_UNAUTHENTICATED;
	/* Secure Connections (P-256) LTKs */
6736 		if (ltk->authenticated)
6737 			return MGMT_LTK_P256_AUTH;
6738 		return MGMT_LTK_P256_UNAUTH;
6739 	case SMP_LTK_P256_DEBUG:
6740 		return MGMT_LTK_P256_DEBUG;
6743 	return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY. store_hint is forced to 0 for
 * non-identity (resolvable/non-resolvable) random addresses, since the
 * key could never be matched to the device again after an address
 * change.
 */
6746 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6748 	struct mgmt_ev_new_long_term_key ev;
6750 	memset(&ev, 0, sizeof(ev));
6752 	/* Devices using resolvable or non-resolvable random addresses
6753 	 * without providing an identity resolving key don't require
6754 	 * to store long term keys. Their addresses will change the
6757 	 * Only when a remote device provides an identity address
6758 	 * make sure the long term key is stored. If the remote
6759 	 * identity is known, the long term keys are internally
6760 	 * mapped to the identity address. So allow static random
6761 	 * and public addresses here.
	/* Top two address bits 11 == static random; anything else
	 * random is not an identity address.
	 */
6763 	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6764 	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
6765 		ev.store_hint = 0x00;
6767 		ev.store_hint = persistent;
6769 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6770 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6771 	ev.key.type = mgmt_ltk_type(key);
6772 	ev.key.enc_size = key->enc_size;
6773 	ev.key.ediv = key->ediv;
6774 	ev.key.rand = key->rand;
6776 	if (key->type == SMP_LTK)
6779 	/* Make sure we copy only the significant bytes based on the
6780 	 * encryption key size, and set the rest of the value to zeroes.
6782 	memcpy(ev.key.val, key->val, key->enc_size);
6783 	memset(ev.key.val + key->enc_size, 0,
6784 	       sizeof(ev.key.val) - key->enc_size);
6786 	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK carrying both the identity resolving key and
 * the current resolvable private address it resolves.
 */
6789 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
6791 	struct mgmt_ev_new_irk ev;
6793 	memset(&ev, 0, sizeof(ev));
6795 	ev.store_hint = persistent;
6797 	bacpy(&ev.rpa, &irk->rpa);
6798 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6799 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6800 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6802 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK. As with LTKs, the store hint is suppressed
 * for non-identity random addresses, which cannot be matched again
 * once the remote changes its address.
 */
6805 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6808 	struct mgmt_ev_new_csrk ev;
6810 	memset(&ev, 0, sizeof(ev));
6812 	/* Devices using resolvable or non-resolvable random addresses
6813 	 * without providing an identity resolving key don't require
6814 	 * to store signature resolving keys. Their addresses will change
6815 	 * the next time around.
6817 	 * Only when a remote device provides an identity address
6818 	 * make sure the signature resolving key is stored. So allow
6819 	 * static random and public addresses here.
6821 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6822 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6823 		ev.store_hint = 0x00;
6825 		ev.store_hint = persistent;
6827 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6828 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6829 	ev.key.type = csrk->type;
6830 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6832 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM for remote-suggested LE connection
 * parameters. Silently ignored for non-identity addresses, since the
 * parameters could not be re-associated with the device later.
 */
6835 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6836 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6837 			 u16 max_interval, u16 latency, u16 timeout)
6839 	struct mgmt_ev_new_conn_param ev;
6841 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
6844 	memset(&ev, 0, sizeof(ev));
6845 	bacpy(&ev.addr.bdaddr, bdaddr);
6846 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6847 	ev.store_hint = store_hint;
6848 	ev.min_interval = cpu_to_le16(min_interval);
6849 	ev.max_interval = cpu_to_le16(max_interval);
6850 	ev.latency = cpu_to_le16(latency);
6851 	ev.timeout = cpu_to_le16(timeout);
6853 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_DEVICE_CONNECTED with an EIR blob: either the raw LE
 * advertising data seen from the device, or (for BR/EDR) the remote
 * name and class of device.
 */
6856 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6857 			   u32 flags, u8 *name, u8 name_len)
6860 	struct mgmt_ev_device_connected *ev = (void *) buf;
6863 	bacpy(&ev->addr.bdaddr, &conn->dst);
6864 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6866 	ev->flags = __cpu_to_le32(flags);
6868 	/* We must ensure that the EIR Data fields are ordered and
6869 	 * unique. Keep it simple for now and avoid the problem by not
6870 	 * adding any BR/EDR data to the LE adv.
6872 	if (conn->le_adv_data_len > 0) {
6873 		memcpy(&ev->eir[eir_len],
6874 		       conn->le_adv_data, conn->le_adv_data_len);
6875 		eir_len = conn->le_adv_data_len;
	/* BR/EDR (or no LE adv data): append name and device class */
6878 			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
6881 		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6882 			eir_len = eir_append_data(ev->eir, eir_len,
6884 						  conn->dev_class, 3);
6887 	ev->eir_len = cpu_to_le16(eir_len);
6889 	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6890 		   sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT
 * command with success and report its socket back through *data so the
 * caller can skip re-notifying that socket.
 */
6893 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
6895 	struct sock **sk = data;
6897 	cmd->cmd_complete(cmd, 0);
6902 	mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: finish a pending UNPAIR_DEVICE
 * command — emit the DEVICE_UNPAIRED event and complete with success.
 */
6905 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
6907 	struct hci_dev *hdev = data;
6908 	struct mgmt_cp_unpair_device *cp = cmd->param;
6910 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6912 	cmd->cmd_complete(cmd, 0);
6913 	mgmt_pending_remove(cmd);
/* Return whether a SET_POWERED(off) command is currently pending,
 * i.e. the controller is in the process of powering down.
 * NOTE(review): the lines checking cp->val are elided in this view.
 */
6916 bool mgmt_powering_down(struct hci_dev *hdev)
6918 	struct mgmt_pending_cmd *cmd;
6919 	struct mgmt_mode *cp;
6921 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED (when the connection was visible to
 * MGMT) and, if a power-down is in progress and this was the last open
 * connection, expedite the deferred power_off work.
 */
6932 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6933 			      u8 link_type, u8 addr_type, u8 reason,
6934 			      bool mgmt_connected)
6936 	struct mgmt_ev_device_disconnected ev;
6937 	struct sock *sk = NULL;
6939 	/* The connection is still in hci_conn_hash so test for 1
6940 	 * instead of 0 to know if this is the last one.
6942 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6943 		cancel_delayed_work(&hdev->power_off);
6944 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
6947 	if (!mgmt_connected)
	/* Only ACL and LE links are reported over MGMT */
6950 	if (link_type != ACL_LINK && link_type != LE_LINK)
	/* Answer pending DISCONNECT commands; their socket is excluded
	 * from the broadcast below.
	 */
6953 	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6955 	bacpy(&ev.addr.bdaddr, bdaddr);
6956 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
6959 	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6964 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Fail a pending DISCONNECT command that matches this address and
 * transport; unrelated UNPAIR_DEVICE commands are flushed regardless.
 */
6968 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6969 			    u8 link_type, u8 addr_type, u8 status)
6971 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6972 	struct mgmt_cp_disconnect *cp;
6973 	struct mgmt_pending_cmd *cmd;
6975 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6978 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	/* Only answer if the pending command targets this peer */
6984 	if (bacmp(bdaddr, &cp->addr.bdaddr))
6987 	if (cp->addr.type != bdaddr_type)
6990 	cmd->cmd_complete(cmd, mgmt_status(status));
6991 	mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED; also expedite deferred power-off work
 * when this failure releases the last outstanding connection during a
 * power-down.
 */
6994 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6995 			 u8 addr_type, u8 status)
6997 	struct mgmt_ev_connect_failed ev;
6999 	/* The connection is still in hci_conn_hash so test for 1
7000 	 * instead of 0 to know if this is the last one.
7002 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7003 		cancel_delayed_work(&hdev->power_off);
7004 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
7007 	bacpy(&ev.addr.bdaddr, bdaddr);
7008 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7009 	ev.status = mgmt_status(status);
7011 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Forward a controller PIN code request to userspace as
 * MGMT_EV_PIN_CODE_REQUEST (BR/EDR only).
 */
7014 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7016 	struct mgmt_ev_pin_code_request ev;
7018 	bacpy(&ev.addr.bdaddr, bdaddr);
7019 	ev.addr.type = BDADDR_BREDR;
7022 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY command with the HCI status. */
7025 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7028 	struct mgmt_pending_cmd *cmd;
7030 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7034 	cmd->cmd_complete(cmd, mgmt_status(status));
7035 	mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY command with the HCI status. */
7038 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7041 	struct mgmt_pending_cmd *cmd;
7043 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7047 	cmd->cmd_complete(cmd, mgmt_status(status));
7048 	mgmt_pending_remove(cmd);
/* Forward an SSP user-confirmation request (numeric comparison) to
 * userspace as MGMT_EV_USER_CONFIRM_REQUEST.
 */
7051 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7052 			      u8 link_type, u8 addr_type, u32 value,
7055 	struct mgmt_ev_user_confirm_request ev;
7057 	BT_DBG("%s", hdev->name);
7059 	bacpy(&ev.addr.bdaddr, bdaddr);
7060 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7061 	ev.confirm_hint = confirm_hint;
7062 	ev.value = cpu_to_le32(value);
7064 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Forward an SSP passkey-entry request to userspace as
 * MGMT_EV_USER_PASSKEY_REQUEST.
 */
7068 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7069 			      u8 link_type, u8 addr_type)
7071 	struct mgmt_ev_user_passkey_request ev;
7073 	BT_DBG("%s", hdev->name);
7075 	bacpy(&ev.addr.bdaddr, bdaddr);
7076 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7078 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7082 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7083 u8 link_type, u8 addr_type, u8 status,
7086 struct mgmt_pending_cmd *cmd;
7088 cmd = pending_find(opcode, hdev);
7092 cmd->cmd_complete(cmd, mgmt_status(status));
7093 mgmt_pending_remove(cmd);
7098 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7099 u8 link_type, u8 addr_type, u8 status)
7101 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7102 status, MGMT_OP_USER_CONFIRM_REPLY);
7105 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7106 u8 link_type, u8 addr_type, u8 status)
7108 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7110 MGMT_OP_USER_CONFIRM_NEG_REPLY);
7113 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7114 u8 link_type, u8 addr_type, u8 status)
7116 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7117 status, MGMT_OP_USER_PASSKEY_REPLY);
7120 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7121 u8 link_type, u8 addr_type, u8 status)
7123 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7125 MGMT_OP_USER_PASSKEY_NEG_REPLY);
7128 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7129 u8 link_type, u8 addr_type, u32 passkey,
7132 struct mgmt_ev_passkey_notify ev;
7134 BT_DBG("%s", hdev->name);
7136 bacpy(&ev.addr.bdaddr, bdaddr);
7137 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7138 ev.passkey = __cpu_to_le32(passkey);
7139 ev.entered = entered;
7141 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7144 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7146 struct mgmt_ev_auth_failed ev;
7147 struct mgmt_pending_cmd *cmd;
7148 u8 status = mgmt_status(hci_status);
7150 bacpy(&ev.addr.bdaddr, &conn->dst);
7151 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7154 cmd = find_pairing(conn);
7156 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7157 cmd ? cmd->sk : NULL);
7160 cmd->cmd_complete(cmd, status);
7161 mgmt_pending_remove(cmd);
7165 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7167 struct cmd_lookup match = { NULL, hdev };
7171 u8 mgmt_err = mgmt_status(status);
7172 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7173 cmd_status_rsp, &mgmt_err);
7177 if (test_bit(HCI_AUTH, &hdev->flags))
7178 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
7180 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
7182 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7186 new_settings(hdev, match.sk);
7192 static void clear_eir(struct hci_request *req)
7194 struct hci_dev *hdev = req->hdev;
7195 struct hci_cp_write_eir cp;
7197 if (!lmp_ext_inq_capable(hdev))
7200 memset(hdev->eir, 0, sizeof(hdev->eir));
7202 memset(&cp, 0, sizeof(cp));
7204 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7207 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7209 struct cmd_lookup match = { NULL, hdev };
7210 struct hci_request req;
7211 bool changed = false;
7214 u8 mgmt_err = mgmt_status(status);
7216 if (enable && hci_dev_test_and_clear_flag(hdev,
7218 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7219 new_settings(hdev, NULL);
7222 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
7228 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
7230 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
7232 changed = hci_dev_test_and_clear_flag(hdev,
7235 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7238 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
7241 new_settings(hdev, match.sk);
7246 hci_req_init(&req, hdev);
7248 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7249 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
7250 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7251 sizeof(enable), &enable);
7252 __hci_req_update_eir(&req);
7257 hci_req_run(&req, NULL);
7260 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7262 struct cmd_lookup *match = data;
7264 if (match->sk == NULL) {
7265 match->sk = cmd->sk;
7266 sock_hold(match->sk);
7270 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7273 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7275 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7276 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7277 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7280 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
7281 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
7282 ext_info_changed(hdev, NULL);
7289 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7291 struct mgmt_cp_set_local_name ev;
7292 struct mgmt_pending_cmd *cmd;
7297 memset(&ev, 0, sizeof(ev));
7298 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7299 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7301 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7303 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7305 /* If this is a HCI command related to powering on the
7306 * HCI dev don't send any mgmt signals.
7308 if (pending_find(MGMT_OP_SET_POWERED, hdev))
7312 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7313 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
7314 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
7317 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7321 for (i = 0; i < uuid_count; i++) {
7322 if (!memcmp(uuid, uuids[i], 16))
7329 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7333 while (parsed < eir_len) {
7334 u8 field_len = eir[0];
7341 if (eir_len - parsed < field_len + 1)
7345 case EIR_UUID16_ALL:
7346 case EIR_UUID16_SOME:
7347 for (i = 0; i + 3 <= field_len; i += 2) {
7348 memcpy(uuid, bluetooth_base_uuid, 16);
7349 uuid[13] = eir[i + 3];
7350 uuid[12] = eir[i + 2];
7351 if (has_uuid(uuid, uuid_count, uuids))
7355 case EIR_UUID32_ALL:
7356 case EIR_UUID32_SOME:
7357 for (i = 0; i + 5 <= field_len; i += 4) {
7358 memcpy(uuid, bluetooth_base_uuid, 16);
7359 uuid[15] = eir[i + 5];
7360 uuid[14] = eir[i + 4];
7361 uuid[13] = eir[i + 3];
7362 uuid[12] = eir[i + 2];
7363 if (has_uuid(uuid, uuid_count, uuids))
7367 case EIR_UUID128_ALL:
7368 case EIR_UUID128_SOME:
7369 for (i = 0; i + 17 <= field_len; i += 16) {
7370 memcpy(uuid, eir + i + 2, 16);
7371 if (has_uuid(uuid, uuid_count, uuids))
7377 parsed += field_len + 1;
7378 eir += field_len + 1;
7384 static void restart_le_scan(struct hci_dev *hdev)
7386 /* If controller is not scanning we are done. */
7387 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
7390 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
7391 hdev->discovery.scan_start +
7392 hdev->discovery.scan_duration))
7395 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
7396 DISCOV_LE_RESTART_DELAY);
7399 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
7400 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7402 /* If a RSSI threshold has been specified, and
7403 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
7404 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
7405 * is set, let it through for further processing, as we might need to
7408 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7409 * the results are also dropped.
7411 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7412 (rssi == HCI_RSSI_INVALID ||
7413 (rssi < hdev->discovery.rssi &&
7414 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7417 if (hdev->discovery.uuid_count != 0) {
7418 /* If a list of UUIDs is provided in filter, results with no
7419 * matching UUID should be dropped.
7421 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
7422 hdev->discovery.uuids) &&
7423 !eir_has_uuids(scan_rsp, scan_rsp_len,
7424 hdev->discovery.uuid_count,
7425 hdev->discovery.uuids))
7429 /* If duplicate filtering does not report RSSI changes, then restart
7430 * scanning to ensure updated result with updated RSSI values.
7432 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
7433 restart_le_scan(hdev);
7435 /* Validate RSSI value against the RSSI threshold once more. */
7436 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7437 rssi < hdev->discovery.rssi)
7444 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7445 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7446 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7449 struct mgmt_ev_device_found *ev = (void *)buf;
7452 /* Don't send events for a non-kernel initiated discovery. With
7453 * LE one exception is if we have pend_le_reports > 0 in which
7454 * case we're doing passive scanning and want these events.
7456 if (!hci_discovery_active(hdev)) {
7457 if (link_type == ACL_LINK)
7459 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7463 if (hdev->discovery.result_filtering) {
7464 /* We are using service discovery */
7465 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
7470 if (hdev->discovery.limited) {
7471 /* Check for limited discoverable bit */
7473 if (!(dev_class[1] & 0x20))
7476 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
7477 if (!flags || !(flags[0] & LE_AD_LIMITED))
7482 /* Make sure that the buffer is big enough. The 5 extra bytes
7483 * are for the potential CoD field.
7485 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7488 memset(buf, 0, sizeof(buf));
7490 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7491 * RSSI value was reported as 0 when not available. This behavior
7492 * is kept when using device discovery. This is required for full
7493 * backwards compatibility with the API.
7495 * However when using service discovery, the value 127 will be
7496 * returned when the RSSI is not available.
7498 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
7499 link_type == ACL_LINK)
7502 bacpy(&ev->addr.bdaddr, bdaddr);
7503 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7505 ev->flags = cpu_to_le32(flags);
7508 /* Copy EIR or advertising data into event */
7509 memcpy(ev->eir, eir, eir_len);
7511 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7513 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7516 if (scan_rsp_len > 0)
7517 /* Append scan response data to event */
7518 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7520 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7521 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7523 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
7526 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7527 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7529 struct mgmt_ev_device_found *ev;
7530 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7533 ev = (struct mgmt_ev_device_found *) buf;
7535 memset(buf, 0, sizeof(buf));
7537 bacpy(&ev->addr.bdaddr, bdaddr);
7538 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7541 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7544 ev->eir_len = cpu_to_le16(eir_len);
7546 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7549 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7551 struct mgmt_ev_discovering ev;
7553 BT_DBG("%s discovering %u", hdev->name, discovering);
7555 memset(&ev, 0, sizeof(ev));
7556 ev.type = hdev->discovery.type;
7557 ev.discovering = discovering;
7559 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7562 static struct hci_mgmt_chan chan = {
7563 .channel = HCI_CHANNEL_CONTROL,
7564 .handler_count = ARRAY_SIZE(mgmt_handlers),
7565 .handlers = mgmt_handlers,
7566 .hdev_init = mgmt_init_hdev,
7571 return hci_mgmt_chan_register(&chan);
7574 void mgmt_exit(void)
7576 hci_mgmt_chan_unregister(&chan);