/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

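/* Illustrative usage sketch (added for this edit, not part of the
 * original file): a caller builds a request on the stack, queues one or
 * more commands and then runs it asynchronously. The callback name and
 * the command chosen below are placeholders.
 *
 *      static void example_complete(struct hci_dev *hdev, u8 status,
 *                                   u16 opcode)
 *      {
 *              BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *      }
 *
 *      static int example_send(struct hci_dev *hdev)
 *      {
 *              struct hci_request req;
 *              u8 scan = SCAN_PAGE;
 *
 *              hci_req_init(&req, hdev);
 *              hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *              return hci_req_run(&req, example_complete);
 *      }
 */
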
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

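/* Illustrative usage sketch (added for this edit, not part of the
 * original file): __hci_cmd_sync() sends a single command and blocks
 * until the matching completion event arrives; the caller owns the
 * returned skb. The opcode and timeout below are placeholders.
 *
 *      struct sk_buff *skb;
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL,
 *                           HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      ... parse skb->data ...
 *      kfree_skb(skb);
 */
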
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}

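/* Illustrative usage sketch (added for this edit, not part of the
 * original file): the usual pattern is a small builder callback handed
 * to hci_req_sync(), exactly like the update_scan()/scan_update_work()
 * pair further down in this file:
 *
 *      static int example_req(struct hci_request *req, unsigned long opt)
 *      {
 *              __hci_req_update_scan(req);
 *              return 0;
 *      }
 *
 *      u8 status;
 *      int err = hci_req_sync(hdev, example_req, 0, HCI_CMD_TIMEOUT,
 *                             &status);
 */
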
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

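/* For reference (added for this edit, not part of the original file):
 * the skb built by hci_prepare_cmd() carries a standard HCI command
 * packet, i.e. a 16-bit little-endian opcode, a one-byte parameter
 * length and plen bytes of parameters:
 *
 *      +---------------+------+---------------------+
 *      | opcode (LE16) | plen | plen parameter bytes |
 *      +---------------+------+---------------------+
 */
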
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to the duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

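/* Illustrative layout of the EIR buffer produced by create_eir() (added
 * for this edit, not part of the original file). Every field is
 * length-prefixed, and the length covers the type byte plus the data:
 *
 *      [len][EIR_NAME_COMPLETE or EIR_NAME_SHORT][name bytes]
 *      [2][EIR_TX_POWER][tx power]
 *      [9][EIR_DEVICE_ID][source][vendor][product][version] (LE16 each)
 *      [len][EIR_UUID16_ALL or EIR_UUID16_SOME][uuid16 ...]
 *      ... and likewise for the 32-bit and 128-bit UUID lists.
 */
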
void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                /* If the device is neither in pend_le_conns nor
                 * pend_le_reports then remove it from the white list.
                 */
                if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                               &b->bdaddr, b->bdaddr_type) &&
                    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                               &b->bdaddr, b->bdaddr_type)) {
                        struct hci_cp_le_del_from_white_list cp;

                        cp.bdaddr_type = b->bdaddr_type;
                        bacpy(&cp.bdaddr, &b->bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                                    sizeof(cp), &cp);
                        continue;
                }

                if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
        }

        /* Now that all stale white list entries have been removed, walk
         * through the list of pending connections and ensure that any new
         * device gets programmed into the controller.
         *
         * If there are more devices than available white list entries
         * in the controller, then just abort and return a filter policy
         * value that does not use the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

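/* Summary of update_white_list() return values (added for this edit,
 * derived from the code above): 0x00 selects the "accept all
 * advertising" filter policy because the white list cannot be used
 * (an RPA is involved or the controller's white list is too small),
 * while 0x01 selects the "white list only" policy.
 */
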
static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ PDUs are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using a random resolvable address with
         * LE privacy enabled, controllers that support the Extended
         * Scanner Filter Policies feature can additionally handle
         * directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

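/* Note (added for this edit, not part of the original file): the scan
 * interval and window programmed above are in units of 0.625 ms as
 * defined by the Bluetooth Core Specification, so for example a value
 * of 0x0010 corresponds to 10 ms.
 */
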
static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, u32 flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

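/* Decision table for adv_use_rpa() (added for this edit, derived from
 * the code above):
 *
 *      HCI_PRIVACY   HCI_LIMITED_PRIVACY   discov && bondable   RPA?
 *      no            -                     -                    no
 *      yes           no                    -                    yes
 *      yes           yes                   yes                  no
 *      yes           yes                   no                   yes
 */
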
void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u32 flags;

        if (hci_conn_num(hdev, LE_LINK) > 0)
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

        if (connectable)
                cp.type = LE_ADV_IND;
        else if (get_cur_adv_instance_scan_rsp_len(hdev))
                cp.type = LE_ADV_SCAN_IND;
        else
                cp.type = LE_ADV_NONCONN_IND;

        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        size_t name_len;
        int max_len;

        max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
        name_len = strlen(hdev->dev_name);
        if (name_len > 0 && max_len > 0) {
                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        return append_local_name(hdev, ptr, 0);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;
        u32 instance_flags;
        u8 scan_rsp_len = 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        instance_flags = adv_instance->flags;

        memcpy(ptr, adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        scan_rsp_len += adv_instance->scan_rsp_len;
        ptr += adv_instance->scan_rsp_len;

        if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
                scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

        return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_rsp_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        if (instance)
                len = create_instance_scan_rsp_data(hdev, instance, cp.data);
        else
                len = create_default_scan_rsp_data(hdev, cp.data);

        if (hdev->scan_rsp_data_len == len &&
            !memcmp(cp.data, hdev->scan_rsp_data, len))
                return;

        memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
        hdev->scan_rsp_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
        struct adv_info *adv_instance = NULL;
        u8 ad_len = 0, flags = 0;
        u32 instance_flags;

        /* Return 0 when the current instance identifier is invalid. */
        if (instance) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return 0;
        }

        instance_flags = get_adv_instance_flags(hdev, instance);

        /* The Add Advertising command allows userspace to set both the general
         * and limited discoverable flags.
         */
        if (instance_flags & MGMT_ADV_FLAG_DISCOV)
                flags |= LE_AD_GENERAL;

        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                flags |= LE_AD_NO_BREDR;

        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
                 */
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);

                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field.
                 */
                if (flags) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_FLAGS;
                        ptr[2] = flags;

                        ad_len += 3;
                        ptr += 3;
                }
        }

        if (adv_instance) {
                memcpy(ptr, adv_instance->adv_data,
                       adv_instance->adv_data_len);
                ad_len += adv_instance->adv_data_len;
                ptr += adv_instance->adv_data_len;
        }

        /* Provide Tx Power only if we can provide a valid value for it */
        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
            (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
                ptr[0] = 0x02;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8)hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_instance_adv_data(hdev, instance, cp.data);

        /* There's nothing to do if the data hasn't changed */
        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                __hci_req_update_adv_data(&req, 0x00);
                __hci_req_update_scan_rsp_data(&req, 0x00);
                __hci_req_enable_advertising(&req);
        }

        hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);
        struct hci_request req;
        u8 instance;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
                                    bool force)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = NULL;
        u16 timeout;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            list_empty(&hdev->adv_instances))
                return -EPERM;

        if (hdev->adv_instance_timeout)
                return -EBUSY;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        /* A zero timeout means unlimited advertising. As long as there is
         * only one instance, duration should be ignored. We still set a timeout
         * in case further instances are being added later on.
         *
         * If the remaining lifetime of the instance is more than the duration
         * then the timeout corresponds to the duration, otherwise it will be
         * reduced to the remaining instance lifetime.
         */
        if (adv_instance->timeout == 0 ||
            adv_instance->duration <= adv_instance->remaining_time)
                timeout = adv_instance->duration;
        else
                timeout = adv_instance->remaining_time;

        /* The remaining time is being reduced unless the instance is being
         * advertised without time limit.
         */
        if (adv_instance->timeout)
                adv_instance->remaining_time =
                                adv_instance->remaining_time - timeout;

        hdev->adv_instance_timeout = timeout;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->adv_instance_expire,
                           msecs_to_jiffies(timeout * 1000));

        /* If we're just re-scheduling the same instance again then do not
         * execute any HCI commands. This happens when a single instance is
         * being advertised.
         */
        if (!force && hdev->cur_adv_instance == instance &&
            hci_dev_test_flag(hdev, HCI_LE_ADV))
                return 0;

        hdev->cur_adv_instance = instance;
        __hci_req_update_adv_data(req, instance);
        __hci_req_update_scan_rsp_data(req, instance);
        __hci_req_enable_advertising(req);

        return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
                                struct hci_request *req, u8 instance,
                                bool force)
{
        struct adv_info *adv_instance, *n, *next_instance = NULL;
        int err;
        u8 rem_inst;

        /* Cancel any timeout concerning the removed instance(s). */
        if (!instance || hdev->cur_adv_instance == instance)
                cancel_adv_timeout(hdev);

        /* Get the next instance to advertise BEFORE we remove
         * the current one. This can be the same instance again
         * if there is only one instance.
         */
        if (instance && hdev->cur_adv_instance == instance)
                next_instance = hci_get_next_instance(hdev, instance);

        if (instance == 0x00) {
                list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
                                         list) {
                        if (!(force || adv_instance->timeout))
                                continue;

                        rem_inst = adv_instance->instance;
                        err = hci_remove_adv_instance(hdev, rem_inst);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, rem_inst);
                }
        } else {
                adv_instance = hci_find_adv_instance(hdev, instance);

                if (force || (adv_instance && adv_instance->timeout &&
                              !adv_instance->remaining_time)) {
                        /* Don't advertise a removed instance. */
                        if (next_instance &&
                            next_instance->instance == instance)
                                next_instance = NULL;

                        err = hci_remove_adv_instance(hdev, instance);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, instance);
                }
        }

        if (!req || !hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        if (next_instance)
                __hci_req_schedule_adv_instance(req, next_instance->instance,
                                                false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              bool use_rpa, u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * the current RPA has expired or something other than the
         * current RPA is in use, then generate a new one.
         */
        if (use_rpa) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        BT_ERR("%s failed to generate new RPA", hdev->name);
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without a resolvable private
         * address, use a non-resolvable private address. This is useful
         * for active scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from random six bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

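/* Summary of hci_update_random_address() (added for this edit, derived
 * from the code above): use_rpa selects a resolvable private address,
 * require_privacy falls back to a freshly generated non-resolvable
 * private address, a forced or implied static address selects the
 * static random address, and otherwise the public address is used.
 */
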
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->whitelist, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 scan;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (!hdev_is_powered(hdev))
                return;

        if (mgmt_powering_down(hdev))
                return;

        if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
            disconnected_whitelist_entries(hdev))
                scan = SCAN_PAGE;
        else
                scan = SCAN_DISABLED;

        if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                scan |= SCAN_INQUIRY;

        if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
            test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
                return;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

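/* Note (added for this edit, not part of the original file): the scan
 * value written above is a bitmask, so a device that is both
 * connectable and discoverable ends up writing
 * SCAN_PAGE | SCAN_INQUIRY to the controller.
 */
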
static int update_scan(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        __hci_req_update_scan(req);
        hci_dev_unlock(req->hdev);
        return 0;
}

static void scan_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

        hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        hci_dev_lock(hdev);

        __hci_req_update_scan(req);

        /* If BR/EDR is not enabled and we disable advertising as a
         * by-product of disabling connectable, we need to update the
         * advertising flags.
         */
        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                __hci_req_update_adv_data(req, hdev->cur_adv_instance);

        /* Update the advertising parameters if necessary */
        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            !list_empty(&hdev->adv_instances))
                __hci_req_enable_advertising(req);

        __hci_update_background_scan(req);

        hci_dev_unlock(hdev);

        return 0;
}

static void connectable_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            connectable_update);
        u8 status;

        hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
        mgmt_set_connectable_complete(hdev, status);
}

1566 static u8 get_service_classes(struct hci_dev *hdev)
1567 {
1568         struct bt_uuid *uuid;
1569         u8 val = 0;
1570
1571         list_for_each_entry(uuid, &hdev->uuids, list)
1572                 val |= uuid->svc_hint;
1573
1574         return val;
1575 }
1576
1577 void __hci_req_update_class(struct hci_request *req)
1578 {
1579         struct hci_dev *hdev = req->hdev;
1580         u8 cod[3];
1581
1582         BT_DBG("%s", hdev->name);
1583
1584         if (!hdev_is_powered(hdev))
1585                 return;
1586
1587         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1588                 return;
1589
1590         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1591                 return;
1592
1593         cod[0] = hdev->minor_class;
1594         cod[1] = hdev->major_class;
1595         cod[2] = get_service_classes(hdev);
1596
1597         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1598                 cod[1] |= 0x20; /* Limited Discoverable Mode (CoD bit 13) */
1599
1600         if (memcmp(cod, hdev->dev_class, 3) == 0)
1601                 return;
1602
1603         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1604 }
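/* A worked example with assumed values, for illustration only: a device
 * with minor_class 0x0c, major_class 0x01 and a service class hint of
 * 0x08 would send the CoD bytes { 0x0c, 0x01, 0x08 }, the little-endian
 * encoding of the 24-bit Class of Device value 0x08010c.
 */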
1605
1606 static void write_iac(struct hci_request *req)
1607 {
1608         struct hci_dev *hdev = req->hdev;
1609         struct hci_cp_write_current_iac_lap cp;
1610
1611         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1612                 return;
1613
1614         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1615                 /* Limited discoverable mode */
1616                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1617                 cp.iac_lap[0] = 0x00;   /* LIAC */
1618                 cp.iac_lap[1] = 0x8b;
1619                 cp.iac_lap[2] = 0x9e;
1620                 cp.iac_lap[3] = 0x33;   /* GIAC */
1621                 cp.iac_lap[4] = 0x8b;
1622                 cp.iac_lap[5] = 0x9e;
1623         } else {
1624                 /* General discoverable mode */
1625                 cp.num_iac = 1;
1626                 cp.iac_lap[0] = 0x33;   /* GIAC */
1627                 cp.iac_lap[1] = 0x8b;
1628                 cp.iac_lap[2] = 0x9e;
1629         }
1630
1631         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1632                     (cp.num_iac * 3) + 1, &cp);
1633 }
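/* The IAC LAPs used above are the little-endian encodings of the values
 * assigned by the Bluetooth SIG: GIAC = 0x9e8b33 and LIAC = 0x9e8b00,
 * which is why both share the trailing 0x8b, 0x9e bytes.
 */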
1634
1635 static int discoverable_update(struct hci_request *req, unsigned long opt)
1636 {
1637         struct hci_dev *hdev = req->hdev;
1638
1639         hci_dev_lock(hdev);
1640
1641         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1642                 write_iac(req);
1643                 __hci_req_update_scan(req);
1644                 __hci_req_update_class(req);
1645         }
1646
1647         /* Advertising instances don't use the global discoverable setting, so
1648          * only update AD if advertising was enabled using Set Advertising.
1649          */
1650         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1651                 __hci_req_update_adv_data(req, 0x00);
1652
1653                 /* Discoverable mode affects the local advertising
1654                  * address in limited privacy mode.
1655                  */
1656                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1657                         __hci_req_enable_advertising(req);
1658         }
1659
1660         hci_dev_unlock(hdev);
1661
1662         return 0;
1663 }
1664
1665 static void discoverable_update_work(struct work_struct *work)
1666 {
1667         struct hci_dev *hdev = container_of(work, struct hci_dev,
1668                                             discoverable_update);
1669         u8 status;
1670
1671         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1672         mgmt_set_discoverable_complete(hdev, status);
1673 }
1674
1675 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1676                       u8 reason)
1677 {
1678         switch (conn->state) {
1679         case BT_CONNECTED:
1680         case BT_CONFIG:
1681                 if (conn->type == AMP_LINK) {
1682                         struct hci_cp_disconn_phy_link cp;
1683
1684                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1685                         cp.reason = reason;
1686                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1687                                     &cp);
1688                 } else {
1689                         struct hci_cp_disconnect dc;
1690
1691                         dc.handle = cpu_to_le16(conn->handle);
1692                         dc.reason = reason;
1693                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1694                 }
1695
1696                 conn->state = BT_DISCONN;
1697
1698                 break;
1699         case BT_CONNECT:
1700                 if (conn->type == LE_LINK) {
1701                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1702                                 break;
1703                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1704                                     0, NULL);
1705                 } else if (conn->type == ACL_LINK) {
1706                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1707                                 break;
1708                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1709                                     6, &conn->dst);
1710                 }
1711                 break;
1712         case BT_CONNECT2:
1713                 if (conn->type == ACL_LINK) {
1714                         struct hci_cp_reject_conn_req rej;
1715
1716                         bacpy(&rej.bdaddr, &conn->dst);
1717                         rej.reason = reason;
1718
1719                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1720                                     sizeof(rej), &rej);
1721                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1722                         struct hci_cp_reject_sync_conn_req rej;
1723
1724                         bacpy(&rej.bdaddr, &conn->dst);
1725
1726                         /* SCO rejection has its own limited set of
1727                          * allowed error values (0x0D-0x0F), which is not
1728                          * compatible with most values passed to this
1729                          * function. To be safe, hard-code one of the
1730                          * values that is valid for SCO.
1731                          */
1732                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
1733
1734                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1735                                     sizeof(rej), &rej);
1736                 }
1737                 break;
1738         default:
1739                 conn->state = BT_CLOSED;
1740                 break;
1741         }
1742 }
1743
1744 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1745 {
1746         if (status)
1747                 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1748 }
1749
1750 int hci_abort_conn(struct hci_conn *conn, u8 reason)
1751 {
1752         struct hci_request req;
1753         int err;
1754
1755         hci_req_init(&req, conn->hdev);
1756
1757         __hci_abort_conn(&req, conn, reason);
1758
1759         err = hci_req_run(&req, abort_conn_complete);
1760         if (err && err != -ENODATA) {
1761                 BT_ERR("Failed to run HCI request: err %d", err);
1762                 return err;
1763         }
1764
1765         return 0;
1766 }
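/* A minimal usage sketch (hypothetical caller, for illustration only),
 * tearing down an established connection with the standard "remote user
 * terminated connection" reason code:
 *
 *	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 *	if (err)
 *		... the request could not even be queued ...
 */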
1767
1768 static int update_bg_scan(struct hci_request *req, unsigned long opt)
1769 {
1770         hci_dev_lock(req->hdev);
1771         __hci_update_background_scan(req);
1772         hci_dev_unlock(req->hdev);
1773         return 0;
1774 }
1775
1776 static void bg_scan_update(struct work_struct *work)
1777 {
1778         struct hci_dev *hdev = container_of(work, struct hci_dev,
1779                                             bg_scan_update);
1780         struct hci_conn *conn;
1781         u8 status;
1782         int err;
1783
1784         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1785         if (!err)
1786                 return;
1787
1788         hci_dev_lock(hdev);
1789
1790         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1791         if (conn)
1792                 hci_le_conn_failed(conn, status);
1793
1794         hci_dev_unlock(hdev);
1795 }
1796
1797 static int le_scan_disable(struct hci_request *req, unsigned long opt)
1798 {
1799         hci_req_add_le_scan_disable(req);
1800         return 0;
1801 }
1802
1803 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1804 {
1805         u8 length = opt;
1806         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1807         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1808         struct hci_cp_inquiry cp;
1809
1810         BT_DBG("%s", req->hdev->name);
1811
1812         hci_dev_lock(req->hdev);
1813         hci_inquiry_cache_flush(req->hdev);
1814         hci_dev_unlock(req->hdev);
1815
1816         memset(&cp, 0, sizeof(cp));
1817
1818         if (req->hdev->discovery.limited)
1819                 memcpy(&cp.lap, liac, sizeof(cp.lap));
1820         else
1821                 memcpy(&cp.lap, giac, sizeof(cp.lap));
1822
1823         cp.length = length;
1824
1825         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1826
1827         return 0;
1828 }
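/* The inquiry length passed above is expressed in units of 1.28 seconds,
 * as defined for the HCI Inquiry command; a length of 0x08, for example,
 * keeps the inquiry running for roughly 10.24 seconds.
 */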
1829
1830 static void le_scan_disable_work(struct work_struct *work)
1831 {
1832         struct hci_dev *hdev = container_of(work, struct hci_dev,
1833                                             le_scan_disable.work);
1834         u8 status;
1835
1836         BT_DBG("%s", hdev->name);
1837
1838         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1839                 return;
1840
1841         cancel_delayed_work(&hdev->le_scan_restart);
1842
1843         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1844         if (status) {
1845                 BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1846                 return;
1847         }
1848
1849         hdev->discovery.scan_start = 0;
1850
1851         /* If we were running an LE-only scan, change the discovery
1852          * state. If we were running both LE scanning and BR/EDR inquiry
1853          * simultaneously and the BR/EDR inquiry has already finished,
1854          * stop discovery; otherwise the BR/EDR inquiry will stop
1855          * discovery when it finishes. If a remote name resolution is
1856          * still pending, do not change the discovery state.
1857          */
1858
1859         if (hdev->discovery.type == DISCOV_TYPE_LE)
1860                 goto discov_stopped;
1861
1862         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1863                 return;
1864
1865         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1866                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1867                     hdev->discovery.state != DISCOVERY_RESOLVING)
1868                         goto discov_stopped;
1869
1870                 return;
1871         }
1872
1873         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1874                      HCI_CMD_TIMEOUT, &status);
1875         if (status) {
1876                 BT_ERR("Inquiry failed: status 0x%02x", status);
1877                 goto discov_stopped;
1878         }
1879
1880         return;
1881
1882 discov_stopped:
1883         hci_dev_lock(hdev);
1884         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1885         hci_dev_unlock(hdev);
1886 }
1887
1888 static int le_scan_restart(struct hci_request *req, unsigned long opt)
1889 {
1890         struct hci_dev *hdev = req->hdev;
1891         struct hci_cp_le_set_scan_enable cp;
1892
1893         /* If the controller is not scanning, we are done. */
1894         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1895                 return 0;
1896
1897         hci_req_add_le_scan_disable(req);
1898
1899         memset(&cp, 0, sizeof(cp));
1900         cp.enable = LE_SCAN_ENABLE;
1901         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1902         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1903
1904         return 0;
1905 }
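/* Disabling and immediately re-enabling the scan is what flushes the
 * controller's duplicate filter, so that already-seen devices start
 * reporting again. This is also why le_scan_restart_work() below only
 * re-arms the disable timer on controllers with the
 * HCI_QUIRK_STRICT_DUPLICATE_FILTER quirk set.
 */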
1906
1907 static void le_scan_restart_work(struct work_struct *work)
1908 {
1909         struct hci_dev *hdev = container_of(work, struct hci_dev,
1910                                             le_scan_restart.work);
1911         unsigned long timeout, duration, scan_start, now;
1912         u8 status;
1913
1914         BT_DBG("%s", hdev->name);
1915
1916         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1917         if (status) {
1918                 BT_ERR("Failed to restart LE scan: status %d", status);
1919                 return;
1920         }
1921
1922         hci_dev_lock(hdev);
1923
1924         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1925             !hdev->discovery.scan_start)
1926                 goto unlock;
1927
1928         /* When the scan was started, hdev->le_scan_disable was queued to
1929          * run 'duration' after scan_start. That work was canceled during
1930          * the scan restart, so queue it again with the remaining timeout
1931          * to make sure the scan does not run indefinitely.
1932          */
1933         duration = hdev->discovery.scan_duration;
1934         scan_start = hdev->discovery.scan_start;
1935         now = jiffies;
1936         if (now - scan_start <= duration) {
1937                 int elapsed;
1938
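		/* The jiffies counter may have wrapped around between
		 * scan_start and now; account for that when computing
		 * the elapsed time.
		 */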
1939                 if (now >= scan_start)
1940                         elapsed = now - scan_start;
1941                 else
1942                         elapsed = ULONG_MAX - scan_start + now;
1943
1944                 timeout = duration - elapsed;
1945         } else {
1946                 timeout = 0;
1947         }
1948
1949         queue_delayed_work(hdev->req_workqueue,
1950                            &hdev->le_scan_disable, timeout);
1951
1952 unlock:
1953         hci_dev_unlock(hdev);
1954 }
1955
1956 static void disable_advertising(struct hci_request *req)
1957 {
1958         u8 enable = 0x00;
1959
1960         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1961 }
1962
1963 static int active_scan(struct hci_request *req, unsigned long opt)
1964 {
1965         u16 interval = opt;
1966         struct hci_dev *hdev = req->hdev;
1967         struct hci_cp_le_set_scan_param param_cp;
1968         struct hci_cp_le_set_scan_enable enable_cp;
1969         u8 own_addr_type;
1970         int err;
1971
1972         BT_DBG("%s", hdev->name);
1973
1974         if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
1975                 hci_dev_lock(hdev);
1976
1977                 /* Don't let discovery abort an outgoing connection attempt
1978                  * that's using directed advertising.
1979                  */
1980                 if (hci_lookup_le_connect(hdev)) {
1981                         hci_dev_unlock(hdev);
1982                         return -EBUSY;
1983                 }
1984
1985                 cancel_adv_timeout(hdev);
1986                 hci_dev_unlock(hdev);
1987
1988                 disable_advertising(req);
1989         }
1990
1991         /* If controller is scanning, it means the background scanning is
1992          * running. Thus, we should temporarily stop it in order to set the
1993          * discovery scanning parameters.
1994          */
1995         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1996                 hci_req_add_le_scan_disable(req);
1997
1998         /* All active scans will be done with either a resolvable private
1999          * address (when the privacy feature has been enabled) or a
2000          * non-resolvable private address.
2001          */
2002         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2003                                         &own_addr_type);
2004         if (err < 0)
2005                 own_addr_type = ADDR_LE_DEV_PUBLIC;
2006
2007         memset(&param_cp, 0, sizeof(param_cp));
2008         param_cp.type = LE_SCAN_ACTIVE;
2009         param_cp.interval = cpu_to_le16(interval);
2010         param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2011         param_cp.own_address_type = own_addr_type;
2012
2013         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2014                     &param_cp);
2015
2016         memset(&enable_cp, 0, sizeof(enable_cp));
2017         enable_cp.enable = LE_SCAN_ENABLE;
2018         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2019
2020         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2021                     &enable_cp);
2022
2023         return 0;
2024 }
2025
2026 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2027 {
2028         int err;
2029
2030         BT_DBG("%s", req->hdev->name);
2031
2032         err = active_scan(req, opt);
2033         if (err)
2034                 return err;
2035
2036         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2037 }
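/* Passing DISCOV_LE_SCAN_INT * 2 from start_discovery() below doubles
 * the LE scan interval while leaving the scan window unchanged, halving
 * the LE duty cycle so that the controller has air time left for the
 * BR/EDR inquiry running at the same time.
 */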
2038
2039 static void start_discovery(struct hci_dev *hdev, u8 *status)
2040 {
2041         unsigned long timeout;
2042
2043         BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2044
2045         switch (hdev->discovery.type) {
2046         case DISCOV_TYPE_BREDR:
2047                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2048                         hci_req_sync(hdev, bredr_inquiry,
2049                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2050                                      status);
2051                 return;
2052         case DISCOV_TYPE_INTERLEAVED:
2053                 /* When running simultaneous discovery, the LE scanning time
2054                  * should occupy the whole discovery time since BR/EDR inquiry
2055                  * and LE scanning are scheduled by the controller.
2056                  *
2057                  * For interleaved discovery, in comparison, BR/EDR inquiry
2058                  * and LE scanning are done sequentially with separate
2059                  * timeouts.
2060                  */
2061                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2062                              &hdev->quirks)) {
2063                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2064                         /* During simultaneous discovery, we double LE scan
2065                          * interval. We must leave some time for the controller
2066                          * to do BR/EDR inquiry.
2067                          */
2068                         hci_req_sync(hdev, interleaved_discov,
2069                                      DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2070                                      status);
2071                         break;
2072                 }
2073
2074                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2075                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2076                              HCI_CMD_TIMEOUT, status);
2077                 break;
2078         case DISCOV_TYPE_LE:
2079                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2080                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2081                              HCI_CMD_TIMEOUT, status);
2082                 break;
2083         default:
2084                 *status = HCI_ERROR_UNSPECIFIED;
2085                 return;
2086         }
2087
2088         if (*status)
2089                 return;
2090
2091         BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2092
2093         /* When service discovery is used and the controller has a
2094          * strict duplicate filter, it is important to remember the
2095          * start and duration of the scan. This is required for
2096          * restarting scanning during the discovery phase.
2097          */
2098         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2099             hdev->discovery.result_filtering) {
2100                 hdev->discovery.scan_start = jiffies;
2101                 hdev->discovery.scan_duration = timeout;
2102         }
2103
2104         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2105                            timeout);
2106 }
2107
2108 bool hci_req_stop_discovery(struct hci_request *req)
2109 {
2110         struct hci_dev *hdev = req->hdev;
2111         struct discovery_state *d = &hdev->discovery;
2112         struct hci_cp_remote_name_req_cancel cp;
2113         struct inquiry_entry *e;
2114         bool ret = false;
2115
2116         BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2117
2118         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2119                 if (test_bit(HCI_INQUIRY, &hdev->flags))
2120                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2121
2122                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2123                         cancel_delayed_work(&hdev->le_scan_disable);
2124                         hci_req_add_le_scan_disable(req);
2125                 }
2126
2127                 ret = true;
2128         } else {
2129                 /* Passive scanning */
2130                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2131                         hci_req_add_le_scan_disable(req);
2132                         ret = true;
2133                 }
2134         }
2135
2136         /* No further actions needed for LE-only discovery */
2137         if (d->type == DISCOV_TYPE_LE)
2138                 return ret;
2139
2140         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2141                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2142                                                      NAME_PENDING);
2143                 if (!e)
2144                         return ret;
2145
2146                 bacpy(&cp.bdaddr, &e->data.bdaddr);
2147                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2148                             &cp);
2149                 ret = true;
2150         }
2151
2152         return ret;
2153 }
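/* A minimal usage sketch (mirroring stop_discovery() below); the return
 * value tells the caller whether any HCI commands were actually queued,
 * i.e. whether a completion event should be waited for:
 *
 *	hci_dev_lock(hdev);
 *	if (hci_req_stop_discovery(&req))
 *		... run the request and wait for it to complete ...
 *	hci_dev_unlock(hdev);
 */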
2154
2155 static int stop_discovery(struct hci_request *req, unsigned long opt)
2156 {
2157         hci_dev_lock(req->hdev);
2158         hci_req_stop_discovery(req);
2159         hci_dev_unlock(req->hdev);
2160
2161         return 0;
2162 }
2163
2164 static void discov_update(struct work_struct *work)
2165 {
2166         struct hci_dev *hdev = container_of(work, struct hci_dev,
2167                                             discov_update);
2168         u8 status = 0;
2169
2170         switch (hdev->discovery.state) {
2171         case DISCOVERY_STARTING:
2172                 start_discovery(hdev, &status);
2173                 mgmt_start_discovery_complete(hdev, status);
2174                 if (status)
2175                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2176                 else
2177                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2178                 break;
2179         case DISCOVERY_STOPPING:
2180                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2181                 mgmt_stop_discovery_complete(hdev, status);
2182                 if (!status)
2183                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2184                 break;
2185         case DISCOVERY_STOPPED:
2186         default:
2187                 return;
2188         }
2189 }
2190
2191 static void discov_off(struct work_struct *work)
2192 {
2193         struct hci_dev *hdev = container_of(work, struct hci_dev,
2194                                             discov_off.work);
2195
2196         BT_DBG("%s", hdev->name);
2197
2198         hci_dev_lock(hdev);
2199
2200         /* When the discoverable timeout triggers, just make sure that
2201          * the limited discoverable flag is cleared. Even in the case of
2202          * a timeout triggered from the general discoverable mode, it is
2203          * safe to unconditionally clear the flag.
2204          */
2205         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2206         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2207         hdev->discov_timeout = 0;
2208
2209         hci_dev_unlock(hdev);
2210
2211         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2212         mgmt_new_settings(hdev);
2213 }
2214
2215 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2216 {
2217         struct hci_dev *hdev = req->hdev;
2218         u8 link_sec;
2219
2220         hci_dev_lock(hdev);
2221
2222         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2223             !lmp_host_ssp_capable(hdev)) {
2224                 u8 mode = 0x01;
2225
2226                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2227
2228                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2229                         u8 support = 0x01;
2230
2231                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2232                                     sizeof(support), &support);
2233                 }
2234         }
2235
2236         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2237             lmp_bredr_capable(hdev)) {
2238                 struct hci_cp_write_le_host_supported cp;
2239
2240                 cp.le = 0x01;
2241                 cp.simul = 0x00;
2242
2243                 /* Check first if we already have the right
2244                  * host state (host features set)
2245                  */
2246                 if (cp.le != lmp_host_le_capable(hdev) ||
2247                     cp.simul != lmp_host_le_br_capable(hdev))
2248                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2249                                     sizeof(cp), &cp);
2250         }
2251
2252         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2253                 /* Make sure the controller has a good default for
2254                  * advertising data. This also applies to the case
2255                  * where BR/EDR was toggled during the AUTO_OFF phase.
2256                  */
2257                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2258                     list_empty(&hdev->adv_instances)) {
2259                         __hci_req_update_adv_data(req, 0x00);
2260                         __hci_req_update_scan_rsp_data(req, 0x00);
2261
2262                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2263                                 __hci_req_enable_advertising(req);
2264                 } else if (!list_empty(&hdev->adv_instances)) {
2265                         struct adv_info *adv_instance;
2266
2267                         adv_instance = list_first_entry(&hdev->adv_instances,
2268                                                         struct adv_info, list);
2269                         __hci_req_schedule_adv_instance(req,
2270                                                         adv_instance->instance,
2271                                                         true);
2272                 }
2273         }
2274
2275         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2276         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2277                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2278                             sizeof(link_sec), &link_sec);
2279
2280         if (lmp_bredr_capable(hdev)) {
2281                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2282                         __hci_req_write_fast_connectable(req, true);
2283                 else
2284                         __hci_req_write_fast_connectable(req, false);
2285                 __hci_req_update_scan(req);
2286                 __hci_req_update_class(req);
2287                 __hci_req_update_name(req);
2288                 __hci_req_update_eir(req);
2289         }
2290
2291         hci_dev_unlock(hdev);
2292         return 0;
2293 }
2294
2295 int __hci_req_hci_power_on(struct hci_dev *hdev)
2296 {
2297         /* Register the available SMP channels (BR/EDR and LE) only when
2298          * successfully powering on the controller. This late
2299          * registration is required so that LE SMP can clearly decide if
2300          * the public address or static address is used.
2301          */
2302         smp_register(hdev);
2303
2304         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2305                               NULL);
2306 }
2307
2308 void hci_request_setup(struct hci_dev *hdev)
2309 {
2310         INIT_WORK(&hdev->discov_update, discov_update);
2311         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2312         INIT_WORK(&hdev->scan_update, scan_update_work);
2313         INIT_WORK(&hdev->connectable_update, connectable_update_work);
2314         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2315         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2316         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2317         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2318         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2319 }
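/* Every work item initialized above has a matching cancellation in
 * hci_request_cancel_all() below; the former is expected to run when a
 * controller is set up and the latter when it is shut down.
 */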
2320
2321 void hci_request_cancel_all(struct hci_dev *hdev)
2322 {
2323         hci_req_sync_cancel(hdev, ENODEV);
2324
2325         cancel_work_sync(&hdev->discov_update);
2326         cancel_work_sync(&hdev->bg_scan_update);
2327         cancel_work_sync(&hdev->scan_update);
2328         cancel_work_sync(&hdev->connectable_update);
2329         cancel_work_sync(&hdev->discoverable_update);
2330         cancel_delayed_work_sync(&hdev->discov_off);
2331         cancel_delayed_work_sync(&hdev->le_scan_disable);
2332         cancel_delayed_work_sync(&hdev->le_scan_restart);
2333
2334         if (hdev->adv_instance_timeout) {
2335                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2336                 hdev->adv_instance_timeout = 0;
2337         }
2338 }