Bluetooth: Fix local name in scan rsp
net/bluetooth/hci_request.c
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}
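
/* Illustrative usage sketch (not part of the original file): a request is
 * initialized, one or more commands are queued on it, and it is then run
 * with an optional completion callback. "example_enable_page_scan" is a
 * hypothetical helper shown only to demonstrate the pattern.
 */
static inline int example_enable_page_scan(struct hci_dev *hdev)
{
        struct hci_request req;
        u8 scan = SCAN_PAGE;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

        /* Returns 0 on success, -ENODATA if nothing was queued */
        return hci_req_run(&req, NULL);
}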

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
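
/* Illustrative sketch (hypothetical helper, not part of the original file):
 * __hci_cmd_sync() sends one command and blocks until the matching Command
 * Complete event arrives, returning the event payload as an skb.
 */
static inline int example_read_local_version(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
                             HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* skb->data holds struct hci_rp_read_local_version here */
        kfree_skb(skb);
        return 0;
}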

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}
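
/* Illustrative sketch (hypothetical "example_build" callback, not part of
 * the original file): hci_req_sync() takes a builder function that queues
 * commands on the request, then waits for the whole request to complete.
 */
static int __maybe_unused example_build(struct hci_request *req,
                                        unsigned long opt)
{
        u8 scan = (u8)opt;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

/* Caller side would then be something like:
 * hci_req_sync(hdev, example_build, SCAN_PAGE, HCI_CMD_TIMEOUT, NULL);
 */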

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
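
/* Note (added for clarity): page scan interval and window are expressed in
 * baseband slots of 0.625 ms, so 0x0100 * 0.625 ms = 160 ms, 0x0800 *
 * 0.625 ms = 1.28 s, and the 0x0012 window above is 11.25 ms.
 */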

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}
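
/* Worked example (added for clarity): with the 16-bit UUIDs 0x110B and
 * 0x110E in hdev->uuids, the field above encodes little-endian as
 * 05 03 0b 11 0e 11 - length 5, type EIR_UUID16_ALL (0x03), then the two
 * UUIDs. If space runs out, the type is downgraded to EIR_UUID16_SOME.
 */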

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        uint8_t white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                /* If the device is neither in pend_le_conns nor
                 * pend_le_reports then remove it from the white list.
                 */
                if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                               &b->bdaddr, b->bdaddr_type) &&
                    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                               &b->bdaddr, b->bdaddr_type)) {
                        struct hci_cp_le_del_from_white_list cp;

                        cp.bdaddr_type = b->bdaddr_type;
                        bacpy(&cp.bdaddr, &b->bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                                    sizeof(cp), &cp);
                        continue;
                }

                if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
        }

        /* Since all no-longer-valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the list of
         * available white list entries in the controller, then
         * just abort and return a filter policy value that does not
         * use the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using random resolvable addresses and
         * with that having LE privacy enabled, then controllers with
         * Extended Scanner Filter Policies support can now enable support
         * for handling directed advertising.
         *
         * So instead of using filter policies 0x00 (no white list)
         * and 0x01 (white list enabled) use the new filter policies
         * 0x02 (no white list) and 0x03 (white list enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}
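
/* Note (added for clarity): le_scan_interval and le_scan_window are in
 * units of 0.625 ms. The request always queues the two commands in order -
 * scan parameters first, then scan enable - since the parameters cannot be
 * changed while scanning is active.
 */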

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}
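
/* Summary of adv_use_rpa() (added for clarity):
 *
 *   HCI_PRIVACY clear                          -> no RPA
 *   HCI_PRIVACY set, HCI_LIMITED_PRIVACY clear -> RPA
 *   limited privacy + discoverable + bondable  -> no RPA
 *   limited privacy otherwise                  -> RPA
 */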

void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u32 flags;

        if (hci_conn_num(hdev, LE_LINK) > 0)
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

        if (connectable)
                cp.type = LE_ADV_IND;
        else if (get_cur_adv_instance_scan_rsp_len(hdev))
                cp.type = LE_ADV_SCAN_IND;
        else
                cp.type = LE_ADV_NONCONN_IND;

        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        size_t complete_len;
        size_t short_len;
        int max_len;

        max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
        complete_len = strlen(hdev->dev_name);
        short_len = strlen(hdev->short_name);

        /* no space left for name */
        if (max_len < 1)
                return ad_len;

        /* no name set */
        if (!complete_len)
                return ad_len;

        /* complete name fits and is no longer than the max short name length */
        if (complete_len <= max_len &&
            complete_len <= HCI_MAX_SHORT_NAME_LENGTH) {
                ptr[0] = complete_len + 1;
                ptr[1] = EIR_NAME_COMPLETE;
                memcpy(ptr + 2, hdev->dev_name, complete_len);

                return ad_len + complete_len + 2;
        }

        /* short name set and fits */
        if (short_len && short_len <= max_len) {
                ptr[0] = short_len + 1;
                ptr[1] = EIR_NAME_SHORT;
                memcpy(ptr + 2, hdev->short_name, short_len);

                return ad_len + short_len + 2;
        }

        /* no short name set so shorten complete name */
        if (!short_len) {
                ptr[0] = max_len + 1;
                ptr[1] = EIR_NAME_SHORT;
                memcpy(ptr + 2, hdev->dev_name, max_len);

                return ad_len + max_len + 2;
        }

        return ad_len;
}
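
/* Worked example (added for clarity): with dev_name "BlueZ 5" (7 bytes,
 * within HCI_MAX_SHORT_NAME_LENGTH) and ad_len 0, the field becomes
 * 08 09 'B' 'l' 'u' 'e' 'Z' ' ' '5' - length 8, EIR_NAME_COMPLETE (0x09),
 * then the name - and the function returns 9.
 */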

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        return append_local_name(hdev, ptr, 0);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;
        u32 instance_flags;
        u8 scan_rsp_len = 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        instance_flags = adv_instance->flags;

        if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
                ptr[0] = 3;
                ptr[1] = EIR_APPEARANCE;
                put_unaligned_le16(hdev->appearance, ptr + 2);
                scan_rsp_len += 4;
                ptr += 4;
        }

        memcpy(ptr, adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        scan_rsp_len += adv_instance->scan_rsp_len;
        ptr += adv_instance->scan_rsp_len;

        if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
                scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

        return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_rsp_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        if (instance)
                len = create_instance_scan_rsp_data(hdev, instance, cp.data);
        else
                len = create_default_scan_rsp_data(hdev, cp.data);

        if (hdev->scan_rsp_data_len == len &&
            !memcmp(cp.data, hdev->scan_rsp_data, len))
                return;

        memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
        hdev->scan_rsp_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
        struct adv_info *adv_instance = NULL;
        u8 ad_len = 0, flags = 0;
        u32 instance_flags;

        /* Return 0 when the current instance identifier is invalid. */
        if (instance) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return 0;
        }

        instance_flags = get_adv_instance_flags(hdev, instance);

        /* The Add Advertising command allows userspace to set both the general
         * and limited discoverable flags.
         */
        if (instance_flags & MGMT_ADV_FLAG_DISCOV)
                flags |= LE_AD_GENERAL;

        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                flags |= LE_AD_NO_BREDR;

        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
                 */
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);

                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field.
                 */
                if (flags) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_FLAGS;
                        ptr[2] = flags;

                        ad_len += 3;
                        ptr += 3;
                }
        }

        if (adv_instance) {
                memcpy(ptr, adv_instance->adv_data,
                       adv_instance->adv_data_len);
                ad_len += adv_instance->adv_data_len;
                ptr += adv_instance->adv_data_len;
        }

        /* Provide Tx Power only if we can provide a valid value for it */
        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
            (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
                ptr[0] = 0x02;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8)hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_instance_adv_data(hdev, instance, cp.data);

        /* There's nothing to do if the data hasn't changed */
        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                __hci_req_update_adv_data(&req, 0x00);
                __hci_req_update_scan_rsp_data(&req, 0x00);
                __hci_req_enable_advertising(&req);
        }

        hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
                                    bool force)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = NULL;
        u16 timeout;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            list_empty(&hdev->adv_instances))
                return -EPERM;

        if (hdev->adv_instance_timeout)
                return -EBUSY;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        /* A zero timeout means unlimited advertising. As long as there is
         * only one instance, duration should be ignored. We still set a timeout
         * in case further instances are being added later on.
         *
         * If the remaining lifetime of the instance is more than the duration
         * then the timeout corresponds to the duration, otherwise it will be
         * reduced to the remaining instance lifetime.
         */
        if (adv_instance->timeout == 0 ||
            adv_instance->duration <= adv_instance->remaining_time)
                timeout = adv_instance->duration;
        else
                timeout = adv_instance->remaining_time;

        /* The remaining time is being reduced unless the instance is being
         * advertised without time limit.
         */
        if (adv_instance->timeout)
                adv_instance->remaining_time =
                                adv_instance->remaining_time - timeout;

        hdev->adv_instance_timeout = timeout;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->adv_instance_expire,
                           msecs_to_jiffies(timeout * 1000));

        /* If we're just re-scheduling the same instance again then do not
         * execute any HCI commands. This happens when a single instance is
         * being advertised.
         */
        if (!force && hdev->cur_adv_instance == instance &&
            hci_dev_test_flag(hdev, HCI_LE_ADV))
                return 0;

        hdev->cur_adv_instance = instance;
        __hci_req_update_adv_data(req, instance);
        __hci_req_update_scan_rsp_data(req, instance);
        __hci_req_enable_advertising(req);

        return 0;
}
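
/* Worked example (added for clarity): an instance with duration 10 s, a
 * non-zero timeout and remaining_time 25 s advertises for min(10, 25) =
 * 10 s per round, leaving remaining_time at 15 s; with timeout == 0 the
 * duration is used as-is and remaining_time is never decremented.
 */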

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
                                struct hci_request *req, u8 instance,
                                bool force)
{
        struct adv_info *adv_instance, *n, *next_instance = NULL;
        int err;
        u8 rem_inst;

        /* Cancel any timeout concerning the removed instance(s). */
        if (!instance || hdev->cur_adv_instance == instance)
                cancel_adv_timeout(hdev);

        /* Get the next instance to advertise BEFORE we remove
         * the current one. This can be the same instance again
         * if there is only one instance.
         */
        if (instance && hdev->cur_adv_instance == instance)
                next_instance = hci_get_next_instance(hdev, instance);

        if (instance == 0x00) {
                list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
                                         list) {
                        if (!(force || adv_instance->timeout))
                                continue;

                        rem_inst = adv_instance->instance;
                        err = hci_remove_adv_instance(hdev, rem_inst);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, rem_inst);
                }
        } else {
                adv_instance = hci_find_adv_instance(hdev, instance);

                if (force || (adv_instance && adv_instance->timeout &&
                              !adv_instance->remaining_time)) {
                        /* Don't advertise a removed instance. */
                        if (next_instance &&
                            next_instance->instance == instance)
                                next_instance = NULL;

                        err = hci_remove_adv_instance(hdev, instance);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, instance);
                }
        }

        if (!req || !hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        if (next_instance)
                __hci_req_schedule_adv_instance(req, next_instance->instance,
                                                false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              bool use_rpa, u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * the current RPA has expired or there is something other than
         * the current RPA in use, then generate a new one.
         */
        if (use_rpa) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        BT_ERR("%s failed to generate new RPA", hdev->name);
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from random six bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}
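
/* Summary of the address selection above (added for clarity):
 *
 *   use_rpa                      -> resolvable private address (RPA)
 *   require_privacy, no RPA      -> fresh non-resolvable private address
 *   static address forced/needed -> static random address
 *   otherwise                    -> public address
 */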

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->whitelist, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 scan;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (!hdev_is_powered(hdev))
                return;

        if (mgmt_powering_down(hdev))
                return;

        if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
            disconnected_whitelist_entries(hdev))
                scan = SCAN_PAGE;
        else
                scan = SCAN_DISABLED;

        if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                scan |= SCAN_INQUIRY;

        if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
            test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
                return;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        __hci_req_update_scan(req);
        hci_dev_unlock(req->hdev);
        return 0;
}

static void scan_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

        hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        hci_dev_lock(hdev);

        __hci_req_update_scan(req);

        /* If BR/EDR is not enabled and we disable advertising as a
         * by-product of disabling connectable, we need to update the
         * advertising flags.
         */
        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                __hci_req_update_adv_data(req, hdev->cur_adv_instance);

        /* Update the advertising parameters if necessary */
        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            !list_empty(&hdev->adv_instances))
                __hci_req_enable_advertising(req);

        __hci_update_background_scan(req);

        hci_dev_unlock(hdev);

        return 0;
}

static void connectable_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            connectable_update);
        u8 status;

        hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
        mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
        struct bt_uuid *uuid;
        u8 val = 0;

        list_for_each_entry(uuid, &hdev->uuids, list)
                val |= uuid->svc_hint;

        return val;
}

void __hci_req_update_class(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 cod[3];

        BT_DBG("%s", hdev->name);

        if (!hdev_is_powered(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        cod[0] = hdev->minor_class;
        cod[1] = hdev->major_class;
        cod[2] = get_service_classes(hdev);

        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                cod[1] |= 0x20;

        if (memcmp(cod, hdev->dev_class, 3) == 0)
                return;

        hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void write_iac(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_current_iac_lap cp;

        if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                return;

        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
                /* Limited discoverable mode */
1647                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1648                 cp.iac_lap[0] = 0x00;   /* LIAC */
1649                 cp.iac_lap[1] = 0x8b;
1650                 cp.iac_lap[2] = 0x9e;
1651                 cp.iac_lap[3] = 0x33;   /* GIAC */
1652                 cp.iac_lap[4] = 0x8b;
1653                 cp.iac_lap[5] = 0x9e;
1654         } else {
1655                 /* General discoverable mode */
1656                 cp.num_iac = 1;
1657                 cp.iac_lap[0] = 0x33;   /* GIAC */
1658                 cp.iac_lap[1] = 0x8b;
1659                 cp.iac_lap[2] = 0x9e;
1660         }
1661
1662         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1663                     (cp.num_iac * 3) + 1, &cp);
1664 }
1665
1666 static int discoverable_update(struct hci_request *req, unsigned long opt)
1667 {
1668         struct hci_dev *hdev = req->hdev;
1669
1670         hci_dev_lock(hdev);
1671
1672         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1673                 write_iac(req);
1674                 __hci_req_update_scan(req);
1675                 __hci_req_update_class(req);
1676         }
1677
1678         /* Advertising instances don't use the global discoverable setting, so
1679          * only update AD if advertising was enabled using Set Advertising.
1680          */
1681         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1682                 __hci_req_update_adv_data(req, 0x00);
1683
1684                 /* Discoverable mode affects the local advertising
1685                  * address in limited privacy mode.
1686                  */
1687                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1688                         __hci_req_enable_advertising(req);
1689         }
1690
1691         hci_dev_unlock(hdev);
1692
1693         return 0;
1694 }
1695
1696 static void discoverable_update_work(struct work_struct *work)
1697 {
1698         struct hci_dev *hdev = container_of(work, struct hci_dev,
1699                                             discoverable_update);
1700         u8 status;
1701
1702         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1703         mgmt_set_discoverable_complete(hdev, status);
1704 }
1705
1706 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1707                       u8 reason)
1708 {
1709         switch (conn->state) {
1710         case BT_CONNECTED:
1711         case BT_CONFIG:
1712                 if (conn->type == AMP_LINK) {
1713                         struct hci_cp_disconn_phy_link cp;
1714
1715                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1716                         cp.reason = reason;
1717                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1718                                     &cp);
1719                 } else {
1720                         struct hci_cp_disconnect dc;
1721
1722                         dc.handle = cpu_to_le16(conn->handle);
1723                         dc.reason = reason;
1724                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1725                 }
1726
1727                 conn->state = BT_DISCONN;
1728
1729                 break;
1730         case BT_CONNECT:
1731                 if (conn->type == LE_LINK) {
1732                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1733                                 break;
1734                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1735                                     0, NULL);
1736                 } else if (conn->type == ACL_LINK) {
1737                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1738                                 break;
1739                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1740                                     6, &conn->dst);
1741                 }
1742                 break;
1743         case BT_CONNECT2:
1744                 if (conn->type == ACL_LINK) {
1745                         struct hci_cp_reject_conn_req rej;
1746
1747                         bacpy(&rej.bdaddr, &conn->dst);
1748                         rej.reason = reason;
1749
1750                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1751                                     sizeof(rej), &rej);
1752                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1753                         struct hci_cp_reject_sync_conn_req rej;
1754
1755                         bacpy(&rej.bdaddr, &conn->dst);
1756
1757                         /* SCO rejection has its own limited set of
1758                          * allowed error values (0x0D-0x0F) which isn't
1759                          * compatible with most values passed to this
1760                          * function. To be safe hard-code one of the
1761                          * values that's suitable for SCO.
1762                          */
1763                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
1764
1765                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1766                                     sizeof(rej), &rej);
1767                 }
1768                 break;
1769         default:
1770                 conn->state = BT_CLOSED;
1771                 break;
1772         }
1773 }
1774
1775 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1776 {
1777         if (status)
1778                 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1779 }
1780
1781 int hci_abort_conn(struct hci_conn *conn, u8 reason)
1782 {
1783         struct hci_request req;
1784         int err;
1785
1786         hci_req_init(&req, conn->hdev);
1787
1788         __hci_abort_conn(&req, conn, reason);
1789
1790         err = hci_req_run(&req, abort_conn_complete);
1791         if (err && err != -ENODATA) {
1792                 BT_ERR("Failed to run HCI request: err %d", err);
1793                 return err;
1794         }
1795
1796         return 0;
1797 }
1798
1799 static int update_bg_scan(struct hci_request *req, unsigned long opt)
1800 {
1801         hci_dev_lock(req->hdev);
1802         __hci_update_background_scan(req);
1803         hci_dev_unlock(req->hdev);
1804         return 0;
1805 }
1806
1807 static void bg_scan_update(struct work_struct *work)
1808 {
1809         struct hci_dev *hdev = container_of(work, struct hci_dev,
1810                                             bg_scan_update);
1811         struct hci_conn *conn;
1812         u8 status;
1813         int err;
1814
1815         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1816         if (!err)
1817                 return;
1818
1819         hci_dev_lock(hdev);
1820
1821         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1822         if (conn)
1823                 hci_le_conn_failed(conn, status);
1824
1825         hci_dev_unlock(hdev);
1826 }
1827
1828 static int le_scan_disable(struct hci_request *req, unsigned long opt)
1829 {
1830         hci_req_add_le_scan_disable(req);
1831         return 0;
1832 }
1833
1834 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1835 {
1836         u8 length = opt;
1837         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1838         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1839         struct hci_cp_inquiry cp;
1840
1841         BT_DBG("%s", req->hdev->name);
1842
1843         hci_dev_lock(req->hdev);
1844         hci_inquiry_cache_flush(req->hdev);
1845         hci_dev_unlock(req->hdev);
1846
1847         memset(&cp, 0, sizeof(cp));
1848
1849         if (req->hdev->discovery.limited)
1850                 memcpy(&cp.lap, liac, sizeof(cp.lap));
1851         else
1852                 memcpy(&cp.lap, giac, sizeof(cp.lap));
1853
1854         cp.length = length;
1855
1856         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1857
1858         return 0;
1859 }
1860
1861 static void le_scan_disable_work(struct work_struct *work)
1862 {
1863         struct hci_dev *hdev = container_of(work, struct hci_dev,
1864                                             le_scan_disable.work);
1865         u8 status;
1866
1867         BT_DBG("%s", hdev->name);
1868
1869         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1870                 return;
1871
1872         cancel_delayed_work(&hdev->le_scan_restart);
1873
1874         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1875         if (status) {
1876                 BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1877                 return;
1878         }
1879
1880         hdev->discovery.scan_start = 0;
1881
1882         /* If we were running an LE-only scan, change the discovery
1883          * state. If we were running both LE scan and BR/EDR inquiry
1884          * simultaneously and the BR/EDR inquiry has already finished,
1885          * stop discovery; otherwise the BR/EDR inquiry will stop
1886          * discovery when it finishes. If a remote device name is
1887          * still being resolved, do not change the discovery state.
1888          */
1889
1890         if (hdev->discovery.type == DISCOV_TYPE_LE)
1891                 goto discov_stopped;
1892
1893         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1894                 return;
1895
1896         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1897                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1898                     hdev->discovery.state != DISCOVERY_RESOLVING)
1899                         goto discov_stopped;
1900
1901                 return;
1902         }
1903
1904         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1905                      HCI_CMD_TIMEOUT, &status);
1906         if (status) {
1907                 BT_ERR("Inquiry failed: status 0x%02x", status);
1908                 goto discov_stopped;
1909         }
1910
1911         return;
1912
1913 discov_stopped:
1914         hci_dev_lock(hdev);
1915         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1916         hci_dev_unlock(hdev);
1917 }
1918
1919 static int le_scan_restart(struct hci_request *req, unsigned long opt)
1920 {
1921         struct hci_dev *hdev = req->hdev;
1922         struct hci_cp_le_set_scan_enable cp;
1923
1924         /* If the controller is not scanning, we are done. */
1925         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1926                 return 0;
1927
1928         hci_req_add_le_scan_disable(req);
1929
1930         memset(&cp, 0, sizeof(cp));
1931         cp.enable = LE_SCAN_ENABLE;
1932         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1933         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1934
1935         return 0;
1936 }
1937
1938 static void le_scan_restart_work(struct work_struct *work)
1939 {
1940         struct hci_dev *hdev = container_of(work, struct hci_dev,
1941                                             le_scan_restart.work);
1942         unsigned long timeout, duration, scan_start, now;
1943         u8 status;
1944
1945         BT_DBG("%s", hdev->name);
1946
1947         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1948         if (status) {
1949                 BT_ERR("Failed to restart LE scan: status 0x%02x", status);
1950                 return;
1951         }
1952
1953         hci_dev_lock(hdev);
1954
1955         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1956             !hdev->discovery.scan_start)
1957                 goto unlock;
1958
1959         /* When the scan was started, hdev->le_scan_disable was queued to
1960          * run scan_duration after scan_start. That job was canceled for
1961          * this restart, so queue it again with the remaining timeout to
1962          * make sure the scan does not run indefinitely.
1963          */
1964         duration = hdev->discovery.scan_duration;
1965         scan_start = hdev->discovery.scan_start;
1966         now = jiffies;
1967         if (now - scan_start <= duration) {
1968                 int elapsed;
1969
1970                 if (now >= scan_start)
1971                         elapsed = now - scan_start;
1972                 else
1973                         elapsed = ULONG_MAX - scan_start + now;
1974
1975                 timeout = duration - elapsed;
1976         } else {
1977                 timeout = 0;
1978         }
1979
1980         queue_delayed_work(hdev->req_workqueue,
1981                            &hdev->le_scan_disable, timeout);
1982
1983 unlock:
1984         hci_dev_unlock(hdev);
1985 }
1986
1987 static void disable_advertising(struct hci_request *req)
1988 {
1989         u8 enable = 0x00;
1990
1991         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1992 }
1993
1994 static int active_scan(struct hci_request *req, unsigned long opt)
1995 {
1996         uint16_t interval = opt;
1997         struct hci_dev *hdev = req->hdev;
1998         struct hci_cp_le_set_scan_param param_cp;
1999         struct hci_cp_le_set_scan_enable enable_cp;
2000         u8 own_addr_type;
2001         int err;
2002
2003         BT_DBG("%s", hdev->name);
2004
2005         if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2006                 hci_dev_lock(hdev);
2007
2008                 /* Don't let discovery abort an outgoing connection attempt
2009                  * that's using directed advertising.
2010                  */
2011                 if (hci_lookup_le_connect(hdev)) {
2012                         hci_dev_unlock(hdev);
2013                         return -EBUSY;
2014                 }
2015
2016                 cancel_adv_timeout(hdev);
2017                 hci_dev_unlock(hdev);
2018
2019                 disable_advertising(req);
2020         }
2021
2022         /* If the controller is already scanning, the background scan is
2023          * running, so temporarily stop it in order to set the discovery
2024          * scan parameters.
2025          */
2026         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2027                 hci_req_add_le_scan_disable(req);
2028
2029         /* All active scans will be done with either a resolvable private
2030          * address (when the privacy feature has been enabled) or a
2031          * non-resolvable private address.
2032          */
2033         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2034                                         &own_addr_type);
2035         if (err < 0)
2036                 own_addr_type = ADDR_LE_DEV_PUBLIC;
2037
2038         memset(&param_cp, 0, sizeof(param_cp));
2039         param_cp.type = LE_SCAN_ACTIVE;
2040         param_cp.interval = cpu_to_le16(interval);
2041         param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2042         param_cp.own_address_type = own_addr_type;
2043
2044         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2045                     &param_cp);
2046
2047         memset(&enable_cp, 0, sizeof(enable_cp));
2048         enable_cp.enable = LE_SCAN_ENABLE;
2049         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2050
2051         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2052                     &enable_cp);
2053
2054         return 0;
2055 }
2056
2057 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2058 {
2059         int err;
2060
2061         BT_DBG("%s", req->hdev->name);
2062
2063         err = active_scan(req, opt);
2064         if (err)
2065                 return err;
2066
2067         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2068 }
2069
2070 static void start_discovery(struct hci_dev *hdev, u8 *status)
2071 {
2072         unsigned long timeout;
2073
2074         BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2075
2076         switch (hdev->discovery.type) {
2077         case DISCOV_TYPE_BREDR:
2078                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2079                         hci_req_sync(hdev, bredr_inquiry,
2080                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2081                                      status);
2082                 return;
2083         case DISCOV_TYPE_INTERLEAVED:
2084                 /* When running simultaneous discovery, the LE scanning time
2085                  * should occupy the whole discovery time since BR/EDR inquiry
2086                  * and LE scanning are scheduled by the controller.
2087                  *
2088                  * With interleaved discovery, in comparison, BR/EDR inquiry
2089                  * and LE scanning are done sequentially with separate
2090                  * timeouts.
2091                  */
2092                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2093                              &hdev->quirks)) {
2094                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2095                         /* During simultaneous discovery, we double LE scan
2096                          * interval. We must leave some time for the controller
2097                          * to do BR/EDR inquiry.
2098                          */
2099                         hci_req_sync(hdev, interleaved_discov,
2100                                      DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2101                                      status);
2102                         break;
2103                 }
2104
2105                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2106                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2107                              HCI_CMD_TIMEOUT, status);
2108                 break;
2109         case DISCOV_TYPE_LE:
2110                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2111                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2112                              HCI_CMD_TIMEOUT, status);
2113                 break;
2114         default:
2115                 *status = HCI_ERROR_UNSPECIFIED;
2116                 return;
2117         }
2118
2119         if (*status)
2120                 return;
2121
2122         BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2123
2124         /* When service discovery is used and the controller has a
2125          * strict duplicate filter, it is important to remember the
2126          * start and duration of the scan. This is required for
2127          * restarting scanning during the discovery phase.
2128          */
2129         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2130                      hdev->discovery.result_filtering) {
2131                 hdev->discovery.scan_start = jiffies;
2132                 hdev->discovery.scan_duration = timeout;
2133         }
2134
2135         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2136                            timeout);
2137 }
2138
2139 bool hci_req_stop_discovery(struct hci_request *req)
2140 {
2141         struct hci_dev *hdev = req->hdev;
2142         struct discovery_state *d = &hdev->discovery;
2143         struct hci_cp_remote_name_req_cancel cp;
2144         struct inquiry_entry *e;
2145         bool ret = false;
2146
2147         BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2148
2149         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2150                 if (test_bit(HCI_INQUIRY, &hdev->flags))
2151                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2152
2153                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2154                         cancel_delayed_work(&hdev->le_scan_disable);
2155                         hci_req_add_le_scan_disable(req);
2156                 }
2157
2158                 ret = true;
2159         } else {
2160                 /* Passive scanning */
2161                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2162                         hci_req_add_le_scan_disable(req);
2163                         ret = true;
2164                 }
2165         }
2166
2167         /* No further actions needed for LE-only discovery */
2168         if (d->type == DISCOV_TYPE_LE)
2169                 return ret;
2170
2171         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2172                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2173                                                      NAME_PENDING);
2174                 if (!e)
2175                         return ret;
2176
2177                 bacpy(&cp.bdaddr, &e->data.bdaddr);
2178                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2179                             &cp);
2180                 ret = true;
2181         }
2182
2183         return ret;
2184 }
2185
2186 static int stop_discovery(struct hci_request *req, unsigned long opt)
2187 {
2188         hci_dev_lock(req->hdev);
2189         hci_req_stop_discovery(req);
2190         hci_dev_unlock(req->hdev);
2191
2192         return 0;
2193 }
2194
2195 static void discov_update(struct work_struct *work)
2196 {
2197         struct hci_dev *hdev = container_of(work, struct hci_dev,
2198                                             discov_update);
2199         u8 status = 0;
2200
2201         switch (hdev->discovery.state) {
2202         case DISCOVERY_STARTING:
2203                 start_discovery(hdev, &status);
2204                 mgmt_start_discovery_complete(hdev, status);
2205                 if (status)
2206                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2207                 else
2208                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2209                 break;
2210         case DISCOVERY_STOPPING:
2211                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2212                 mgmt_stop_discovery_complete(hdev, status);
2213                 if (!status)
2214                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2215                 break;
2216         case DISCOVERY_STOPPED:
2217         default:
2218                 return;
2219         }
2220 }
2221
2222 static void discov_off(struct work_struct *work)
2223 {
2224         struct hci_dev *hdev = container_of(work, struct hci_dev,
2225                                             discov_off.work);
2226
2227         BT_DBG("%s", hdev->name);
2228
2229         hci_dev_lock(hdev);
2230
2231         /* When the discoverable timeout triggers, just make sure
2232          * the limited discoverable flag is cleared. Even in the case
2233          * of a timeout triggered from general discoverable mode, it
2234          * is safe to unconditionally clear the flag.
2235          */
2236         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2237         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2238         hdev->discov_timeout = 0;
2239
2240         hci_dev_unlock(hdev);
2241
2242         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2243         mgmt_new_settings(hdev);
2244 }
2245
2246 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2247 {
2248         struct hci_dev *hdev = req->hdev;
2249         u8 link_sec;
2250
2251         hci_dev_lock(hdev);
2252
2253         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2254             !lmp_host_ssp_capable(hdev)) {
2255                 u8 mode = 0x01;
2256
2257                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2258
2259                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2260                         u8 support = 0x01;
2261
2262                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2263                                     sizeof(support), &support);
2264                 }
2265         }
2266
2267         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2268             lmp_bredr_capable(hdev)) {
2269                 struct hci_cp_write_le_host_supported cp;
2270
2271                 cp.le = 0x01;
2272                 cp.simul = 0x00;
2273
2274                 /* Check first if we already have the right
2275                  * host state (host features set)
2276                  */
2277                 if (cp.le != lmp_host_le_capable(hdev) ||
2278                     cp.simul != lmp_host_le_br_capable(hdev))
2279                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2280                                     sizeof(cp), &cp);
2281         }
2282
2283         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2284                 /* Make sure the controller has a good default for
2285                  * advertising data. This also applies to the case
2286                  * where BR/EDR was toggled during the AUTO_OFF phase.
2287                  */
2288                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2289                     list_empty(&hdev->adv_instances)) {
2290                         __hci_req_update_adv_data(req, 0x00);
2291                         __hci_req_update_scan_rsp_data(req, 0x00);
2292
2293                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2294                                 __hci_req_enable_advertising(req);
2295                 } else if (!list_empty(&hdev->adv_instances)) {
2296                         struct adv_info *adv_instance;
2297
2298                         adv_instance = list_first_entry(&hdev->adv_instances,
2299                                                         struct adv_info, list);
2300                         __hci_req_schedule_adv_instance(req,
2301                                                         adv_instance->instance,
2302                                                         true);
2303                 }
2304         }
2305
2306         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2307         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2308                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2309                             sizeof(link_sec), &link_sec);
2310
2311         if (lmp_bredr_capable(hdev)) {
2312                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2313                         __hci_req_write_fast_connectable(req, true);
2314                 else
2315                         __hci_req_write_fast_connectable(req, false);
2316                 __hci_req_update_scan(req);
2317                 __hci_req_update_class(req);
2318                 __hci_req_update_name(req);
2319                 __hci_req_update_eir(req);
2320         }
2321
2322         hci_dev_unlock(hdev);
2323         return 0;
2324 }
2325
2326 int __hci_req_hci_power_on(struct hci_dev *hdev)
2327 {
2328         /* Register the available SMP channels (BR/EDR and LE) only when
2329          * successfully powering on the controller. This late
2330          * registration is required so that LE SMP can clearly decide if
2331          * the public address or static address is used.
2332          */
2333         smp_register(hdev);
2334
2335         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2336                               NULL);
2337 }
2338
2339 void hci_request_setup(struct hci_dev *hdev)
2340 {
2341         INIT_WORK(&hdev->discov_update, discov_update);
2342         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2343         INIT_WORK(&hdev->scan_update, scan_update_work);
2344         INIT_WORK(&hdev->connectable_update, connectable_update_work);
2345         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2346         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2347         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2348         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2349         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2350 }
2351
2352 void hci_request_cancel_all(struct hci_dev *hdev)
2353 {
2354         hci_req_sync_cancel(hdev, ENODEV);
2355
2356         cancel_work_sync(&hdev->discov_update);
2357         cancel_work_sync(&hdev->bg_scan_update);
2358         cancel_work_sync(&hdev->scan_update);
2359         cancel_work_sync(&hdev->connectable_update);
2360         cancel_work_sync(&hdev->discoverable_update);
2361         cancel_delayed_work_sync(&hdev->discov_off);
2362         cancel_delayed_work_sync(&hdev->le_scan_disable);
2363         cancel_delayed_work_sync(&hdev->le_scan_restart);
2364
2365         if (hdev->adv_instance_timeout) {
2366                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2367                 hdev->adv_instance_timeout = 0;
2368         }
2369 }