/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

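/* Internal states of a synchronous request, tracked in hdev->req_status and
 * consumed by the __hci_req_sync() and __hci_cmd_sync_ev() helpers below.
 */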
#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

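/* A struct hci_request batches HCI commands so they can be handed to the
 * controller as one unit and completed with a single callback. The typical
 * pattern, as used throughout this file, is (complete_cb is a placeholder
 * name for any hci_req_complete_t callback, and may be NULL):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_..., plen, param);
 *	hci_req_run(&req, complete_cb);
 */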
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

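/* Splice all commands collected in @req onto the device command queue and
 * kick the command work. The completion callback, if any, is attached to
 * the last skb of the request so that it fires once the whole batch has
 * been processed.
 */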
static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

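/* Completion handler used by the synchronous helpers: it records the
 * result in hdev->req_result, keeps a reference to the response skb (if
 * any) and wakes up the thread sleeping on hdev->req_wait_q.
 */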
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

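/* Send a single HCI command and sleep until the matching response (or, if
 * @event is non-zero, that specific event) arrives or @timeout jiffies
 * elapse. Returns the response skb on success and an ERR_PTR() otherwise.
 *
 * An illustrative call through the __hci_cmd_sync() wrapper below (the
 * opcode and timeout are just examples):
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 */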
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. Callers are expected to
 * serialize through hci_req_sync_lock(); hci_req_sync() below does
 * exactly that.
 */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}

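/* Allocate an skb carrying a single HCI command: the command header with
 * @opcode and @plen is followed by the @plen bytes of @param. Returns NULL
 * on allocation failure.
 */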
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

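/* Fast connectable mode trades power for connection latency by using
 * interlaced page scanning with a short interval. Interval and window are
 * in units of 0.625 ms, so 0x0100 is 160 ms and 0x0800 is 1.28 s.
 */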
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

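/* The create_uuid*_list() helpers below fill EIR "Service Class UUID"
 * fields from hdev->uuids. Each list starts out tagged as EIR_UUID*_ALL
 * and is downgraded to EIR_UUID*_SOME once the remaining buffer space
 * cannot hold another UUID.
 */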
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

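/* Synchronize the controller white list with the pending LE connection and
 * report lists. Returns the scan filter policy to use: 0x01 when the white
 * list covers all entries, 0x00 when scanning must accept all advertising
 * (the list is too small or an RPA-using device is involved).
 */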
static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        uint8_t white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                struct hci_cp_le_del_from_white_list cp;

                if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                              &b->bdaddr, b->bdaddr_type) ||
                    hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                              &b->bdaddr, b->bdaddr_type)) {
                        white_list_entries++;
                        continue;
                }

                cp.bdaddr_type = b->bdaddr_type;
                bacpy(&cp.bdaddr, &b->bdaddr);

                hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                            sizeof(cp), &cp);
        }

        /* Since all white list entries that are no longer valid have
         * been removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the number of
         * available white list entries in the controller, then
         * just abort and return a filter policy value that does not
         * use the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using random resolvable addresses and
         * with that having LE privacy enabled, then controllers with
         * Extended Scanner Filter Policies support can now enable support
         * for handling directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

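/* Resolve the MGMT advertising flags for @instance. Instance 0 is the
 * default instance whose flags are derived from global device settings;
 * any other instance carries its own flags as set when it was added.
 */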
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

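/* Program the advertising parameters for the current instance and enable
 * advertising. The PDU type is picked from the instance state: ADV_IND
 * when connectable, ADV_SCAN_IND when scan response data is present, and
 * ADV_NONCONN_IND otherwise.
 */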
void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u32 flags;

        if (hci_conn_num(hdev, LE_LINK) > 0)
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

        if (connectable)
                cp.type = LE_ADV_IND;
        else if (get_cur_adv_instance_scan_rsp_len(hdev))
                cp.type = LE_ADV_SCAN_IND;
        else
                cp.type = LE_ADV_NONCONN_IND;

        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0;
        size_t name_len;

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Set the appropriate entries based on advertising instance flags
         * here once flags other than 0 are supported.
         */
        memcpy(ptr, adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        return adv_instance->scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_rsp_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        if (instance)
                len = create_instance_scan_rsp_data(hdev, instance, cp.data);
        else
                len = create_default_scan_rsp_data(hdev, cp.data);

        if (hdev->scan_rsp_data_len == len &&
            !memcmp(cp.data, hdev->scan_rsp_data, len))
                return;

        memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
        hdev->scan_rsp_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
        struct adv_info *adv_instance = NULL;
        u8 ad_len = 0, flags = 0;
        u32 instance_flags;

        /* Return 0 when the current instance identifier is invalid. */
        if (instance) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return 0;
        }

        instance_flags = get_adv_instance_flags(hdev, instance);

        /* The Add Advertising command allows userspace to set both the general
         * and limited discoverable flags.
         */
        if (instance_flags & MGMT_ADV_FLAG_DISCOV)
                flags |= LE_AD_GENERAL;

        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;

        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
                 */
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);

                if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                        flags |= LE_AD_NO_BREDR;

                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field.
                 */
                if (flags) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_FLAGS;
                        ptr[2] = flags;

                        ad_len += 3;
                        ptr += 3;
                }
        }

        if (adv_instance) {
                memcpy(ptr, adv_instance->adv_data,
                       adv_instance->adv_data_len);
                ad_len += adv_instance->adv_data_len;
                ptr += adv_instance->adv_data_len;
        }

        /* Provide Tx Power only if we can provide a valid value for it */
        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
            (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
                ptr[0] = 0x02;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8)hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_instance_adv_data(hdev, instance, cp.data);

        /* There's nothing to do if the data hasn't changed */
        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                __hci_req_update_adv_data(&req, 0x00);
                __hci_req_update_scan_rsp_data(&req, 0x00);
                __hci_req_enable_advertising(&req);
        }

        hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

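/* Make @instance the current advertising instance and (re)start its expiry
 * timer. Returns -EPERM when the Set Advertising setting is active or no
 * instances exist, -EBUSY while another instance timeout is still running,
 * and -ENOENT when @instance is unknown.
 */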
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
                                    bool force)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = NULL;
        u16 timeout;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            list_empty(&hdev->adv_instances))
                return -EPERM;

        if (hdev->adv_instance_timeout)
                return -EBUSY;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        /* A zero timeout means unlimited advertising. As long as there is
         * only one instance, duration should be ignored. We still set a timeout
         * in case further instances are being added later on.
         *
         * If the remaining lifetime of the instance is more than the duration
         * then the timeout corresponds to the duration, otherwise it will be
         * reduced to the remaining instance lifetime.
         */
        if (adv_instance->timeout == 0 ||
            adv_instance->duration <= adv_instance->remaining_time)
                timeout = adv_instance->duration;
        else
                timeout = adv_instance->remaining_time;

        /* The remaining time is being reduced unless the instance is being
         * advertised without time limit.
         */
        if (adv_instance->timeout)
                adv_instance->remaining_time =
                                adv_instance->remaining_time - timeout;

        hdev->adv_instance_timeout = timeout;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->adv_instance_expire,
                           msecs_to_jiffies(timeout * 1000));

        /* If we're just re-scheduling the same instance again then do not
         * execute any HCI commands. This happens when a single instance is
         * being advertised.
         */
        if (!force && hdev->cur_adv_instance == instance &&
            hci_dev_test_flag(hdev, HCI_LE_ADV))
                return 0;

        hdev->cur_adv_instance = instance;
        __hci_req_update_adv_data(req, instance);
        __hci_req_update_scan_rsp_data(req, instance);
        __hci_req_enable_advertising(req);

        return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: The instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
                                u8 instance, bool force)
{
        struct adv_info *adv_instance, *n, *next_instance = NULL;
        int err;
        u8 rem_inst;

        /* Cancel any timeout concerning the removed instance(s). */
        if (!instance || hdev->cur_adv_instance == instance)
                cancel_adv_timeout(hdev);

        /* Get the next instance to advertise BEFORE we remove
         * the current one. This can be the same instance again
         * if there is only one instance.
         */
        if (instance && hdev->cur_adv_instance == instance)
                next_instance = hci_get_next_instance(hdev, instance);

        if (instance == 0x00) {
                list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
                                         list) {
                        if (!(force || adv_instance->timeout))
                                continue;

                        rem_inst = adv_instance->instance;
                        err = hci_remove_adv_instance(hdev, rem_inst);
                        if (!err)
                                mgmt_advertising_removed(NULL, hdev, rem_inst);
                }
        } else {
                adv_instance = hci_find_adv_instance(hdev, instance);

                if (force || (adv_instance && adv_instance->timeout &&
                              !adv_instance->remaining_time)) {
                        /* Don't advertise a removed instance. */
                        if (next_instance &&
                            next_instance->instance == instance)
                                next_instance = NULL;

                        err = hci_remove_adv_instance(hdev, instance);
                        if (!err)
                                mgmt_advertising_removed(NULL, hdev, instance);
                }
        }

        if (!req || !hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        if (next_instance)
                __hci_req_schedule_adv_instance(req, next_instance->instance,
                                                false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

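/* Pick the own address type (and, if needed, queue a Set Random Address
 * command) for the next advertising or scanning operation. The order of
 * preference is: a resolvable private address when privacy is enabled, a
 * non-resolvable private address when @require_privacy is set, the
 * configured static address, and finally the public address.
 */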
int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * current RPA has expired or there is something else than
         * the current RPA in use, then generate a new one.
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        BT_ERR("%s failed to generate new RPA", hdev->name);
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case privacy is required but no resolvable private address
         * is available, use a non-resolvable private address. This is
         * useful for active scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from six random bytes with the two most
                         * significant bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

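/* Check whether any device on the BR/EDR whitelist currently lacks an
 * active ACL connection; if so, page scanning must stay enabled so that
 * the device is able to reconnect.
 */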
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->whitelist, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

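/* Compute the desired BR/EDR scan mode (page and/or inquiry scan) from the
 * connectable and discoverable settings and queue a Write Scan Enable
 * command if it differs from what the controller currently has.
 */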
void __hci_req_update_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 scan;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (!hdev_is_powered(hdev))
                return;

        if (mgmt_powering_down(hdev))
                return;

        if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
            disconnected_whitelist_entries(hdev))
                scan = SCAN_PAGE;
        else
                scan = SCAN_DISABLED;

        if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                scan |= SCAN_INQUIRY;

        if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
            test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
                return;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        __hci_req_update_scan(req);
        hci_dev_unlock(req->hdev);
        return 0;
}

static void scan_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

        hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        hci_dev_lock(hdev);

        __hci_req_update_scan(req);

        /* If BR/EDR is not enabled and we disable advertising as a
         * by-product of disabling connectable, we need to update the
         * advertising flags.
         */
        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                __hci_req_update_adv_data(req, hdev->cur_adv_instance);

        /* Update the advertising parameters if necessary */
        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            !list_empty(&hdev->adv_instances))
                __hci_req_enable_advertising(req);

        __hci_update_background_scan(req);

        hci_dev_unlock(hdev);

        return 0;
}

static void connectable_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            connectable_update);
        u8 status;

        hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
        mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
        struct bt_uuid *uuid;
        u8 val = 0;

        list_for_each_entry(uuid, &hdev->uuids, list)
                val |= uuid->svc_hint;

        return val;
}

void __hci_req_update_class(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 cod[3];

        BT_DBG("%s", hdev->name);

        if (!hdev_is_powered(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        cod[0] = hdev->minor_class;
        cod[1] = hdev->major_class;
        cod[2] = get_service_classes(hdev);

        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                cod[1] |= 0x20;

        if (memcmp(cod, hdev->dev_class, 3) == 0)
                return;

        hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

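/* Program the Inquiry Access Codes matching the discoverable mode: the
 * limited (LIAC, 0x9e8b00) plus general (GIAC, 0x9e8b33) LAPs for limited
 * discoverable mode, or only the GIAC for general discoverable mode. The
 * LAP bytes are written in little-endian order.
 */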
static void write_iac(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_current_iac_lap cp;

        if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                return;

        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
                /* Limited discoverable mode */
                cp.num_iac = min_t(u8, hdev->num_iac, 2);
                cp.iac_lap[0] = 0x00;   /* LIAC */
                cp.iac_lap[1] = 0x8b;
                cp.iac_lap[2] = 0x9e;
                cp.iac_lap[3] = 0x33;   /* GIAC */
                cp.iac_lap[4] = 0x8b;
                cp.iac_lap[5] = 0x9e;
        } else {
                /* General discoverable mode */
                cp.num_iac = 1;
                cp.iac_lap[0] = 0x33;   /* GIAC */
                cp.iac_lap[1] = 0x8b;
                cp.iac_lap[2] = 0x9e;
        }

        hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
                    (cp.num_iac * 3) + 1, &cp);
}

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                write_iac(req);
                __hci_req_update_scan(req);
                __hci_req_update_class(req);
        }

        /* Advertising instances don't use the global discoverable setting, so
         * only update AD if advertising was enabled using Set Advertising.
         */
        if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
                __hci_req_update_adv_data(req, 0x00);

        hci_dev_unlock(hdev);

        return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discoverable_update);
        u8 status;

        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
        mgmt_set_discoverable_complete(hdev, status);
}

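/* Queue the HCI command that matches the current connection state:
 * Disconnect for established links, Create Connection Cancel (or LE
 * Create Connection Cancel) for outgoing attempts, and Reject
 * Connection Request for incoming ones.
 */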
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
                      u8 reason)
{
        switch (conn->state) {
        case BT_CONNECTED:
        case BT_CONFIG:
                if (conn->type == AMP_LINK) {
                        struct hci_cp_disconn_phy_link cp;

                        cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
                        cp.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
                                    &cp);
                } else {
                        struct hci_cp_disconnect dc;

                        dc.handle = cpu_to_le16(conn->handle);
                        dc.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
                }

                conn->state = BT_DISCONN;

                break;
        case BT_CONNECT:
                if (conn->type == LE_LINK) {
                        if (test_bit(HCI_CONN_SCANNING, &conn->flags))
                                break;
                        hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
                                    0, NULL);
                } else if (conn->type == ACL_LINK) {
                        if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
                                break;
                        hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
                                    6, &conn->dst);
                }
                break;
        case BT_CONNECT2:
                if (conn->type == ACL_LINK) {
                        struct hci_cp_reject_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);
                        rej.reason = reason;

                        hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
                                    sizeof(rej), &rej);
                } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
                        struct hci_cp_reject_sync_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);

                        /* SCO rejection has its own limited set of
                         * allowed error values (0x0D-0x0F), which isn't
                         * compatible with most values passed to this
                         * function. To be safe, hard-code one of the
                         * values that is suitable for SCO.
                         */
                        rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

                        hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
                                    sizeof(rej), &rej);
                }
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        if (status)
                BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
        struct hci_request req;
        int err;

        hci_req_init(&req, conn->hdev);

        __hci_abort_conn(&req, conn, reason);

        err = hci_req_run(&req, abort_conn_complete);
        if (err && err != -ENODATA) {
                BT_ERR("Failed to run HCI request: err %d", err);
                return err;
        }

        return 0;
}

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        __hci_update_background_scan(req);
        hci_dev_unlock(req->hdev);
        return 0;
}

static void bg_scan_update(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            bg_scan_update);
        struct hci_conn *conn;
        u8 status;
        int err;

        err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
        if (!err)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
        if (conn)
                hci_le_conn_failed(conn, status);

        hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
        hci_req_add_le_scan_disable(req);
        return 0;
}

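/* Flush the inquiry cache and start a BR/EDR inquiry using either the
 * limited or the general inquiry access code. The length passed in opt
 * is in units of 1.28 seconds, as defined for the HCI Inquiry command.
 */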
static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
        u8 length = opt;
        const u8 giac[3] = { 0x33, 0x8b, 0x9e };
        const u8 liac[3] = { 0x00, 0x8b, 0x9e };
        struct hci_cp_inquiry cp;

        BT_DBG("%s", req->hdev->name);

        hci_dev_lock(req->hdev);
        hci_inquiry_cache_flush(req->hdev);
        hci_dev_unlock(req->hdev);

        memset(&cp, 0, sizeof(cp));

        if (req->hdev->discovery.limited)
                memcpy(&cp.lap, liac, sizeof(cp.lap));
        else
                memcpy(&cp.lap, giac, sizeof(cp.lap));

        cp.length = length;

        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        u8 status;

        BT_DBG("%s", hdev->name);

        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return;

        cancel_delayed_work(&hdev->le_scan_restart);

        hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Failed to disable LE scan: status 0x%02x", status);
                return;
        }

        hdev->discovery.scan_start = 0;

        /* If we were running an LE-only scan, change the discovery state.
         * If we were running both LE and BR/EDR inquiry simultaneously,
         * and BR/EDR inquiry is already finished, stop discovery;
         * otherwise BR/EDR inquiry will stop discovery when it finishes.
         * If we are about to resolve a remote device name, do not change
         * the discovery state.
         */

        if (hdev->discovery.type == DISCOV_TYPE_LE)
                goto discov_stopped;

        if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
                return;

        if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
                if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
                    hdev->discovery.state != DISCOVERY_RESOLVING)
                        goto discov_stopped;

                return;
        }

        hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
                     HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Inquiry failed: status 0x%02x", status);
                goto discov_stopped;
        }

        return;

discov_stopped:
        hci_dev_lock(hdev);
        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
        hci_dev_unlock(hdev);
}

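/* Restart LE scanning by disabling and immediately re-enabling it.
 * This is used on controllers with a strict duplicate filter to flush
 * the filter so that already reported devices show up again.
 */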
static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_enable cp;

        /* If the controller is not scanning, we are done. */
        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return 0;

        hci_req_add_le_scan_disable(req);

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_ENABLE;
        cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

        return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_restart.work);
        unsigned long timeout, duration, scan_start, now;
        u8 status;

        BT_DBG("%s", hdev->name);

        hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Failed to restart LE scan: status %d", status);
                return;
        }

        hci_dev_lock(hdev);

        if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
            !hdev->discovery.scan_start)
                goto unlock;

        /* When the scan was started, hdev->le_scan_disable was queued to
         * run after 'duration' from scan_start. During the scan restart
         * this job was canceled, so we need to queue it again with the
         * proper timeout to make sure the scan does not run indefinitely.
         */
        duration = hdev->discovery.scan_duration;
        scan_start = hdev->discovery.scan_start;
        now = jiffies;
        if (now - scan_start <= duration) {
                int elapsed;

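                /* Compute the elapsed scan time, accounting for the
                 * jiffies counter wrapping around between scan_start
                 * and now.
                 */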
                if (now >= scan_start)
                        elapsed = now - scan_start;
                else
                        elapsed = ULONG_MAX - scan_start + now;

                timeout = duration - elapsed;
        } else {
                timeout = 0;
        }

        queue_delayed_work(hdev->req_workqueue,
                           &hdev->le_scan_disable, timeout);

unlock:
        hci_dev_unlock(hdev);
}

static void disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

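/* Set up and enable an active LE scan for device discovery. The scan
 * interval is passed in opt; the scan window is fixed to
 * DISCOV_LE_SCAN_WIN.
 */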
static int active_scan(struct hci_request *req, unsigned long opt)
{
        uint16_t interval = opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        u8 own_addr_type;
        int err;

        BT_DBG("%s", hdev->name);

        if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
                hci_dev_lock(hdev);

                /* Don't let discovery abort an outgoing connection attempt
                 * that's using directed advertising.
                 */
                if (hci_lookup_le_connect(hdev)) {
                        hci_dev_unlock(hdev);
                        return -EBUSY;
                }

                cancel_adv_timeout(hdev);
                hci_dev_unlock(hdev);

                disable_advertising(req);
        }

        /* If the controller is scanning, background scanning is
         * running, so temporarily stop it in order to set the
         * discovery scanning parameters.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                hci_req_add_le_scan_disable(req);

        /* All active scans will be done with either a resolvable
         * private address (when the privacy feature is enabled) or
         * a non-resolvable private address.
         */
        err = hci_update_random_address(req, true, &own_addr_type);
        if (err < 0)
                own_addr_type = ADDR_LE_DEV_PUBLIC;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_ACTIVE;
        param_cp.interval = cpu_to_le16(interval);
        param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
        param_cp.own_address_type = own_addr_type;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);

        return 0;
}

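/* Run an LE active scan and a BR/EDR inquiry within the same request.
 * This is only used on controllers that can schedule both in parallel
 * (HCI_QUIRK_SIMULTANEOUS_DISCOVERY).
 */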
static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
        int err;

        BT_DBG("%s", req->hdev->name);

        err = active_scan(req, opt);
        if (err)
                return err;

        return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
        unsigned long timeout;

        BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

        switch (hdev->discovery.type) {
        case DISCOV_TYPE_BREDR:
                if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
                        hci_req_sync(hdev, bredr_inquiry,
                                     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
                                     status);
                return;
        case DISCOV_TYPE_INTERLEAVED:
                /* When running simultaneous discovery, the LE scanning time
                 * should occupy the whole discovery time since BR/EDR inquiry
                 * and LE scanning are scheduled by the controller.
                 *
                 * Interleaved discovery, in comparison, performs BR/EDR
                 * inquiry and LE scanning sequentially with separate
                 * timeouts.
                 */
                if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
                             &hdev->quirks)) {
                        timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
                        /* During simultaneous discovery, we double the LE
                         * scan interval. We must leave some time for the
                         * controller to do BR/EDR inquiry.
                         */
                        hci_req_sync(hdev, interleaved_discov,
                                     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
                                     status);
                        break;
                }

                timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
                hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
                             HCI_CMD_TIMEOUT, status);
                break;
        case DISCOV_TYPE_LE:
                timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
                hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
                             HCI_CMD_TIMEOUT, status);
                break;
        default:
                *status = HCI_ERROR_UNSPECIFIED;
                return;
        }

        if (*status)
                return;

        BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

        /* When service discovery is used and the controller has a
         * strict duplicate filter, it is important to remember the
         * start and duration of the scan. This is required for
         * restarting scanning during the discovery phase.
         */
        if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
                     hdev->discovery.result_filtering) {
                hdev->discovery.scan_start = jiffies;
                hdev->discovery.scan_duration = timeout;
        }

        queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
                           timeout);
}

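/* Queue the commands needed to stop the ongoing discovery and return
 * true if any HCI command was added to the request.
 */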
bool hci_req_stop_discovery(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct discovery_state *d = &hdev->discovery;
        struct hci_cp_remote_name_req_cancel cp;
        struct inquiry_entry *e;
        bool ret = false;

        BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

        if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
                if (test_bit(HCI_INQUIRY, &hdev->flags))
                        hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        cancel_delayed_work(&hdev->le_scan_disable);
                        hci_req_add_le_scan_disable(req);
                }

                ret = true;
        } else {
                /* Passive scanning */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        hci_req_add_le_scan_disable(req);
                        ret = true;
                }
        }

        /* No further actions needed for LE-only discovery */
        if (d->type == DISCOV_TYPE_LE)
                return ret;

        if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
                e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
                                                     NAME_PENDING);
                if (!e)
                        return ret;

                bacpy(&cp.bdaddr, &e->data.bdaddr);
                hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
                            &cp);
                ret = true;
        }

        return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        hci_req_stop_discovery(req);
        hci_dev_unlock(req->hdev);

        return 0;
}

static void discov_update(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discov_update);
        u8 status = 0;

        switch (hdev->discovery.state) {
        case DISCOVERY_STARTING:
                start_discovery(hdev, &status);
                mgmt_start_discovery_complete(hdev, status);
                if (status)
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                else
                        hci_discovery_set_state(hdev, DISCOVERY_FINDING);
                break;
        case DISCOVERY_STOPPING:
                hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
                mgmt_stop_discovery_complete(hdev, status);
                if (!status)
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                break;
        case DISCOVERY_STOPPED:
        default:
                return;
        }
}

static void discov_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        /* When the discoverable timeout triggers, just make sure
         * the limited discoverable flag is cleared. Even in the case
         * of a timeout triggered from general discoverable, it is
         * safe to unconditionally clear the flag.
         */
        hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);

        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
        mgmt_new_settings(hdev);
}

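/* Bring the controller configuration in sync with the stack's flags
 * after powering on: SSP and Secure Connections support, LE host
 * support, advertising state, link security, and the BR/EDR settings
 * such as scan mode, class of device, name and EIR data.
 */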
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 link_sec;

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            !lmp_host_ssp_capable(hdev)) {
                u8 mode = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

                if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
                        u8 support = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                                    sizeof(support), &support);
                }
        }

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
            lmp_bredr_capable(hdev)) {
                struct hci_cp_write_le_host_supported cp;

                cp.le = 0x01;
                cp.simul = 0x00;

                /* Check first if we already have the right
                 * host state (host features set)
                 */
                if (cp.le != lmp_host_le_capable(hdev) ||
                    cp.simul != lmp_host_le_br_capable(hdev))
                        hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
                                    sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                /* Make sure the controller has a good default for
                 * advertising data. This also applies to the case
                 * where BR/EDR was toggled during the AUTO_OFF phase.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
                    list_empty(&hdev->adv_instances)) {
                        __hci_req_update_adv_data(req, 0x00);
                        __hci_req_update_scan_rsp_data(req, 0x00);

                        if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
                                __hci_req_enable_advertising(req);
                } else if (!list_empty(&hdev->adv_instances)) {
                        struct adv_info *adv_instance;

                        adv_instance = list_first_entry(&hdev->adv_instances,
                                                        struct adv_info, list);
                        __hci_req_schedule_adv_instance(req,
                                                        adv_instance->instance,
                                                        true);
                }
        }

        link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
        if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
                            sizeof(link_sec), &link_sec);

        if (lmp_bredr_capable(hdev)) {
                if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
                        __hci_req_write_fast_connectable(req, true);
                else
                        __hci_req_write_fast_connectable(req, false);
                __hci_req_update_scan(req);
                __hci_req_update_class(req);
                __hci_req_update_name(req);
                __hci_req_update_eir(req);
        }

        hci_dev_unlock(hdev);
        return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
        /* Register the available SMP channels (BR/EDR and LE) only when
         * successfully powering on the controller. This late
         * registration is required so that LE SMP can clearly decide
         * whether the public address or the static address is used.
         */
        smp_register(hdev);

        return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
                              NULL);
}

void hci_request_setup(struct hci_dev *hdev)
{
        INIT_WORK(&hdev->discov_update, discov_update);
        INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
        INIT_WORK(&hdev->scan_update, scan_update_work);
        INIT_WORK(&hdev->connectable_update, connectable_update_work);
        INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
        INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
        INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
        INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
        hci_req_sync_cancel(hdev, ENODEV);

        cancel_work_sync(&hdev->discov_update);
        cancel_work_sync(&hdev->bg_scan_update);
        cancel_work_sync(&hdev->scan_update);
        cancel_work_sync(&hdev->connectable_update);
        cancel_work_sync(&hdev->discoverable_update);
        cancel_delayed_work_sync(&hdev->discov_off);
        cancel_delayed_work_sync(&hdev->le_scan_disable);
        cancel_delayed_work_sync(&hdev->le_scan_restart);

        if (hdev->adv_instance_timeout) {
                cancel_delayed_work_sync(&hdev->adv_instance_expire);
                hdev->adv_instance_timeout = 0;
        }
}