Bluetooth: mgmt/hci_core: multi-adv for add_advertising*()
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
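
/* Note: the "dut_mode" attribute above is created under the adapter's
 * debugfs directory (see __hci_init() below), which with the default
 * debugfs mount point is typically reachable as, e.g.
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * (adapter name hci0 is just an example). Writing 'Y' sends
 * HCI_OP_ENABLE_DUT_MODE to the controller, writing 'N' resets it.
 */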

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}
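
/* The two helpers above implement the small state machine used by the
 * synchronous request API below: a request parks req_status at
 * HCI_REQ_PEND, and the completion (or cancellation) handler moves it
 * to HCI_REQ_DONE or HCI_REQ_CANCELED before waking up any waiter
 * sleeping on req_wait_q.
 */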

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
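
/* Usage sketch (illustrative only): a caller holding hci_req_lock(),
 * as dut_mode_write() above does, can issue a single command and wait
 * for its Command Complete data, e.g. to read the version information:
 *
 *   struct sk_buff *skb;
 *
 *   skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                        HCI_CMD_TIMEOUT);
 *   if (IS_ERR(skb))
 *           return PTR_ERR(skb);
 *   ...parse skb->data...
 *   kfree_skb(skb);
 */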

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
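
/* hci_req_sync() is the entry point for callers outside the core init
 * path: it checks HCI_UP and takes hci_req_lock() itself, whereas
 * __hci_req_sync() assumes the caller has already serialized against
 * other requests (e.g. __hci_init() below runs while hci_dev_do_open()
 * holds the request lock).
 */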

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
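
/* The event mask built above is a little-endian 64-bit bitmap: bit b
 * of events[n] enables the event assigned to position n * 8 + b in the
 * Core Specification's Set Event Mask table. For example, events[7] |=
 * 0x20 sets bit 61, the LE Meta event.
 */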

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * also be available. However some controllers list the
                 * max_page as 0 as long as SSP has not been enabled.
                 * To achieve proper debugging output, force the minimum
                 * max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;

                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}
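
/* The hdev->commands[] checks above and below index directly into the
 * bitmap returned by Read Local Supported Commands: octet n, bit b of
 * that mask indicates whether the controller implements the
 * corresponding HCI command (e.g. octet 34 bits 1 and 2 map to the two
 * LE commands checked just above).
 */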

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE type controllers. AMP controllers only
         * need the first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}
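
/* To summarize, controller bring-up runs up to four request stages:
 * init1 (reset and basic capability reads), init2 (transport specific
 * setup), init3 (event masks and LE extras) and init4 (optional
 * features). AMP controllers stop after init2, as checked above.
 */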

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
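
/* The reference taken via hci_dev_hold() above must be released by the
 * caller with hci_dev_put() once it is done with the device; see the
 * done: paths of the ioctl helpers below for the matching pattern.
 */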

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
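
/* The walk above keeps the resolve list roughly sorted by ascending
 * abs(rssi), so entries with the strongest signal get name-resolved
 * first; entries already in NAME_PENDING state never terminate the
 * scan and therefore keep their place ahead of the re-inserted entry.
 */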

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temp buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
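
/* User space reaches hci_inquiry() through the HCIINQUIRY ioctl on an
 * HCI socket; the kernel expects the request header to be followed
 * directly by room for the inquiry_info results, as the copy_to_user()
 * calls above show. A minimal sketch (error handling omitted, assumes
 * the usual BlueZ userspace hci.h definitions; the LAP below is the
 * General Inquiry Access Code 0x9e8b33 in little-endian order):
 *
 *   struct {
 *           struct hci_inquiry_req ir;
 *           struct inquiry_info info[8];
 *   } buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 8,
 *                     .lap = { 0x33, 0x8b, 0x9e } } };
 *
 *   ioctl(sock, HCIINQUIRY, &buf);
 */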

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}
1476
1477 /* ---- HCI ioctl helpers ---- */
1478
1479 int hci_dev_open(__u16 dev)
1480 {
1481         struct hci_dev *hdev;
1482         int err;
1483
1484         hdev = hci_dev_get(dev);
1485         if (!hdev)
1486                 return -ENODEV;
1487
1488         /* Devices that are marked as unconfigured can only be powered
1489          * up as user channel. Trying to bring them up as normal devices
1490          * will result into a failure. Only user channel operation is
1491          * possible.
1492          *
1493          * When this function is called for a user channel, the flag
1494          * HCI_USER_CHANNEL will be set first before attempting to
1495          * open the device.
1496          */
1497         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1498             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1499                 err = -EOPNOTSUPP;
1500                 goto done;
1501         }
1502
1503         /* We need to ensure that no other power on/off work is pending
1504          * before proceeding to call hci_dev_do_open. This is
1505          * particularly important if the setup procedure has not yet
1506          * completed.
1507          */
1508         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1509                 cancel_delayed_work(&hdev->power_off);
1510
1511         /* After this call it is guaranteed that the setup procedure
1512          * has finished. This means that error conditions like RFKILL
1513          * or no valid public or static random address apply.
1514          */
1515         flush_workqueue(hdev->req_workqueue);
1516
1517         /* For controllers not using the management interface and that
1518          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1519          * so that pairing works for them. Once the management interface
1520          * is in use this bit will be cleared again and userspace has
1521          * to explicitly enable it.
1522          */
1523         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1524             !hci_dev_test_flag(hdev, HCI_MGMT))
1525                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1526
1527         err = hci_dev_do_open(hdev);
1528
1529 done:
1530         hci_dev_put(hdev);
1531         return err;
1532 }
1533
1534 /* This function requires the caller holds hdev->lock */
1535 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1536 {
1537         struct hci_conn_params *p;
1538
1539         list_for_each_entry(p, &hdev->le_conn_params, list) {
1540                 if (p->conn) {
1541                         hci_conn_drop(p->conn);
1542                         hci_conn_put(p->conn);
1543                         p->conn = NULL;
1544                 }
1545                 list_del_init(&p->action);
1546         }
1547
1548         BT_DBG("All LE pending actions cleared");
1549 }
1550
1551 static int hci_dev_do_close(struct hci_dev *hdev)
1552 {
1553         BT_DBG("%s %p", hdev->name, hdev);
1554
1555         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1556             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1557             test_bit(HCI_UP, &hdev->flags)) {
1558                 /* Execute vendor specific shutdown routine */
1559                 if (hdev->shutdown)
1560                         hdev->shutdown(hdev);
1561         }
1562
1563         cancel_delayed_work(&hdev->power_off);
1564
1565         hci_req_cancel(hdev, ENODEV);
1566         hci_req_lock(hdev);
1567
1568         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1569                 cancel_delayed_work_sync(&hdev->cmd_timer);
1570                 hci_req_unlock(hdev);
1571                 return 0;
1572         }
1573
1574         /* Flush RX and TX works */
1575         flush_work(&hdev->tx_work);
1576         flush_work(&hdev->rx_work);
1577
1578         if (hdev->discov_timeout > 0) {
1579                 cancel_delayed_work(&hdev->discov_off);
1580                 hdev->discov_timeout = 0;
1581                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1582                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1583         }
1584
1585         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1586                 cancel_delayed_work(&hdev->service_cache);
1587
1588         cancel_delayed_work_sync(&hdev->le_scan_disable);
1589         cancel_delayed_work_sync(&hdev->le_scan_restart);
1590
1591         if (hci_dev_test_flag(hdev, HCI_MGMT))
1592                 cancel_delayed_work_sync(&hdev->rpa_expired);
1593
1594         if (hdev->adv_instance_timeout) {
1595                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
1596                 hdev->adv_instance_timeout = 0;
1597         }
1598
1599         /* Avoid potential lockdep warnings from the *_flush() calls by
1600          * ensuring the workqueue is empty up front.
1601          */
1602         drain_workqueue(hdev->workqueue);
1603
1604         hci_dev_lock(hdev);
1605
1606         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1607
1608         if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1609                 if (hdev->dev_type == HCI_BREDR)
1610                         mgmt_powered(hdev, 0);
1611         }
1612
1613         hci_inquiry_cache_flush(hdev);
1614         hci_pend_le_actions_clear(hdev);
1615         hci_conn_hash_flush(hdev);
1616         hci_dev_unlock(hdev);
1617
1618         smp_unregister(hdev);
1619
1620         hci_notify(hdev, HCI_DEV_DOWN);
1621
1622         if (hdev->flush)
1623                 hdev->flush(hdev);
1624
1625         /* Reset device */
1626         skb_queue_purge(&hdev->cmd_q);
1627         atomic_set(&hdev->cmd_cnt, 1);
1628         if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1629             !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1630             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1631                 set_bit(HCI_INIT, &hdev->flags);
1632                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1633                 clear_bit(HCI_INIT, &hdev->flags);
1634         }
1635
1636         /* Flush cmd work */
1637         flush_work(&hdev->cmd_work);
1638
1639         /* Drop queues */
1640         skb_queue_purge(&hdev->rx_q);
1641         skb_queue_purge(&hdev->cmd_q);
1642         skb_queue_purge(&hdev->raw_q);
1643
1644         /* Drop last sent command */
1645         if (hdev->sent_cmd) {
1646                 cancel_delayed_work_sync(&hdev->cmd_timer);
1647                 kfree_skb(hdev->sent_cmd);
1648                 hdev->sent_cmd = NULL;
1649         }
1650
1651         /* After this point our queues are empty
1652          * and no tasks are scheduled. */
1653         hdev->close(hdev);
1654
1655         /* Clear flags */
1656         hdev->flags &= BIT(HCI_RAW);
1657         hci_dev_clear_volatile_flags(hdev);
1658
1659         /* Controller radio is available but is currently powered down */
1660         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1661
1662         memset(hdev->eir, 0, sizeof(hdev->eir));
1663         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1664         bacpy(&hdev->random_addr, BDADDR_ANY);
1665
1666         hci_req_unlock(hdev);
1667
1668         hci_dev_put(hdev);
1669         return 0;
1670 }
1671
1672 int hci_dev_close(__u16 dev)
1673 {
1674         struct hci_dev *hdev;
1675         int err;
1676
1677         hdev = hci_dev_get(dev);
1678         if (!hdev)
1679                 return -ENODEV;
1680
1681         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1682                 err = -EBUSY;
1683                 goto done;
1684         }
1685
1686         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1687                 cancel_delayed_work(&hdev->power_off);
1688
1689         err = hci_dev_do_close(hdev);
1690
1691 done:
1692         hci_dev_put(hdev);
1693         return err;
1694 }
1695
1696 static int hci_dev_do_reset(struct hci_dev *hdev)
1697 {
1698         int ret;
1699
1700         BT_DBG("%s %p", hdev->name, hdev);
1701
1702         hci_req_lock(hdev);
1703
1704         /* Drop queues */
1705         skb_queue_purge(&hdev->rx_q);
1706         skb_queue_purge(&hdev->cmd_q);
1707
1708         /* Avoid potential lockdep warnings from the *_flush() calls by
1709          * ensuring the workqueue is empty up front.
1710          */
1711         drain_workqueue(hdev->workqueue);
1712
1713         hci_dev_lock(hdev);
1714         hci_inquiry_cache_flush(hdev);
1715         hci_conn_hash_flush(hdev);
1716         hci_dev_unlock(hdev);
1717
1718         if (hdev->flush)
1719                 hdev->flush(hdev);
1720
1721         atomic_set(&hdev->cmd_cnt, 1);
1722         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1723
1724         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1725
1726         hci_req_unlock(hdev);
1727         return ret;
1728 }
1729
1730 int hci_dev_reset(__u16 dev)
1731 {
1732         struct hci_dev *hdev;
1733         int err;
1734
1735         hdev = hci_dev_get(dev);
1736         if (!hdev)
1737                 return -ENODEV;
1738
1739         if (!test_bit(HCI_UP, &hdev->flags)) {
1740                 err = -ENETDOWN;
1741                 goto done;
1742         }
1743
1744         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1745                 err = -EBUSY;
1746                 goto done;
1747         }
1748
1749         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1750                 err = -EOPNOTSUPP;
1751                 goto done;
1752         }
1753
1754         err = hci_dev_do_reset(hdev);
1755
1756 done:
1757         hci_dev_put(hdev);
1758         return err;
1759 }
1760
1761 int hci_dev_reset_stat(__u16 dev)
1762 {
1763         struct hci_dev *hdev;
1764         int ret = 0;
1765
1766         hdev = hci_dev_get(dev);
1767         if (!hdev)
1768                 return -ENODEV;
1769
1770         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1771                 ret = -EBUSY;
1772                 goto done;
1773         }
1774
1775         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1776                 ret = -EOPNOTSUPP;
1777                 goto done;
1778         }
1779
1780         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1781
1782 done:
1783         hci_dev_put(hdev);
1784         return ret;
1785 }
1786
1787 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1788 {
1789         bool conn_changed, discov_changed;
1790
1791         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1792
1793         if ((scan & SCAN_PAGE))
1794                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1795                                                           HCI_CONNECTABLE);
1796         else
1797                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1798                                                            HCI_CONNECTABLE);
1799
1800         if ((scan & SCAN_INQUIRY)) {
1801                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1802                                                             HCI_DISCOVERABLE);
1803         } else {
1804                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1805                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1806                                                              HCI_DISCOVERABLE);
1807         }
1808
1809         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1810                 return;
1811
1812         if (conn_changed || discov_changed) {
1813                 /* In case this was disabled through mgmt */
1814                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1815
1816                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1817                         mgmt_update_adv_data(hdev);
1818
1819                 mgmt_new_settings(hdev);
1820         }
1821 }
1822
1823 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1824 {
1825         struct hci_dev *hdev;
1826         struct hci_dev_req dr;
1827         int err = 0;
1828
1829         if (copy_from_user(&dr, arg, sizeof(dr)))
1830                 return -EFAULT;
1831
1832         hdev = hci_dev_get(dr.dev_id);
1833         if (!hdev)
1834                 return -ENODEV;
1835
1836         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1837                 err = -EBUSY;
1838                 goto done;
1839         }
1840
1841         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1842                 err = -EOPNOTSUPP;
1843                 goto done;
1844         }
1845
1846         if (hdev->dev_type != HCI_BREDR) {
1847                 err = -EOPNOTSUPP;
1848                 goto done;
1849         }
1850
1851         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1852                 err = -EOPNOTSUPP;
1853                 goto done;
1854         }
1855
1856         switch (cmd) {
1857         case HCISETAUTH:
1858                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1859                                    HCI_INIT_TIMEOUT);
1860                 break;
1861
1862         case HCISETENCRYPT:
1863                 if (!lmp_encrypt_capable(hdev)) {
1864                         err = -EOPNOTSUPP;
1865                         break;
1866                 }
1867
1868                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1869                         /* Auth must be enabled first */
1870                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1871                                            HCI_INIT_TIMEOUT);
1872                         if (err)
1873                                 break;
1874                 }
1875
1876                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1877                                    HCI_INIT_TIMEOUT);
1878                 break;
1879
1880         case HCISETSCAN:
1881                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1882                                    HCI_INIT_TIMEOUT);
1883
1884                 /* Ensure that the connectable and discoverable states
1885                  * get correctly modified as this was a non-mgmt change.
1886                  */
1887                 if (!err)
1888                         hci_update_scan_state(hdev, dr.dev_opt);
1889                 break;
1890
1891         case HCISETLINKPOL:
1892                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1893                                    HCI_INIT_TIMEOUT);
1894                 break;
1895
1896         case HCISETLINKMODE:
1897                 hdev->link_mode = ((__u16) dr.dev_opt) &
1898                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1899                 break;
1900
1901         case HCISETPTYPE:
1902                 hdev->pkt_type = (__u16) dr.dev_opt;
1903                 break;
1904
1905         case HCISETACLMTU:
1906                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1907                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1908                 break;
1909
1910         case HCISETSCOMTU:
1911                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1912                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1913                 break;
1914
1915         default:
1916                 err = -EINVAL;
1917                 break;
1918         }
1919
1920 done:
1921         hci_dev_put(hdev);
1922         return err;
1923 }
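
/*
 * Editorial sketch, not part of the original file: userspace side of two
 * of the ioctls handled above (BlueZ headers assumed, helper names
 * hypothetical). Note that HCISETACLMTU/HCISETSCOMTU pack two 16-bit
 * values into dev_opt; the kernel code above reads the packet count from
 * the first halfword and the MTU from the second, so the packing mirrors
 * the host's halfword layout.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

/* Make hci0 both connectable (page scan) and discoverable (inquiry scan) */
static int example_set_scan(int ctl)
{
	struct hci_dev_req dr;

	memset(&dr, 0, sizeof(dr));
	dr.dev_id  = 0;
	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;

	return ioctl(ctl, HCISETSCAN, &dr);
}

/* Mirror the kernel's host-endian halfword access for HCISETACLMTU:
 * pkts lands in the first __u16 and mtu in the second, matching the
 * switch cases above.
 */
static int example_set_acl_mtu(int ctl, uint16_t mtu, uint16_t pkts)
{
	struct hci_dev_req dr;

	memset(&dr, 0, sizeof(dr));
	dr.dev_id = 0;
	((uint16_t *)&dr.dev_opt)[0] = pkts;
	((uint16_t *)&dr.dev_opt)[1] = mtu;

	return ioctl(ctl, HCISETACLMTU, &dr);
}
#endif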
1924
1925 int hci_get_dev_list(void __user *arg)
1926 {
1927         struct hci_dev *hdev;
1928         struct hci_dev_list_req *dl;
1929         struct hci_dev_req *dr;
1930         int n = 0, size, err;
1931         __u16 dev_num;
1932
1933         if (get_user(dev_num, (__u16 __user *) arg))
1934                 return -EFAULT;
1935
1936         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1937                 return -EINVAL;
1938
1939         size = sizeof(*dl) + dev_num * sizeof(*dr);
1940
1941         dl = kzalloc(size, GFP_KERNEL);
1942         if (!dl)
1943                 return -ENOMEM;
1944
1945         dr = dl->dev_req;
1946
1947         read_lock(&hci_dev_list_lock);
1948         list_for_each_entry(hdev, &hci_dev_list, list) {
1949                 unsigned long flags = hdev->flags;
1950
1951                 /* When the auto-off is configured it means the transport
1952                  * is running, but in that case still indicate that the
1953                  * device is actually down.
1954                  */
1955                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1956                         flags &= ~BIT(HCI_UP);
1957
1958                 (dr + n)->dev_id  = hdev->id;
1959                 (dr + n)->dev_opt = flags;
1960
1961                 if (++n >= dev_num)
1962                         break;
1963         }
1964         read_unlock(&hci_dev_list_lock);
1965
1966         dl->dev_num = n;
1967         size = sizeof(*dl) + n * sizeof(*dr);
1968
1969         err = copy_to_user(arg, dl, size);
1970         kfree(dl);
1971
1972         return err ? -EFAULT : 0;
1973 }
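
/*
 * Editorial sketch, not part of the original file: calling HCIGETDEVLIST
 * from userspace. Assumes the BlueZ headers as in the earlier sketches;
 * the helper name is hypothetical.
 */
#if 0
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int example_list_devices(int ctl)
{
	struct hci_dev_list_req *dl;
	int i, err = 0;

	/* Room for the header plus HCI_MAX_DEV request slots */
	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
	if (!dl)
		return -ENOMEM;

	dl->dev_num = HCI_MAX_DEV;

	if (ioctl(ctl, HCIGETDEVLIST, dl) < 0) {
		err = -errno;
		goto out;
	}

	/* dev_opt carries the flags; HCI_UP is masked out under auto-off */
	for (i = 0; i < dl->dev_num; i++)
		printf("hci%u flags 0x%08x\n",
		       dl->dev_req[i].dev_id, dl->dev_req[i].dev_opt);

out:
	free(dl);
	return err;
}
#endif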
1974
1975 int hci_get_dev_info(void __user *arg)
1976 {
1977         struct hci_dev *hdev;
1978         struct hci_dev_info di;
1979         unsigned long flags;
1980         int err = 0;
1981
1982         if (copy_from_user(&di, arg, sizeof(di)))
1983                 return -EFAULT;
1984
1985         hdev = hci_dev_get(di.dev_id);
1986         if (!hdev)
1987                 return -ENODEV;
1988
1989         /* When the auto-off is configured it means the transport
1990          * is running, but in that case still indicate that the
1991          * device is actually down.
1992          */
1993         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1994                 flags = hdev->flags & ~BIT(HCI_UP);
1995         else
1996                 flags = hdev->flags;
1997
1998         strcpy(di.name, hdev->name);
1999         di.bdaddr   = hdev->bdaddr;
2000         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2001         di.flags    = flags;
2002         di.pkt_type = hdev->pkt_type;
2003         if (lmp_bredr_capable(hdev)) {
2004                 di.acl_mtu  = hdev->acl_mtu;
2005                 di.acl_pkts = hdev->acl_pkts;
2006                 di.sco_mtu  = hdev->sco_mtu;
2007                 di.sco_pkts = hdev->sco_pkts;
2008         } else {
2009                 di.acl_mtu  = hdev->le_mtu;
2010                 di.acl_pkts = hdev->le_pkts;
2011                 di.sco_mtu  = 0;
2012                 di.sco_pkts = 0;
2013         }
2014         di.link_policy = hdev->link_policy;
2015         di.link_mode   = hdev->link_mode;
2016
2017         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2018         memcpy(&di.features, &hdev->features, sizeof(di.features));
2019
2020         if (copy_to_user(arg, &di, sizeof(di)))
2021                 err = -EFAULT;
2022
2023         hci_dev_put(hdev);
2024
2025         return err;
2026 }
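
/*
 * Companion sketch for HCIGETDEVINFO (illustrative, not part of the
 * original file; same headers as the previous sketch, helper name
 * hypothetical).
 */
#if 0
static int example_dev_info(int ctl, uint16_t dev_id)
{
	struct hci_dev_info di;

	memset(&di, 0, sizeof(di));
	di.dev_id = dev_id;

	if (ioctl(ctl, HCIGETDEVINFO, &di) < 0)
		return -errno;

	printf("%s: acl_mtu %u acl_pkts %u\n",
	       di.name, di.acl_mtu, di.acl_pkts);
	return 0;
}
#endif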
2027
2028 /* ---- Interface to HCI drivers ---- */
2029
2030 static int hci_rfkill_set_block(void *data, bool blocked)
2031 {
2032         struct hci_dev *hdev = data;
2033
2034         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2035
2036         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2037                 return -EBUSY;
2038
2039         if (blocked) {
2040                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2041                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2042                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2043                         hci_dev_do_close(hdev);
2044         } else {
2045                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2046         }
2047
2048         return 0;
2049 }
2050
2051 static const struct rfkill_ops hci_rfkill_ops = {
2052         .set_block = hci_rfkill_set_block,
2053 };
2054
2055 static void hci_power_on(struct work_struct *work)
2056 {
2057         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2058         int err;
2059
2060         BT_DBG("%s", hdev->name);
2061
2062         err = hci_dev_do_open(hdev);
2063         if (err < 0) {
2064                 hci_dev_lock(hdev);
2065                 mgmt_set_powered_failed(hdev, err);
2066                 hci_dev_unlock(hdev);
2067                 return;
2068         }
2069
2070         /* During the HCI setup phase, a few error conditions are
2071          * ignored and they need to be checked now. If they are still
2072          * valid, it is important to turn the device back off.
2073          */
2074         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2075             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2076             (hdev->dev_type == HCI_BREDR &&
2077              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2078              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2079                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2080                 hci_dev_do_close(hdev);
2081         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2082                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2083                                    HCI_AUTO_OFF_TIMEOUT);
2084         }
2085
2086         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2087                 /* For unconfigured devices, set the HCI_RAW flag
2088                  * so that userspace can easily identify them.
2089                  */
2090                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2091                         set_bit(HCI_RAW, &hdev->flags);
2092
2093                 /* For fully configured devices, this will send
2094                  * the Index Added event. For unconfigured devices,
2095                  * it will send the Unconfigured Index Added event.
2096                  *
2097                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2098                  * and no event will be sent.
2099                  */
2100                 mgmt_index_added(hdev);
2101         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2102                 /* When the controller is now configured, then it
2103                  * is important to clear the HCI_RAW flag.
2104                 /* Now that the controller is configured, it is
2105                  * important to clear the HCI_RAW flag.
2106                         clear_bit(HCI_RAW, &hdev->flags);
2107
2108                 /* Powering on the controller with HCI_CONFIG set only
2109                  * happens with the transition from unconfigured to
2110                  * configured. This will send the Index Added event.
2111                  */
2112                 mgmt_index_added(hdev);
2113         }
2114 }
2115
2116 static void hci_power_off(struct work_struct *work)
2117 {
2118         struct hci_dev *hdev = container_of(work, struct hci_dev,
2119                                             power_off.work);
2120
2121         BT_DBG("%s", hdev->name);
2122
2123         hci_dev_do_close(hdev);
2124 }
2125
2126 static void hci_error_reset(struct work_struct *work)
2127 {
2128         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2129
2130         BT_DBG("%s", hdev->name);
2131
2132         if (hdev->hw_error)
2133                 hdev->hw_error(hdev, hdev->hw_error_code);
2134         else
2135                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2136                        hdev->hw_error_code);
2137
2138         if (hci_dev_do_close(hdev))
2139                 return;
2140
2141         hci_dev_do_open(hdev);
2142 }
2143
2144 static void hci_discov_off(struct work_struct *work)
2145 {
2146         struct hci_dev *hdev;
2147
2148         hdev = container_of(work, struct hci_dev, discov_off.work);
2149
2150         BT_DBG("%s", hdev->name);
2151
2152         mgmt_discoverable_timeout(hdev);
2153 }
2154
2155 static void hci_adv_timeout_expire(struct work_struct *work)
2156 {
2157         struct hci_dev *hdev;
2158
2159         hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2160
2161         BT_DBG("%s", hdev->name);
2162
2163         mgmt_adv_timeout_expired(hdev);
2164 }
2165
2166 void hci_uuids_clear(struct hci_dev *hdev)
2167 {
2168         struct bt_uuid *uuid, *tmp;
2169
2170         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2171                 list_del(&uuid->list);
2172                 kfree(uuid);
2173         }
2174 }
2175
2176 void hci_link_keys_clear(struct hci_dev *hdev)
2177 {
2178         struct link_key *key;
2179
2180         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2181                 list_del_rcu(&key->list);
2182                 kfree_rcu(key, rcu);
2183         }
2184 }
2185
2186 void hci_smp_ltks_clear(struct hci_dev *hdev)
2187 {
2188         struct smp_ltk *k;
2189
2190         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2191                 list_del_rcu(&k->list);
2192                 kfree_rcu(k, rcu);
2193         }
2194 }
2195
2196 void hci_smp_irks_clear(struct hci_dev *hdev)
2197 {
2198         struct smp_irk *k;
2199
2200         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2201                 list_del_rcu(&k->list);
2202                 kfree_rcu(k, rcu);
2203         }
2204 }
2205
2206 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2207 {
2208         struct link_key *k;
2209
2210         rcu_read_lock();
2211         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2212                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2213                         rcu_read_unlock();
2214                         return k;
2215                 }
2216         }
2217         rcu_read_unlock();
2218
2219         return NULL;
2220 }
2221
2222 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2223                                u8 key_type, u8 old_key_type)
2224 {
2225         /* Legacy key */
2226         if (key_type < 0x03)
2227                 return true;
2228
2229         /* Debug keys are insecure so don't store them persistently */
2230         if (key_type == HCI_LK_DEBUG_COMBINATION)
2231                 return false;
2232
2233         /* Changed combination key and there's no previous one */
2234         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2235                 return false;
2236
2237         /* Security mode 3 case */
2238         if (!conn)
2239                 return true;
2240
2241         /* BR/EDR key derived using SC from an LE link */
2242         if (conn->type == LE_LINK)
2243                 return true;
2244
2245         /* Neither local nor remote side had no-bonding as requirement */
2246         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2247                 return true;
2248
2249         /* Local side had dedicated bonding as requirement */
2250         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2251                 return true;
2252
2253         /* Remote side had dedicated bonding as requirement */
2254         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2255                 return true;
2256
2257         /* If none of the above criteria match, then don't store the key
2258          * persistently */
2259         return false;
2260 }
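
/*
 * Summary of the decision above, in the order the checks run (derived
 * directly from the code, not quoted from the Core specification):
 *
 *   key_type 0x00-0x02 (legacy keys)                  -> store
 *   key_type 0x03 (debug combination)                 -> do not store
 *   changed combination with no previous key          -> do not store
 *   no hci_conn (security mode 3) or LE link (SC)     -> store
 *   both sides asked for some form of bonding         -> store
 *   either side asked for dedicated bonding           -> store
 *   otherwise (a no-bonding side, and no dedicated
 *   bonding involved)                                 -> do not store
 */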
2261
2262 static u8 ltk_role(u8 type)
2263 {
2264         if (type == SMP_LTK)
2265                 return HCI_ROLE_MASTER;
2266
2267         return HCI_ROLE_SLAVE;
2268 }
2269
2270 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2271                              u8 addr_type, u8 role)
2272 {
2273         struct smp_ltk *k;
2274
2275         rcu_read_lock();
2276         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2277                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2278                         continue;
2279
2280                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2281                         rcu_read_unlock();
2282                         return k;
2283                 }
2284         }
2285         rcu_read_unlock();
2286
2287         return NULL;
2288 }
2289
2290 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2291 {
2292         struct smp_irk *irk;
2293
2294         rcu_read_lock();
2295         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2296                 if (!bacmp(&irk->rpa, rpa)) {
2297                         rcu_read_unlock();
2298                         return irk;
2299                 }
2300         }
2301
2302         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2303                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2304                         bacpy(&irk->rpa, rpa);
2305                         rcu_read_unlock();
2306                         return irk;
2307                 }
2308         }
2309         rcu_read_unlock();
2310
2311         return NULL;
2312 }
2313
2314 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2315                                      u8 addr_type)
2316 {
2317         struct smp_irk *irk;
2318
2319         /* Identity Address must be public or static random */
2320         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2321                 return NULL;
2322
2323         rcu_read_lock();
2324         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2325                 if (addr_type == irk->addr_type &&
2326                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2327                         rcu_read_unlock();
2328                         return irk;
2329                 }
2330         }
2331         rcu_read_unlock();
2332
2333         return NULL;
2334 }
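
/*
 * Background for the address check above: bdaddr_t stores the address
 * little-endian, so b[5] is the most significant byte. For LE random
 * addresses the two top bits classify the address (0b11 static random,
 * 0b01 resolvable private, 0b00 non-resolvable private), hence the
 * 0xc0/0xc0 test. A hypothetical helper expressing the same check:
 */
#if 0
static bool addr_is_static_random(const bdaddr_t *bdaddr)
{
	/* 11xxxxxx in the most significant byte -> static random */
	return (bdaddr->b[5] & 0xc0) == 0xc0;
}
#endif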
2335
2336 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2337                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2338                                   u8 pin_len, bool *persistent)
2339 {
2340         struct link_key *key, *old_key;
2341         u8 old_key_type;
2342
2343         old_key = hci_find_link_key(hdev, bdaddr);
2344         if (old_key) {
2345                 old_key_type = old_key->type;
2346                 key = old_key;
2347         } else {
2348                 old_key_type = conn ? conn->key_type : 0xff;
2349                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2350                 if (!key)
2351                         return NULL;
2352                 list_add_rcu(&key->list, &hdev->link_keys);
2353         }
2354
2355         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2356
2357         /* Some buggy controller combinations generate a changed
2358          * combination key for legacy pairing even when there's no
2359          * previous key */
2360         if (type == HCI_LK_CHANGED_COMBINATION &&
2361             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2362                 type = HCI_LK_COMBINATION;
2363                 if (conn)
2364                         conn->key_type = type;
2365         }
2366
2367         bacpy(&key->bdaddr, bdaddr);
2368         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2369         key->pin_len = pin_len;
2370
2371         if (type == HCI_LK_CHANGED_COMBINATION)
2372                 key->type = old_key_type;
2373         else
2374                 key->type = type;
2375
2376         if (persistent)
2377                 *persistent = hci_persistent_key(hdev, conn, type,
2378                                                  old_key_type);
2379
2380         return key;
2381 }
2382
2383 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2384                             u8 addr_type, u8 type, u8 authenticated,
2385                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2386 {
2387         struct smp_ltk *key, *old_key;
2388         u8 role = ltk_role(type);
2389
2390         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2391         if (old_key) {
2392                 key = old_key;
2393         } else {
2394                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2395                 if (!key)
2396                         return NULL;
2397                 list_add_rcu(&key->list, &hdev->long_term_keys);
2398         }
2399
2400         bacpy(&key->bdaddr, bdaddr);
2401         key->bdaddr_type = addr_type;
2402         memcpy(key->val, tk, sizeof(key->val));
2403         key->authenticated = authenticated;
2404         key->ediv = ediv;
2405         key->rand = rand;
2406         key->enc_size = enc_size;
2407         key->type = type;
2408
2409         return key;
2410 }
2411
2412 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2413                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2414 {
2415         struct smp_irk *irk;
2416
2417         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2418         if (!irk) {
2419                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2420                 if (!irk)
2421                         return NULL;
2422
2423                 bacpy(&irk->bdaddr, bdaddr);
2424                 irk->addr_type = addr_type;
2425
2426                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2427         }
2428
2429         memcpy(irk->val, val, 16);
2430         bacpy(&irk->rpa, rpa);
2431
2432         return irk;
2433 }
2434
2435 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2436 {
2437         struct link_key *key;
2438
2439         key = hci_find_link_key(hdev, bdaddr);
2440         if (!key)
2441                 return -ENOENT;
2442
2443         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2444
2445         list_del_rcu(&key->list);
2446         kfree_rcu(key, rcu);
2447
2448         return 0;
2449 }
2450
2451 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2452 {
2453         struct smp_ltk *k;
2454         int removed = 0;
2455
2456         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2457                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2458                         continue;
2459
2460                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2461
2462                 list_del_rcu(&k->list);
2463                 kfree_rcu(k, rcu);
2464                 removed++;
2465         }
2466
2467         return removed ? 0 : -ENOENT;
2468 }
2469
2470 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2471 {
2472         struct smp_irk *k;
2473
2474         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2475                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2476                         continue;
2477
2478                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2479
2480                 list_del_rcu(&k->list);
2481                 kfree_rcu(k, rcu);
2482         }
2483 }
2484
2485 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2486 {
2487         struct smp_ltk *k;
2488         struct smp_irk *irk;
2489         u8 addr_type;
2490
2491         if (type == BDADDR_BREDR) {
2492                 if (hci_find_link_key(hdev, bdaddr))
2493                         return true;
2494                 return false;
2495         }
2496
2497         /* Convert to HCI addr type which struct smp_ltk uses */
2498         if (type == BDADDR_LE_PUBLIC)
2499                 addr_type = ADDR_LE_DEV_PUBLIC;
2500         else
2501                 addr_type = ADDR_LE_DEV_RANDOM;
2502
2503         irk = hci_get_irk(hdev, bdaddr, addr_type);
2504         if (irk) {
2505                 bdaddr = &irk->bdaddr;
2506                 addr_type = irk->addr_type;
2507         }
2508
2509         rcu_read_lock();
2510         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2511                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2512                         rcu_read_unlock();
2513                         return true;
2514                 }
2515         }
2516         rcu_read_unlock();
2517
2518         return false;
2519 }
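
/*
 * Note on the lookup above: hci_get_irk() only resolves resolvable
 * private addresses, so a peer that is currently seen under an RPA,
 * e.g. hci_bdaddr_is_paired(hdev, &rpa, BDADDR_LE_RANDOM), is still
 * reported as paired through the identity address its long term key
 * was stored under.
 */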
2520
2521 /* HCI command timer function */
2522 static void hci_cmd_timeout(struct work_struct *work)
2523 {
2524         struct hci_dev *hdev = container_of(work, struct hci_dev,
2525                                             cmd_timer.work);
2526
2527         if (hdev->sent_cmd) {
2528                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2529                 u16 opcode = __le16_to_cpu(sent->opcode);
2530
2531                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2532         } else {
2533                 BT_ERR("%s command tx timeout", hdev->name);
2534         }
2535
2536         atomic_set(&hdev->cmd_cnt, 1);
2537         queue_work(hdev->workqueue, &hdev->cmd_work);
2538 }
2539
2540 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2541                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2542 {
2543         struct oob_data *data;
2544
2545         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2546                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2547                         continue;
2548                 if (data->bdaddr_type != bdaddr_type)
2549                         continue;
2550                 return data;
2551         }
2552
2553         return NULL;
2554 }
2555
2556 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2557                                u8 bdaddr_type)
2558 {
2559         struct oob_data *data;
2560
2561         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2562         if (!data)
2563                 return -ENOENT;
2564
2565         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2566
2567         list_del(&data->list);
2568         kfree(data);
2569
2570         return 0;
2571 }
2572
2573 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2574 {
2575         struct oob_data *data, *n;
2576
2577         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2578                 list_del(&data->list);
2579                 kfree(data);
2580         }
2581 }
2582
2583 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2584                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2585                             u8 *hash256, u8 *rand256)
2586 {
2587         struct oob_data *data;
2588
2589         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2590         if (!data) {
2591                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2592                 if (!data)
2593                         return -ENOMEM;
2594
2595                 bacpy(&data->bdaddr, bdaddr);
2596                 data->bdaddr_type = bdaddr_type;
2597                 list_add(&data->list, &hdev->remote_oob_data);
2598         }
2599
2600         if (hash192 && rand192) {
2601                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2602                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2603                 if (hash256 && rand256)
2604                         data->present = 0x03;
2605         } else {
2606                 memset(data->hash192, 0, sizeof(data->hash192));
2607                 memset(data->rand192, 0, sizeof(data->rand192));
2608                 if (hash256 && rand256)
2609                         data->present = 0x02;
2610                 else
2611                         data->present = 0x00;
2612         }
2613
2614         if (hash256 && rand256) {
2615                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2616                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2617         } else {
2618                 memset(data->hash256, 0, sizeof(data->hash256));
2619                 memset(data->rand256, 0, sizeof(data->rand256));
2620                 if (hash192 && rand192)
2621                         data->present = 0x01;
2622         }
2623
2624         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2625
2626         return 0;
2627 }
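
/*
 * Resulting data->present values, as set above (derived from the code:
 * bit 0 means P-192 data available, bit 1 means P-256 data available):
 *
 *   hash192/rand192 | hash256/rand256 | present
 *   ----------------+-----------------+--------
 *   valid           | valid           | 0x03
 *   valid           | NULL            | 0x01
 *   NULL            | valid           | 0x02
 *   NULL            | NULL            | 0x00
 */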
2628
2629 /* This function requires the caller holds hdev->lock */
2630 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2631 {
2632         struct adv_info *adv_instance;
2633
2634         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2635                 if (adv_instance->instance == instance)
2636                         return adv_instance;
2637         }
2638
2639         return NULL;
2640 }
2641
2642 /* This function requires the caller holds hdev->lock */
2643 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2644 {
2645         struct adv_info *cur_instance = hci_find_adv_instance(hdev, instance);
2646
2647         if (!cur_instance)
2648                 return NULL;
2649
2650         if (cur_instance == list_last_entry(&hdev->adv_instances,
2651                                             struct adv_info, list))
2652                 return list_first_entry(&hdev->adv_instances,
2653                                         struct adv_info, list);
2654         else
2655                 return list_next_entry(cur_instance, list);
2656 }
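
/*
 * Example of the rotation above: instances are kept in list order (most
 * recently added first, see hci_add_adv_instance() below), and repeated
 * calls cycle through every registered instance, wrapping from the last
 * list entry back to the first. This is what round-robins multiple
 * advertising instances.
 */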
2657
2658 /* This function requires the caller holds hdev->lock */
2659 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2660 {
2661         struct adv_info *adv_instance;
2662
2663         adv_instance = hci_find_adv_instance(hdev, instance);
2664         if (!adv_instance)
2665                 return -ENOENT;
2666
2667         BT_DBG("%s removing instance %d", hdev->name, instance);
2668
2669         if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2670                 cancel_delayed_work(&hdev->adv_instance_expire);
2671                 hdev->adv_instance_timeout = 0;
2672         }
2673
2674         list_del(&adv_instance->list);
2675         kfree(adv_instance);
2676
2677         hdev->adv_instance_cnt--;
2678
2679         return 0;
2680 }
2681
2682 /* This function requires the caller holds hdev->lock */
2683 void hci_adv_instances_clear(struct hci_dev *hdev)
2684 {
2685         struct adv_info *adv_instance, *n;
2686
2687         if (hdev->adv_instance_timeout) {
2688                 cancel_delayed_work(&hdev->adv_instance_expire);
2689                 hdev->adv_instance_timeout = 0;
2690         }
2691
2692         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2693                 list_del(&adv_instance->list);
2694                 kfree(adv_instance);
2695         }
2696
2697         hdev->adv_instance_cnt = 0;
2698 }
2699
2700 /* This function requires the caller holds hdev->lock */
2701 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2702                          u16 adv_data_len, u8 *adv_data,
2703                          u16 scan_rsp_len, u8 *scan_rsp_data,
2704                          u16 timeout, u16 duration)
2705 {
2706         struct adv_info *adv_instance;
2707
2708         adv_instance = hci_find_adv_instance(hdev, instance);
2709         if (adv_instance) {
2710                 memset(adv_instance->adv_data, 0,
2711                        sizeof(adv_instance->adv_data));
2712                 memset(adv_instance->scan_rsp_data, 0,
2713                        sizeof(adv_instance->scan_rsp_data));
2714         } else {
2715                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2716                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2717                         return -EOVERFLOW;
2718
2719                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2720                 if (!adv_instance)
2721                         return -ENOMEM;
2722
2724                 adv_instance->pending = true;
2725                 adv_instance->instance = instance;
2726                 list_add(&adv_instance->list, &hdev->adv_instances);
2727                 hdev->adv_instance_cnt++;
2728         }
2729
2730         adv_instance->flags = flags;
2731         adv_instance->adv_data_len = adv_data_len;
2732         adv_instance->scan_rsp_len = scan_rsp_len;
2733
2734         if (adv_data_len)
2735                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2736
2737         if (scan_rsp_len)
2738                 memcpy(adv_instance->scan_rsp_data,
2739                        scan_rsp_data, scan_rsp_len);
2740
2741         adv_instance->timeout = timeout;
2742         adv_instance->remaining_time = timeout;
2743
2744         if (duration == 0)
2745                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2746         else
2747                 adv_instance->duration = duration;
2748
2749         BT_DBG("%s for instance %d", hdev->name, instance);
2750
2751         return 0;
2752 }
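
/*
 * Editorial sketch, not from this file: how a caller such as the mgmt
 * Add Advertising path might register an instance with the helper above.
 * The advertising data below (Flags + shortened local name "led") and
 * the function name are hypothetical examples.
 */
#if 0
static int example_register_instance(struct hci_dev *hdev)
{
	/* AD: Flags (LE General Discoverable) + Shortened Local Name */
	u8 adv_data[] = { 0x02, 0x01, 0x02, 0x04, 0x08, 'l', 'e', 'd' };
	int err;

	hci_dev_lock(hdev);
	/* instance 1, no scan response, no timeout, default duration */
	err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv_data), adv_data,
				   0, NULL, 0, 0);
	hci_dev_unlock(hdev);

	return err;
}
#endif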
2753
2754 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2755                                          bdaddr_t *bdaddr, u8 type)
2756 {
2757         struct bdaddr_list *b;
2758
2759         list_for_each_entry(b, bdaddr_list, list) {
2760                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2761                         return b;
2762         }
2763
2764         return NULL;
2765 }
2766
2767 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2768 {
2769         struct list_head *p, *n;
2770
2771         list_for_each_safe(p, n, bdaddr_list) {
2772                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2773
2774                 list_del(p);
2775                 kfree(b);
2776         }
2777 }
2778
2779 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2780 {
2781         struct bdaddr_list *entry;
2782
2783         if (!bacmp(bdaddr, BDADDR_ANY))
2784                 return -EBADF;
2785
2786         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2787                 return -EEXIST;
2788
2789         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2790         if (!entry)
2791                 return -ENOMEM;
2792
2793         bacpy(&entry->bdaddr, bdaddr);
2794         entry->bdaddr_type = type;
2795
2796         list_add(&entry->list, list);
2797
2798         return 0;
2799 }
2800
2801 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2802 {
2803         struct bdaddr_list *entry;
2804
2805         if (!bacmp(bdaddr, BDADDR_ANY)) {
2806                 hci_bdaddr_list_clear(list);
2807                 return 0;
2808         }
2809
2810         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2811         if (!entry)
2812                 return -ENOENT;
2813
2814         list_del(&entry->list);
2815         kfree(entry);
2816
2817         return 0;
2818 }
2819
2820 /* This function requires the caller holds hdev->lock */
2821 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2822                                                bdaddr_t *addr, u8 addr_type)
2823 {
2824         struct hci_conn_params *params;
2825
2826         /* The conn params list only contains identity addresses */
2827         if (!hci_is_identity_address(addr, addr_type))
2828                 return NULL;
2829
2830         list_for_each_entry(params, &hdev->le_conn_params, list) {
2831                 if (bacmp(&params->addr, addr) == 0 &&
2832                     params->addr_type == addr_type) {
2833                         return params;
2834                 }
2835         }
2836
2837         return NULL;
2838 }
2839
2840 /* This function requires the caller holds hdev->lock */
2841 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2842                                                   bdaddr_t *addr, u8 addr_type)
2843 {
2844         struct hci_conn_params *param;
2845
2846         /* The list only contains identity addresses */
2847         if (!hci_is_identity_address(addr, addr_type))
2848                 return NULL;
2849
2850         list_for_each_entry(param, list, action) {
2851                 if (bacmp(&param->addr, addr) == 0 &&
2852                     param->addr_type == addr_type)
2853                         return param;
2854         }
2855
2856         return NULL;
2857 }
2858
2859 /* This function requires the caller holds hdev->lock */
2860 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2861                                             bdaddr_t *addr, u8 addr_type)
2862 {
2863         struct hci_conn_params *params;
2864
2865         if (!hci_is_identity_address(addr, addr_type))
2866                 return NULL;
2867
2868         params = hci_conn_params_lookup(hdev, addr, addr_type);
2869         if (params)
2870                 return params;
2871
2872         params = kzalloc(sizeof(*params), GFP_KERNEL);
2873         if (!params) {
2874                 BT_ERR("Out of memory");
2875                 return NULL;
2876         }
2877
2878         bacpy(&params->addr, addr);
2879         params->addr_type = addr_type;
2880
2881         list_add(&params->list, &hdev->le_conn_params);
2882         INIT_LIST_HEAD(&params->action);
2883
2884         params->conn_min_interval = hdev->le_conn_min_interval;
2885         params->conn_max_interval = hdev->le_conn_max_interval;
2886         params->conn_latency = hdev->le_conn_latency;
2887         params->supervision_timeout = hdev->le_supv_timeout;
2888         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2889
2890         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2891
2892         return params;
2893 }
2894
2895 static void hci_conn_params_free(struct hci_conn_params *params)
2896 {
2897         if (params->conn) {
2898                 hci_conn_drop(params->conn);
2899                 hci_conn_put(params->conn);
2900         }
2901
2902         list_del(&params->action);
2903         list_del(&params->list);
2904         kfree(params);
2905 }
2906
2907 /* This function requires the caller holds hdev->lock */
2908 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2909 {
2910         struct hci_conn_params *params;
2911
2912         params = hci_conn_params_lookup(hdev, addr, addr_type);
2913         if (!params)
2914                 return;
2915
2916         hci_conn_params_free(params);
2917
2918         hci_update_background_scan(hdev);
2919
2920         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2921 }
2922
2923 /* This function requires the caller holds hdev->lock */
2924 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2925 {
2926         struct hci_conn_params *params, *tmp;
2927
2928         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2929                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2930                         continue;
2931                 list_del(&params->list);
2932                 kfree(params);
2933         }
2934
2935         BT_DBG("All LE disabled connection parameters were removed");
2936 }
2937
2938 /* This function requires the caller holds hdev->lock */
2939 void hci_conn_params_clear_all(struct hci_dev *hdev)
2940 {
2941         struct hci_conn_params *params, *tmp;
2942
2943         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2944                 hci_conn_params_free(params);
2945
2946         hci_update_background_scan(hdev);
2947
2948         BT_DBG("All LE connection parameters were removed");
2949 }
2950
2951 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2952 {
2953         if (status) {
2954                 BT_ERR("Failed to start inquiry: status %d", status);
2955
2956                 hci_dev_lock(hdev);
2957                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2958                 hci_dev_unlock(hdev);
2959                 return;
2960         }
2961 }
2962
2963 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2964                                           u16 opcode)
2965 {
2966         /* General inquiry access code (GIAC) */
2967         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2968         struct hci_cp_inquiry cp;
2969         int err;
2970
2971         if (status) {
2972                 BT_ERR("Failed to disable LE scanning: status %d", status);
2973                 return;
2974         }
2975
2976         hdev->discovery.scan_start = 0;
2977
2978         switch (hdev->discovery.type) {
2979         case DISCOV_TYPE_LE:
2980                 hci_dev_lock(hdev);
2981                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2982                 hci_dev_unlock(hdev);
2983                 break;
2984
2985         case DISCOV_TYPE_INTERLEAVED:
2986                 hci_dev_lock(hdev);
2987
2988                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2989                              &hdev->quirks)) {
2990                         /* If we were running LE only scan, change discovery
2991                          * state. If we were running both LE and BR/EDR inquiry
2992                          * simultaneously, and BR/EDR inquiry is already
2993                          * finished, stop discovery, otherwise BR/EDR inquiry
2994                          * will stop discovery when finished. If a remote device
2995                          * name is being resolved, do not change discovery state.
2996                          */
2997                         if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2998                             hdev->discovery.state != DISCOVERY_RESOLVING)
2999                                 hci_discovery_set_state(hdev,
3000                                                         DISCOVERY_STOPPED);
3001                 } else {
3002                         struct hci_request req;
3003
3004                         hci_inquiry_cache_flush(hdev);
3005
3006                         hci_req_init(&req, hdev);
3007
3008                         memset(&cp, 0, sizeof(cp));
3009                         memcpy(&cp.lap, lap, sizeof(cp.lap));
3010                         cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3011                         hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3012
3013                         err = hci_req_run(&req, inquiry_complete);
3014                         if (err) {
3015                                 BT_ERR("Inquiry request failed: err %d", err);
3016                                 hci_discovery_set_state(hdev,
3017                                                         DISCOVERY_STOPPED);
3018                         }
3019                 }
3020
3021                 hci_dev_unlock(hdev);
3022                 break;
3023         }
3024 }
3025
3026 static void le_scan_disable_work(struct work_struct *work)
3027 {
3028         struct hci_dev *hdev = container_of(work, struct hci_dev,
3029                                             le_scan_disable.work);
3030         struct hci_request req;
3031         int err;
3032
3033         BT_DBG("%s", hdev->name);
3034
3035         cancel_delayed_work_sync(&hdev->le_scan_restart);
3036
3037         hci_req_init(&req, hdev);
3038
3039         hci_req_add_le_scan_disable(&req);
3040
3041         err = hci_req_run(&req, le_scan_disable_work_complete);
3042         if (err)
3043                 BT_ERR("Disable LE scanning request failed: err %d", err);
3044 }
3045
3046 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3047                                           u16 opcode)
3048 {
3049         unsigned long timeout, duration, scan_start, now;
3050
3051         BT_DBG("%s", hdev->name);
3052
3053         if (status) {
3054                 BT_ERR("Failed to restart LE scan: status %d", status);
3055                 return;
3056         }
3057
3058         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3059             !hdev->discovery.scan_start)
3060                 return;
3061
3062         /* When the scan was started, hdev->le_scan_disable has been queued
3063          * after duration from scan_start. During scan restart this job
3064          * has been canceled, and we need to queue it again with the proper
3065          * timeout to make sure that the scan does not run indefinitely.
3066          */
3067         duration = hdev->discovery.scan_duration;
3068         scan_start = hdev->discovery.scan_start;
3069         now = jiffies;
3070         if (now - scan_start <= duration) {
3071                 int elapsed;
3072
3073                 if (now >= scan_start)
3074                         elapsed = now - scan_start;
3075                 else
3076                         elapsed = ULONG_MAX - scan_start + now;
3077
3078                 timeout = duration - elapsed;
3079         } else {
3080                 timeout = 0;
3081         }
3082         queue_delayed_work(hdev->workqueue,
3083                            &hdev->le_scan_disable, timeout);
3084 }
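
/*
 * Worked example for the timeout computation above (values in jiffies,
 * purely illustrative): with scan_start = 1000, duration = 1280 and a
 * restart completing at now = 1480, elapsed = 480 and the disable work
 * is re-queued with timeout = 1280 - 480 = 800, so the total scan time
 * stays at the configured duration. The else branch of the elapsed
 * calculation keeps this working, to within a jiffy, when the jiffies
 * counter wraps between scan_start and now.
 */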
3085
3086 static void le_scan_restart_work(struct work_struct *work)
3087 {
3088         struct hci_dev *hdev = container_of(work, struct hci_dev,
3089                                             le_scan_restart.work);
3090         struct hci_request req;
3091         struct hci_cp_le_set_scan_enable cp;
3092         int err;
3093
3094         BT_DBG("%s", hdev->name);
3095
3096         /* If controller is not scanning we are done. */
3097         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3098                 return;
3099
3100         hci_req_init(&req, hdev);
3101
3102         hci_req_add_le_scan_disable(&req);
3103
3104         memset(&cp, 0, sizeof(cp));
3105         cp.enable = LE_SCAN_ENABLE;
3106         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3107         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3108
3109         err = hci_req_run(&req, le_scan_restart_work_complete);
3110         if (err)
3111                 BT_ERR("Restart LE scan request failed: err %d", err);
3112 }
3113
3114 /* Copy the Identity Address of the controller.
3115  *
3116  * If the controller has a public BD_ADDR, then by default use that one.
3117  * If this is a LE only controller without a public address, default to
3118  * the static random address.
3119  *
3120  * For debugging purposes it is possible to force controllers with a
3121  * public address to use the static random address instead.
3122  *
3123  * In case BR/EDR has been disabled on a dual-mode controller and
3124  * userspace has configured a static address, then that address
3125  * becomes the identity address instead of the public BR/EDR address.
3126  */
3127 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3128                                u8 *bdaddr_type)
3129 {
3130         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3131             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3132             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3133              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3134                 bacpy(bdaddr, &hdev->static_addr);
3135                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3136         } else {
3137                 bacpy(bdaddr, &hdev->bdaddr);
3138                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3139         }
3140 }
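
/*
 * The selection above, as a decision list (first match wins):
 *
 *   1. HCI_FORCE_STATIC_ADDR set (debug override)       -> static random
 *   2. no public bdaddr (LE-only controller)            -> static random
 *   3. BR/EDR disabled and a static address configured  -> static random
 *   4. otherwise                                        -> public bdaddr
 */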
3141
3142 /* Alloc HCI device */
3143 struct hci_dev *hci_alloc_dev(void)
3144 {
3145         struct hci_dev *hdev;
3146
3147         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3148         if (!hdev)
3149                 return NULL;
3150
3151         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3152         hdev->esco_type = (ESCO_HV1);
3153         hdev->link_mode = (HCI_LM_ACCEPT);
3154         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
3155         hdev->io_capability = 0x03;     /* No Input No Output */
3156         hdev->manufacturer = 0xffff;    /* Default to internal use */
3157         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3158         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3159         hdev->adv_instance_cnt = 0;
3160         hdev->cur_adv_instance = 0x00;
3161         hdev->adv_instance_timeout = 0;
3162
3163         hdev->sniff_max_interval = 800;
3164         hdev->sniff_min_interval = 80;
3165
3166         hdev->le_adv_channel_map = 0x07;
3167         hdev->le_adv_min_interval = 0x0800;
3168         hdev->le_adv_max_interval = 0x0800;
3169         hdev->le_scan_interval = 0x0060;
3170         hdev->le_scan_window = 0x0030;
3171         hdev->le_conn_min_interval = 0x0028;
3172         hdev->le_conn_max_interval = 0x0038;
3173         hdev->le_conn_latency = 0x0000;
3174         hdev->le_supv_timeout = 0x002a;
3175         hdev->le_def_tx_len = 0x001b;
3176         hdev->le_def_tx_time = 0x0148;
3177         hdev->le_max_tx_len = 0x001b;
3178         hdev->le_max_tx_time = 0x0148;
3179         hdev->le_max_rx_len = 0x001b;
3180         hdev->le_max_rx_time = 0x0148;
3181
3182         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3183         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3184         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3185         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3186
3187         mutex_init(&hdev->lock);
3188         mutex_init(&hdev->req_lock);
3189
3190         INIT_LIST_HEAD(&hdev->mgmt_pending);
3191         INIT_LIST_HEAD(&hdev->blacklist);
3192         INIT_LIST_HEAD(&hdev->whitelist);
3193         INIT_LIST_HEAD(&hdev->uuids);
3194         INIT_LIST_HEAD(&hdev->link_keys);
3195         INIT_LIST_HEAD(&hdev->long_term_keys);
3196         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3197         INIT_LIST_HEAD(&hdev->remote_oob_data);
3198         INIT_LIST_HEAD(&hdev->le_white_list);
3199         INIT_LIST_HEAD(&hdev->le_conn_params);
3200         INIT_LIST_HEAD(&hdev->pend_le_conns);
3201         INIT_LIST_HEAD(&hdev->pend_le_reports);
3202         INIT_LIST_HEAD(&hdev->conn_hash.list);
3203         INIT_LIST_HEAD(&hdev->adv_instances);
3204
3205         INIT_WORK(&hdev->rx_work, hci_rx_work);
3206         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3207         INIT_WORK(&hdev->tx_work, hci_tx_work);
3208         INIT_WORK(&hdev->power_on, hci_power_on);
3209         INIT_WORK(&hdev->error_reset, hci_error_reset);
3210
3211         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3212         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3213         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3214         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3215         INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
3216
3217         skb_queue_head_init(&hdev->rx_q);
3218         skb_queue_head_init(&hdev->cmd_q);
3219         skb_queue_head_init(&hdev->raw_q);
3220
3221         init_waitqueue_head(&hdev->req_wait_q);
3222
3223         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3224
3225         hci_init_sysfs(hdev);
3226         discovery_init(hdev);
3227         adv_info_init(hdev);
3228
3229         return hdev;
3230 }
3231 EXPORT_SYMBOL(hci_alloc_dev);
3232
3233 /* Free HCI device */
3234 void hci_free_dev(struct hci_dev *hdev)
3235 {
3236         /* will be freed via the device release callback */
3237         put_device(&hdev->dev);
3238 }
3239 EXPORT_SYMBOL(hci_free_dev);
3240
3241 /* Register HCI device */
3242 int hci_register_dev(struct hci_dev *hdev)
3243 {
3244         int id, error;
3245
3246         if (!hdev->open || !hdev->close || !hdev->send)
3247                 return -EINVAL;
3248
3249         /* Do not allow HCI_AMP devices to register at index 0,
3250          * so the index can be used as the AMP controller ID.
3251          */
3252         switch (hdev->dev_type) {
3253         case HCI_BREDR:
3254                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3255                 break;
3256         case HCI_AMP:
3257                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3258                 break;
3259         default:
3260                 return -EINVAL;
3261         }
3262
3263         if (id < 0)
3264                 return id;
3265
3266         sprintf(hdev->name, "hci%d", id);
3267         hdev->id = id;
3268
3269         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3270
3271         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3272                                           WQ_MEM_RECLAIM, 1, hdev->name);
3273         if (!hdev->workqueue) {
3274                 error = -ENOMEM;
3275                 goto err;
3276         }
3277
3278         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3279                                               WQ_MEM_RECLAIM, 1, hdev->name);
3280         if (!hdev->req_workqueue) {
3281                 destroy_workqueue(hdev->workqueue);
3282                 error = -ENOMEM;
3283                 goto err;
3284         }
3285
3286         if (!IS_ERR_OR_NULL(bt_debugfs))
3287                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3288
3289         dev_set_name(&hdev->dev, "%s", hdev->name);
3290
3291         error = device_add(&hdev->dev);
3292         if (error < 0)
3293                 goto err_wqueue;
3294
3295         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3296                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3297                                     hdev);
3298         if (hdev->rfkill) {
3299                 if (rfkill_register(hdev->rfkill) < 0) {
3300                         rfkill_destroy(hdev->rfkill);
3301                         hdev->rfkill = NULL;
3302                 }
3303         }
3304
3305         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3306                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3307
3308         hci_dev_set_flag(hdev, HCI_SETUP);
3309         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3310
3311         if (hdev->dev_type == HCI_BREDR) {
3312                 /* Assume BR/EDR support until proven otherwise (such as
3313                  * through reading supported features during init).
3314                  */
3315                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3316         }
3317
3318         write_lock(&hci_dev_list_lock);
3319         list_add(&hdev->list, &hci_dev_list);
3320         write_unlock(&hci_dev_list_lock);
3321
3322         /* Devices that are marked for raw-only usage are unconfigured
3323          * and should not be included in normal operation.
3324          */
3325         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3326                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3327
3328         hci_notify(hdev, HCI_DEV_REG);
3329         hci_dev_hold(hdev);
3330
3331         queue_work(hdev->req_workqueue, &hdev->power_on);
3332
3333         return id;
3334
3335 err_wqueue:
3336         destroy_workqueue(hdev->workqueue);
3337         destroy_workqueue(hdev->req_workqueue);
3338 err:
3339         ida_simple_remove(&hci_index_ida, hdev->id);
3340
3341         return error;
3342 }
3343 EXPORT_SYMBOL(hci_register_dev);
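
/* Typical driver usage of the alloc/register pair above (a condensed
 * sketch modelled on existing HCI drivers; the example_* callbacks and
 * example_probe() are hypothetical):
 */
#if 0
static int example_open(struct hci_dev *hdev);
static int example_close(struct hci_dev *hdev);
static int example_send(struct hci_dev *hdev, struct sk_buff *skb);

static int example_probe(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        /* Mandatory callbacks checked by hci_register_dev() */
        hdev->bus = HCI_USB;
        hdev->open = example_open;
        hdev->close = example_close;
        hdev->send = example_send;

        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
                return err;
        }

        return 0;
}
#endif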
3344
3345 /* Unregister HCI device */
3346 void hci_unregister_dev(struct hci_dev *hdev)
3347 {
3348         int id;
3349
3350         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3351
3352         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3353
3354         id = hdev->id;
3355
3356         write_lock(&hci_dev_list_lock);
3357         list_del(&hdev->list);
3358         write_unlock(&hci_dev_list_lock);
3359
3360         hci_dev_do_close(hdev);
3361
3362         cancel_work_sync(&hdev->power_on);
3363
3364         if (!test_bit(HCI_INIT, &hdev->flags) &&
3365             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3366             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3367                 hci_dev_lock(hdev);
3368                 mgmt_index_removed(hdev);
3369                 hci_dev_unlock(hdev);
3370         }
3371
3372         /* mgmt_index_removed should take care of emptying the
3373          * pending list */
3374         BUG_ON(!list_empty(&hdev->mgmt_pending));
3375
3376         hci_notify(hdev, HCI_DEV_UNREG);
3377
3378         if (hdev->rfkill) {
3379                 rfkill_unregister(hdev->rfkill);
3380                 rfkill_destroy(hdev->rfkill);
3381         }
3382
3383         device_del(&hdev->dev);
3384
3385         debugfs_remove_recursive(hdev->debugfs);
3386
3387         destroy_workqueue(hdev->workqueue);
3388         destroy_workqueue(hdev->req_workqueue);
3389
3390         hci_dev_lock(hdev);
3391         hci_bdaddr_list_clear(&hdev->blacklist);
3392         hci_bdaddr_list_clear(&hdev->whitelist);
3393         hci_uuids_clear(hdev);
3394         hci_link_keys_clear(hdev);
3395         hci_smp_ltks_clear(hdev);
3396         hci_smp_irks_clear(hdev);
3397         hci_remote_oob_data_clear(hdev);
3398         hci_adv_instances_clear(hdev);
3399         hci_bdaddr_list_clear(&hdev->le_white_list);
3400         hci_conn_params_clear_all(hdev);
3401         hci_discovery_filter_clear(hdev);
3402         hci_dev_unlock(hdev);
3403
3404         hci_dev_put(hdev);
3405
3406         ida_simple_remove(&hci_index_ida, id);
3407 }
3408 EXPORT_SYMBOL(hci_unregister_dev);
3409
3410 /* Suspend HCI device */
3411 int hci_suspend_dev(struct hci_dev *hdev)
3412 {
3413         hci_notify(hdev, HCI_DEV_SUSPEND);
3414         return 0;
3415 }
3416 EXPORT_SYMBOL(hci_suspend_dev);
3417
3418 /* Resume HCI device */
3419 int hci_resume_dev(struct hci_dev *hdev)
3420 {
3421         hci_notify(hdev, HCI_DEV_RESUME);
3422         return 0;
3423 }
3424 EXPORT_SYMBOL(hci_resume_dev);
3425
3426 /* Reset HCI device */
3427 int hci_reset_dev(struct hci_dev *hdev)
3428 {
3429         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3430         struct sk_buff *skb;
3431
3432         skb = bt_skb_alloc(3, GFP_ATOMIC);
3433         if (!skb)
3434                 return -ENOMEM;
3435
3436         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3437         memcpy(skb_put(skb, 3), hw_err, 3);
3438
3439         /* Send Hardware Error to upper stack */
3440         return hci_recv_frame(hdev, skb);
3441 }
3442 EXPORT_SYMBOL(hci_reset_dev);
3443
3444 /* Receive frame from HCI drivers */
3445 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3446 {
3447         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3448                       !test_bit(HCI_INIT, &hdev->flags))) {
3449                 kfree_skb(skb);
3450                 return -ENXIO;
3451         }
3452
3453         /* Incoming skb */
3454         bt_cb(skb)->incoming = 1;
3455
3456         /* Time stamp */
3457         __net_timestamp(skb);
3458
3459         skb_queue_tail(&hdev->rx_q, skb);
3460         queue_work(hdev->workqueue, &hdev->rx_work);
3461
3462         return 0;
3463 }
3464 EXPORT_SYMBOL(hci_recv_frame);
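
/* Example driver RX path feeding hci_recv_frame() (a minimal sketch;
 * a real driver reassembles complete packets from its transport before
 * handing them over, and example_rx_event() is hypothetical):
 */
#if 0
static int example_rx_event(struct hci_dev *hdev, const void *data,
                            size_t len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        /* Tag the packet type before handing the skb to the core */
        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        memcpy(skb_put(skb, len), data, len);

        return hci_recv_frame(hdev, skb);
}
#endif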
3465
3466 /* ---- Interface to upper protocols ---- */
3467
3468 int hci_register_cb(struct hci_cb *cb)
3469 {
3470         BT_DBG("%p name %s", cb, cb->name);
3471
3472         mutex_lock(&hci_cb_list_lock);
3473         list_add_tail(&cb->list, &hci_cb_list);
3474         mutex_unlock(&hci_cb_list_lock);
3475
3476         return 0;
3477 }
3478 EXPORT_SYMBOL(hci_register_cb);
3479
3480 int hci_unregister_cb(struct hci_cb *cb)
3481 {
3482         BT_DBG("%p name %s", cb, cb->name);
3483
3484         mutex_lock(&hci_cb_list_lock);
3485         list_del(&cb->list);
3486         mutex_unlock(&hci_cb_list_lock);
3487
3488         return 0;
3489 }
3490 EXPORT_SYMBOL(hci_unregister_cb);
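
/* Example of an upper protocol hooking into the HCI core through
 * struct hci_cb (a sketch; the "example" callback and init/exit
 * functions are hypothetical):
 */
#if 0
static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
        BT_DBG("conn %p status 0x%2.2x", conn, status);
}

static struct hci_cb example_cb = {
        .name           = "example",
        .connect_cfm    = example_connect_cfm,
};

static int __init example_init(void)
{
        return hci_register_cb(&example_cb);
}

static void __exit example_exit(void)
{
        hci_unregister_cb(&example_cb);
}
#endif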
3491
3492 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3493 {
3494         int err;
3495
3496         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3497
3498         /* Time stamp */
3499         __net_timestamp(skb);
3500
3501         /* Send copy to monitor */
3502         hci_send_to_monitor(hdev, skb);
3503
3504         if (atomic_read(&hdev->promisc)) {
3505                 /* Send copy to the sockets */
3506                 hci_send_to_sock(hdev, skb);
3507         }
3508
3509         /* Get rid of skb owner, prior to sending to the driver. */
3510         skb_orphan(skb);
3511
3512         err = hdev->send(hdev, skb);
3513         if (err < 0) {
3514                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3515                 kfree_skb(skb);
3516         }
3517 }
3518
3519 /* Send HCI command */
3520 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3521                  const void *param)
3522 {
3523         struct sk_buff *skb;
3524
3525         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3526
3527         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3528         if (!skb) {
3529                 BT_ERR("%s no memory for command", hdev->name);
3530                 return -ENOMEM;
3531         }
3532
3533         /* Stand-alone HCI commands must be flagged as
3534          * single-command requests.
3535          */
3536         bt_cb(skb)->req.start = true;
3537
3538         skb_queue_tail(&hdev->cmd_q, skb);
3539         queue_work(hdev->workqueue, &hdev->cmd_work);
3540
3541         return 0;
3542 }
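
/* Example of sending a stand-alone command (a minimal sketch;
 * parameterless commands pass plen 0 and a NULL parameter buffer, and
 * completion is reported asynchronously through the event path):
 */
#if 0
static int example_read_version(struct hci_dev *hdev)
{
        return hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
#endif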
3543
3544 /* Get data from the previously sent command */
3545 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3546 {
3547         struct hci_command_hdr *hdr;
3548
3549         if (!hdev->sent_cmd)
3550                 return NULL;
3551
3552         hdr = (void *) hdev->sent_cmd->data;
3553
3554         if (hdr->opcode != cpu_to_le16(opcode))
3555                 return NULL;
3556
3557         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3558
3559         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3560 }
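
/* Example use of hci_sent_cmd_data() from a command-complete handler,
 * following the pattern used by the event processing code (a sketch;
 * the handler itself is hypothetical):
 */
#if 0
static void example_cc_le_scan_enable(struct hci_dev *hdev)
{
        struct hci_cp_le_set_scan_enable *cp;

        /* NULL means the opcode does not match the last sent command */
        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
        if (!cp)
                return;

        BT_DBG("%s enable %u", hdev->name, cp->enable);
}
#endif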
3561
3562 /* Send ACL data */
3563 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3564 {
3565         struct hci_acl_hdr *hdr;
3566         int len = skb->len;
3567
3568         skb_push(skb, HCI_ACL_HDR_SIZE);
3569         skb_reset_transport_header(skb);
3570         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3571         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3572         hdr->dlen   = cpu_to_le16(len);
3573 }
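
/* Worked example: hci_handle_pack() stores the 12-bit connection
 * handle in bits 0-11 and the packet boundary/broadcast flags in bits
 * 12-15, so handle 0x002a with flags ACL_START (0x02) packs to 0x202a;
 * hci_handle() and hci_flags() undo this split on the receive side.
 */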
3574
3575 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3576                           struct sk_buff *skb, __u16 flags)
3577 {
3578         struct hci_conn *conn = chan->conn;
3579         struct hci_dev *hdev = conn->hdev;
3580         struct sk_buff *list;
3581
3582         skb->len = skb_headlen(skb);
3583         skb->data_len = 0;
3584
3585         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3586
3587         switch (hdev->dev_type) {
3588         case HCI_BREDR:
3589                 hci_add_acl_hdr(skb, conn->handle, flags);
3590                 break;
3591         case HCI_AMP:
3592                 hci_add_acl_hdr(skb, chan->handle, flags);
3593                 break;
3594         default:
3595                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3596                 return;
3597         }
3598
3599         list = skb_shinfo(skb)->frag_list;
3600         if (!list) {
3601                 /* Non-fragmented */
3602                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3603
3604                 skb_queue_tail(queue, skb);
3605         } else {
3606                 /* Fragmented */
3607                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3608
3609                 skb_shinfo(skb)->frag_list = NULL;
3610
3611                 /* Queue all fragments atomically. We need to use spin_lock_bh
3612                  * here because of 6LoWPAN links, as there this function is
3613                  * called from softirq and using normal spin lock could cause
3614                  * deadlocks.
3615                  */
3616                 spin_lock_bh(&queue->lock);
3617
3618                 __skb_queue_tail(queue, skb);
3619
3620                 flags &= ~ACL_START;
3621                 flags |= ACL_CONT;
3622                 do {
3623                         skb = list; list = list->next;
3624
3625                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3626                         hci_add_acl_hdr(skb, conn->handle, flags);
3627
3628                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3629
3630                         __skb_queue_tail(queue, skb);
3631                 } while (list);
3632
3633                 spin_unlock_bh(&queue->lock);
3634         }
3635 }
3636
3637 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3638 {
3639         struct hci_dev *hdev = chan->conn->hdev;
3640
3641         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3642
3643         hci_queue_acl(chan, &chan->data_q, skb, flags);
3644
3645         queue_work(hdev->workqueue, &hdev->tx_work);
3646 }
3647
3648 /* Send SCO data */
3649 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3650 {
3651         struct hci_dev *hdev = conn->hdev;
3652         struct hci_sco_hdr hdr;
3653
3654         BT_DBG("%s len %d", hdev->name, skb->len);
3655
3656         hdr.handle = cpu_to_le16(conn->handle);
3657         hdr.dlen   = skb->len;
3658
3659         skb_push(skb, HCI_SCO_HDR_SIZE);
3660         skb_reset_transport_header(skb);
3661         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3662
3663         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3664
3665         skb_queue_tail(&conn->data_q, skb);
3666         queue_work(hdev->workqueue, &hdev->tx_work);
3667 }
3668
3669 /* ---- HCI TX task (outgoing data) ---- */
3670
3671 /* HCI Connection scheduler */
3672 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3673                                      int *quote)
3674 {
3675         struct hci_conn_hash *h = &hdev->conn_hash;
3676         struct hci_conn *conn = NULL, *c;
3677         unsigned int num = 0, min = ~0;
3678
3679         /* We don't have to lock device here. Connections are always
3680          * added and removed with TX task disabled. */
3681
3682         rcu_read_lock();
3683
3684         list_for_each_entry_rcu(c, &h->list, list) {
3685                 if (c->type != type || skb_queue_empty(&c->data_q))
3686                         continue;
3687
3688                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3689                         continue;
3690
3691                 num++;
3692
3693                 if (c->sent < min) {
3694                         min  = c->sent;
3695                         conn = c;
3696                 }
3697
3698                 if (hci_conn_num(hdev, type) == num)
3699                         break;
3700         }
3701
3702         rcu_read_unlock();
3703
3704         if (conn) {
3705                 int cnt, q;
3706
3707                 switch (conn->type) {
3708                 case ACL_LINK:
3709                         cnt = hdev->acl_cnt;
3710                         break;
3711                 case SCO_LINK:
3712                 case ESCO_LINK:
3713                         cnt = hdev->sco_cnt;
3714                         break;
3715                 case LE_LINK:
3716                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3717                         break;
3718                 default:
3719                         cnt = 0;
3720                         BT_ERR("Unknown link type");
3721                 }
3722
3723                 q = cnt / num;
3724                 *quote = q ? q : 1;
3725         } else
3726                 *quote = 0;
3727
3728         BT_DBG("conn %p quote %d", conn, *quote);
3729         return conn;
3730 }
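
/* Worked example: with hdev->acl_cnt = 8 free ACL slots and num = 3
 * ACL connections holding queued data, the connection picked above
 * gets a quote of 8 / 3 = 2 packets for this scheduling round; when
 * cnt / num rounds down to 0 the quote is clamped to 1 so the least
 * used connection still makes progress.
 */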
3731
3732 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3733 {
3734         struct hci_conn_hash *h = &hdev->conn_hash;
3735         struct hci_conn *c;
3736
3737         BT_ERR("%s link tx timeout", hdev->name);
3738
3739         rcu_read_lock();
3740
3741         /* Kill stalled connections */
3742         list_for_each_entry_rcu(c, &h->list, list) {
3743                 if (c->type == type && c->sent) {
3744                         BT_ERR("%s killing stalled connection %pMR",
3745                                hdev->name, &c->dst);
3746                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3747                 }
3748         }
3749
3750         rcu_read_unlock();
3751 }
3752
3753 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3754                                       int *quote)
3755 {
3756         struct hci_conn_hash *h = &hdev->conn_hash;
3757         struct hci_chan *chan = NULL;
3758         unsigned int num = 0, min = ~0, cur_prio = 0;
3759         struct hci_conn *conn;
3760         int cnt, q, conn_num = 0;
3761
3762         BT_DBG("%s", hdev->name);
3763
3764         rcu_read_lock();
3765
3766         list_for_each_entry_rcu(conn, &h->list, list) {
3767                 struct hci_chan *tmp;
3768
3769                 if (conn->type != type)
3770                         continue;
3771
3772                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3773                         continue;
3774
3775                 conn_num++;
3776
3777                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3778                         struct sk_buff *skb;
3779
3780                         if (skb_queue_empty(&tmp->data_q))
3781                                 continue;
3782
3783                         skb = skb_peek(&tmp->data_q);
3784                         if (skb->priority < cur_prio)
3785                                 continue;
3786
3787                         if (skb->priority > cur_prio) {
3788                                 num = 0;
3789                                 min = ~0;
3790                                 cur_prio = skb->priority;
3791                         }
3792
3793                         num++;
3794
3795                         if (conn->sent < min) {
3796                                 min  = conn->sent;
3797                                 chan = tmp;
3798                         }
3799                 }
3800
3801                 if (hci_conn_num(hdev, type) == conn_num)
3802                         break;
3803         }
3804
3805         rcu_read_unlock();
3806
3807         if (!chan)
3808                 return NULL;
3809
3810         switch (chan->conn->type) {
3811         case ACL_LINK:
3812                 cnt = hdev->acl_cnt;
3813                 break;
3814         case AMP_LINK:
3815                 cnt = hdev->block_cnt;
3816                 break;
3817         case SCO_LINK:
3818         case ESCO_LINK:
3819                 cnt = hdev->sco_cnt;
3820                 break;
3821         case LE_LINK:
3822                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3823                 break;
3824         default:
3825                 cnt = 0;
3826                 BT_ERR("Unknown link type");
3827         }
3828
3829         q = cnt / num;
3830         *quote = q ? q : 1;
3831         BT_DBG("chan %p quote %d", chan, *quote);
3832         return chan;
3833 }
3834
3835 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3836 {
3837         struct hci_conn_hash *h = &hdev->conn_hash;
3838         struct hci_conn *conn;
3839         int num = 0;
3840
3841         BT_DBG("%s", hdev->name);
3842
3843         rcu_read_lock();
3844
3845         list_for_each_entry_rcu(conn, &h->list, list) {
3846                 struct hci_chan *chan;
3847
3848                 if (conn->type != type)
3849                         continue;
3850
3851                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3852                         continue;
3853
3854                 num++;
3855
3856                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3857                         struct sk_buff *skb;
3858
3859                         if (chan->sent) {
3860                                 chan->sent = 0;
3861                                 continue;
3862                         }
3863
3864                         if (skb_queue_empty(&chan->data_q))
3865                                 continue;
3866
3867                         skb = skb_peek(&chan->data_q);
3868                         if (skb->priority >= HCI_PRIO_MAX - 1)
3869                                 continue;
3870
3871                         skb->priority = HCI_PRIO_MAX - 1;
3872
3873                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3874                                skb->priority);
3875                 }
3876
3877                 if (hci_conn_num(hdev, type) == num)
3878                         break;
3879         }
3880
3881         rcu_read_unlock();
3882 }
3884
3885 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3886 {
3887         /* Calculate count of blocks used by this packet */
3888         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3889 }
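
/* Worked example: with block_len = 339 bytes, a 1021-byte ACL packet
 * carries 1021 - 4 = 1017 bytes of payload after the ACL header and
 * therefore occupies DIV_ROUND_UP(1017, 339) = 3 controller blocks.
 */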
3890
3891 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3892 {
3893         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3894                 /* ACL tx timeout must be longer than maximum
3895                  * link supervision timeout (40.9 seconds) */
3896                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3897                                        HCI_ACL_TX_TIMEOUT))
3898                         hci_link_tx_to(hdev, ACL_LINK);
3899         }
3900 }
3901
3902 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3903 {
3904         unsigned int cnt = hdev->acl_cnt;
3905         struct hci_chan *chan;
3906         struct sk_buff *skb;
3907         int quote;
3908
3909         __check_timeout(hdev, cnt);
3910
3911         while (hdev->acl_cnt &&
3912                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3913                 u32 priority = (skb_peek(&chan->data_q))->priority;
3914                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3915                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3916                                skb->len, skb->priority);
3917
3918                         /* Stop if priority has changed */
3919                         if (skb->priority < priority)
3920                                 break;
3921
3922                         skb = skb_dequeue(&chan->data_q);
3923
3924                         hci_conn_enter_active_mode(chan->conn,
3925                                                    bt_cb(skb)->force_active);
3926
3927                         hci_send_frame(hdev, skb);
3928                         hdev->acl_last_tx = jiffies;
3929
3930                         hdev->acl_cnt--;
3931                         chan->sent++;
3932                         chan->conn->sent++;
3933                 }
3934         }
3935
3936         if (cnt != hdev->acl_cnt)
3937                 hci_prio_recalculate(hdev, ACL_LINK);
3938 }
3939
3940 static void hci_sched_acl_blk(struct hci_dev *hdev)
3941 {
3942         unsigned int cnt = hdev->block_cnt;
3943         struct hci_chan *chan;
3944         struct sk_buff *skb;
3945         int quote;
3946         u8 type;
3947
3948         __check_timeout(hdev, cnt);
3949
3950         BT_DBG("%s", hdev->name);
3951
3952         if (hdev->dev_type == HCI_AMP)
3953                 type = AMP_LINK;
3954         else
3955                 type = ACL_LINK;
3956
3957         while (hdev->block_cnt > 0 &&
3958                (chan = hci_chan_sent(hdev, type, &quote))) {
3959                 u32 priority = (skb_peek(&chan->data_q))->priority;
3960                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3961                         int blocks;
3962
3963                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3964                                skb->len, skb->priority);
3965
3966                         /* Stop if priority has changed */
3967                         if (skb->priority < priority)
3968                                 break;
3969
3970                         skb = skb_dequeue(&chan->data_q);
3971
3972                         blocks = __get_blocks(hdev, skb);
3973                         if (blocks > hdev->block_cnt)
3974                                 return;
3975
3976                         hci_conn_enter_active_mode(chan->conn,
3977                                                    bt_cb(skb)->force_active);
3978
3979                         hci_send_frame(hdev, skb);
3980                         hdev->acl_last_tx = jiffies;
3981
3982                         hdev->block_cnt -= blocks;
3983                         quote -= blocks;
3984
3985                         chan->sent += blocks;
3986                         chan->conn->sent += blocks;
3987                 }
3988         }
3989
3990         if (cnt != hdev->block_cnt)
3991                 hci_prio_recalculate(hdev, type);
3992 }
3993
3994 static void hci_sched_acl(struct hci_dev *hdev)
3995 {
3996         BT_DBG("%s", hdev->name);
3997
3998         /* No ACL links on a BR/EDR controller, nothing to schedule */
3999         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4000                 return;
4001
4002         /* No AMP links on an AMP controller, nothing to schedule */
4003         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4004                 return;
4005
4006         switch (hdev->flow_ctl_mode) {
4007         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4008                 hci_sched_acl_pkt(hdev);
4009                 break;
4010
4011         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4012                 hci_sched_acl_blk(hdev);
4013                 break;
4014         }
4015 }
4016
4017 /* Schedule SCO */
4018 static void hci_sched_sco(struct hci_dev *hdev)
4019 {
4020         struct hci_conn *conn;
4021         struct sk_buff *skb;
4022         int quote;
4023
4024         BT_DBG("%s", hdev->name);
4025
4026         if (!hci_conn_num(hdev, SCO_LINK))
4027                 return;
4028
4029         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4030                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4031                         BT_DBG("skb %p len %d", skb, skb->len);
4032                         hci_send_frame(hdev, skb);
4033
4034                         conn->sent++;
4035                         if (conn->sent == ~0)
4036                                 conn->sent = 0;
4037                 }
4038         }
4039 }
4040
4041 static void hci_sched_esco(struct hci_dev *hdev)
4042 {
4043         struct hci_conn *conn;
4044         struct sk_buff *skb;
4045         int quote;
4046
4047         BT_DBG("%s", hdev->name);
4048
4049         if (!hci_conn_num(hdev, ESCO_LINK))
4050                 return;
4051
4052         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4053                                                      &quote))) {
4054                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4055                         BT_DBG("skb %p len %d", skb, skb->len);
4056                         hci_send_frame(hdev, skb);
4057
4058                         conn->sent++;
4059                         if (conn->sent == ~0)
4060                                 conn->sent = 0;
4061                 }
4062         }
4063 }
4064
4065 static void hci_sched_le(struct hci_dev *hdev)
4066 {
4067         struct hci_chan *chan;
4068         struct sk_buff *skb;
4069         int quote, cnt, tmp;
4070
4071         BT_DBG("%s", hdev->name);
4072
4073         if (!hci_conn_num(hdev, LE_LINK))
4074                 return;
4075
4076         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4077                 /* LE tx timeout must be longer than maximum
4078                  * link supervision timeout (40.9 seconds) */
4079                 if (!hdev->le_cnt && hdev->le_pkts &&
4080                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4081                         hci_link_tx_to(hdev, LE_LINK);
4082         }
4083
4084         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4085         tmp = cnt;
4086         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4087                 u32 priority = (skb_peek(&chan->data_q))->priority;
4088                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4089                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4090                                skb->len, skb->priority);
4091
4092                         /* Stop if priority has changed */
4093                         if (skb->priority < priority)
4094                                 break;
4095
4096                         skb = skb_dequeue(&chan->data_q);
4097
4098                         hci_send_frame(hdev, skb);
4099                         hdev->le_last_tx = jiffies;
4100
4101                         cnt--;
4102                         chan->sent++;
4103                         chan->conn->sent++;
4104                 }
4105         }
4106
4107         if (hdev->le_pkts)
4108                 hdev->le_cnt = cnt;
4109         else
4110                 hdev->acl_cnt = cnt;
4111
4112         if (cnt != tmp)
4113                 hci_prio_recalculate(hdev, LE_LINK);
4114 }
4115
4116 static void hci_tx_work(struct work_struct *work)
4117 {
4118         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4119         struct sk_buff *skb;
4120
4121         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4122                hdev->sco_cnt, hdev->le_cnt);
4123
4124         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4125                 /* Schedule queues and send stuff to HCI driver */
4126                 hci_sched_acl(hdev);
4127                 hci_sched_sco(hdev);
4128                 hci_sched_esco(hdev);
4129                 hci_sched_le(hdev);
4130         }
4131
4132         /* Send next queued raw (unknown type) packet */
4133         while ((skb = skb_dequeue(&hdev->raw_q)))
4134                 hci_send_frame(hdev, skb);
4135 }
4136
4137 /* ----- HCI RX task (incoming data processing) ----- */
4138
4139 /* ACL data packet */
4140 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4141 {
4142         struct hci_acl_hdr *hdr = (void *) skb->data;
4143         struct hci_conn *conn;
4144         __u16 handle, flags;
4145
4146         skb_pull(skb, HCI_ACL_HDR_SIZE);
4147
4148         handle = __le16_to_cpu(hdr->handle);
4149         flags  = hci_flags(handle);
4150         handle = hci_handle(handle);
4151
4152         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4153                handle, flags);
4154
4155         hdev->stat.acl_rx++;
4156
4157         hci_dev_lock(hdev);
4158         conn = hci_conn_hash_lookup_handle(hdev, handle);
4159         hci_dev_unlock(hdev);
4160
4161         if (conn) {
4162                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4163
4164                 /* Send to upper protocol */
4165                 l2cap_recv_acldata(conn, skb, flags);
4166                 return;
4167         } else {
4168                 BT_ERR("%s ACL packet for unknown connection handle %d",
4169                        hdev->name, handle);
4170         }
4171
4172         kfree_skb(skb);
4173 }
4174
4175 /* SCO data packet */
4176 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4177 {
4178         struct hci_sco_hdr *hdr = (void *) skb->data;
4179         struct hci_conn *conn;
4180         __u16 handle;
4181
4182         skb_pull(skb, HCI_SCO_HDR_SIZE);
4183
4184         handle = __le16_to_cpu(hdr->handle);
4185
4186         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4187
4188         hdev->stat.sco_rx++;
4189
4190         hci_dev_lock(hdev);
4191         conn = hci_conn_hash_lookup_handle(hdev, handle);
4192         hci_dev_unlock(hdev);
4193
4194         if (conn) {
4195                 /* Send to upper protocol */
4196                 sco_recv_scodata(conn, skb);
4197                 return;
4198         } else {
4199                 BT_ERR("%s SCO packet for unknown connection handle %d",
4200                        hdev->name, handle);
4201         }
4202
4203         kfree_skb(skb);
4204 }
4205
4206 static bool hci_req_is_complete(struct hci_dev *hdev)
4207 {
4208         struct sk_buff *skb;
4209
4210         skb = skb_peek(&hdev->cmd_q);
4211         if (!skb)
4212                 return true;
4213
4214         return bt_cb(skb)->req.start;
4215 }
4216
4217 static void hci_resend_last(struct hci_dev *hdev)
4218 {
4219         struct hci_command_hdr *sent;
4220         struct sk_buff *skb;
4221         u16 opcode;
4222
4223         if (!hdev->sent_cmd)
4224                 return;
4225
4226         sent = (void *) hdev->sent_cmd->data;
4227         opcode = __le16_to_cpu(sent->opcode);
4228         if (opcode == HCI_OP_RESET)
4229                 return;
4230
4231         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4232         if (!skb)
4233                 return;
4234
4235         skb_queue_head(&hdev->cmd_q, skb);
4236         queue_work(hdev->workqueue, &hdev->cmd_work);
4237 }
4238
4239 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4240                           hci_req_complete_t *req_complete,
4241                           hci_req_complete_skb_t *req_complete_skb)
4242 {
4243         struct sk_buff *skb;
4244         unsigned long flags;
4245
4246         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4247
4248         /* If the completed command doesn't match the last one that was
4249          * sent we need to do special handling of it.
4250          */
4251         if (!hci_sent_cmd_data(hdev, opcode)) {
4252                 /* Some CSR based controllers generate a spontaneous
4253                  * reset complete event during init and any pending
4254                  * command will never be completed. In such a case we
4255                  * need to resend whatever was the last sent
4256                  * command.
4257                  */
4258                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4259                         hci_resend_last(hdev);
4260
4261                 return;
4262         }
4263
4264         /* If the command succeeded and there's still more commands in
4265          * this request the request is not yet complete.
4266          */
4267         if (!status && !hci_req_is_complete(hdev))
4268                 return;
4269
4270         /* If this was the last command in a request the complete
4271          * callback would be found in hdev->sent_cmd instead of the
4272          * command queue (hdev->cmd_q).
4273          */
4274         if (bt_cb(hdev->sent_cmd)->req.complete) {
4275                 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4276                 return;
4277         }
4278
4279         if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4280                 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4281                 return;
4282         }
4283
4284         /* Remove all pending commands belonging to this request */
4285         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4286         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4287                 if (bt_cb(skb)->req.start) {
4288                         __skb_queue_head(&hdev->cmd_q, skb);
4289                         break;
4290                 }
4291
4292                 *req_complete = bt_cb(skb)->req.complete;
4293                 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4294                 kfree_skb(skb);
4295         }
4296         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4297 }
4298
4299 static void hci_rx_work(struct work_struct *work)
4300 {
4301         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4302         struct sk_buff *skb;
4303
4304         BT_DBG("%s", hdev->name);
4305
4306         while ((skb = skb_dequeue(&hdev->rx_q))) {
4307                 /* Send copy to monitor */
4308                 hci_send_to_monitor(hdev, skb);
4309
4310                 if (atomic_read(&hdev->promisc)) {
4311                         /* Send copy to the sockets */
4312                         hci_send_to_sock(hdev, skb);
4313                 }
4314
4315                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4316                         kfree_skb(skb);
4317                         continue;
4318                 }
4319
4320                 if (test_bit(HCI_INIT, &hdev->flags)) {
4321                         /* Don't process data packets in this state. */
4322                         switch (bt_cb(skb)->pkt_type) {
4323                         case HCI_ACLDATA_PKT:
4324                         case HCI_SCODATA_PKT:
4325                                 kfree_skb(skb);
4326                                 continue;
4327                         }
4328                 }
4329
4330                 /* Process frame */
4331                 switch (bt_cb(skb)->pkt_type) {
4332                 case HCI_EVENT_PKT:
4333                         BT_DBG("%s Event packet", hdev->name);
4334                         hci_event_packet(hdev, skb);
4335                         break;
4336
4337                 case HCI_ACLDATA_PKT:
4338                         BT_DBG("%s ACL data packet", hdev->name);
4339                         hci_acldata_packet(hdev, skb);
4340                         break;
4341
4342                 case HCI_SCODATA_PKT:
4343                         BT_DBG("%s SCO data packet", hdev->name);
4344                         hci_scodata_packet(hdev, skb);
4345                         break;
4346
4347                 default:
4348                         kfree_skb(skb);
4349                         break;
4350                 }
4351         }
4352 }
4353
4354 static void hci_cmd_work(struct work_struct *work)
4355 {
4356         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4357         struct sk_buff *skb;
4358
4359         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4360                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4361
4362         /* Send queued commands */
4363         if (atomic_read(&hdev->cmd_cnt)) {
4364                 skb = skb_dequeue(&hdev->cmd_q);
4365                 if (!skb)
4366                         return;
4367
4368                 kfree_skb(hdev->sent_cmd);
4369
4370                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4371                 if (hdev->sent_cmd) {
4372                         atomic_dec(&hdev->cmd_cnt);
4373                         hci_send_frame(hdev, skb);
4374                         if (test_bit(HCI_RESET, &hdev->flags))
4375                                 cancel_delayed_work(&hdev->cmd_timer);
4376                         else
4377                                 schedule_delayed_work(&hdev->cmd_timer,
4378                                                       HCI_CMD_TIMEOUT);
4379                 } else {
4380                         skb_queue_head(&hdev->cmd_q, skb);
4381                         queue_work(hdev->workqueue, &hdev->cmd_work);
4382                 }
4383         }
4384 }