/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

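/* A synchronous request is marked HCI_REQ_PEND while its commands are in
 * flight and moves to HCI_REQ_DONE (result delivered) or HCI_REQ_CANCELED;
 * waiters sleep on hdev->req_wait_q in the meantime.
 */
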
#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
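
/* With debugfs mounted (typically at /sys/kernel/debug), DUT mode can be
 * toggled from userspace; e.g. for a controller named hci0:
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */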

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

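/* Illustrative use of __hci_cmd_sync(), mirroring dut_mode_write() above.
 * The vendor opcode 0xfc00 is a made-up placeholder:
 *
 *	skb = __hci_cmd_sync(hdev, 0xfc00, sizeof(param), &param,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */
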
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

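/* The small request-builder callbacks below are meant to be paired with
 * hci_req_sync(); hci_inquiry(), for instance, does:
 *
 *	err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir, timeo);
 */
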
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page
                 * should be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

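/* Full initialization runs as up to four synchronous request stages:
 * init1 resets the controller and reads basic controller information;
 * init2, init3 and init4 then configure features, event masks and
 * optional commands based on what the controller reported. AMP
 * controllers only run the first stage.
 */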
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        /* HCI_BREDR covers single-mode LE and BR/EDR as well as dual-mode
         * BR/EDR/LE controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
            !test_bit(HCI_CONFIG, &hdev->dev_flags))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

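/* Callers of hci_dev_get() must balance the implicit hci_dev_hold()
 * with hci_dev_put() once they are done with the device, as
 * hci_inquiry() below does.
 */
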
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

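/* Keep the resolve list ordered so that entries with stronger signals
 * (smaller abs(rssi)) come first and get their names resolved earlier;
 * entries whose name request is already pending are never displaced.
 */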
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

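/* Add or refresh an inquiry result in the cache and return MGMT_DEV_FOUND_*
 * flags telling the caller how the device should be reported, e.g. whether
 * userspace still needs to confirm the remote name.
 */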
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

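/* Handler for the HCIINQUIRY ioctl: run an inquiry (or reuse a fresh
 * enough cache) and copy up to ir.num_rsp inquiry_info entries back to
 * userspace.
 */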
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

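/* Bring the controller up: run the transport's setup, the configured or
 * unconfigured init sequence, and notify the stack. Returns 0 on success
 * or a negative errno; on failure all queues and works are cleaned up.
 */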
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
            !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
                        ret = __hci_unconf_init(hdev);
        }

        if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
                    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

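/* Power on a device by index, e.g. on behalf of the HCIDEVUP ioctl. */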
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
1530          * possible.
1531          *
1532          * When this function is called for a user channel, the flag
1533          * HCI_USER_CHANNEL will be set first before attempting to
1534          * open the device.
1535          */
1536         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1537             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1538                 err = -EOPNOTSUPP;
1539                 goto done;
1540         }
1541
1542         /* We need to ensure that no other power on/off work is pending
1543          * before proceeding to call hci_dev_do_open. This is
1544          * particularly important if the setup procedure has not yet
1545          * completed.
1546          */
1547         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1548                 cancel_delayed_work(&hdev->power_off);
1549
1550         /* After this call it is guaranteed that the setup procedure
1551          * has finished, which means that error conditions such as
1552          * RFKILL or a missing public/static random address now apply.
1553          */
1554         flush_workqueue(hdev->req_workqueue);
1555
1556         /* For controllers not using the management interface and that
1557          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1558          * so that pairing works for them. Once the management interface
1559          * is in use this bit will be cleared again and userspace has
1560          * to explicitly enable it.
1561          */
1562         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1563             !test_bit(HCI_MGMT, &hdev->dev_flags))
1564                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
1565
1566         err = hci_dev_do_open(hdev);
1567
1568 done:
1569         hci_dev_put(hdev);
1570         return err;
1571 }
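
/* Editorial sketch (not part of the original file): hci_dev_open() is
 * reached from the HCIDEVUP ioctl, so an in-kernel test harness could
 * exercise the up/down path roughly as follows:
 *
 *	int err = hci_dev_open(0);	// bring up hci0
 *	if (!err)
 *		err = hci_dev_close(0);	// and back down again
 */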
1572
1573 /* This function requires the caller holds hdev->lock */
1574 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1575 {
1576         struct hci_conn_params *p;
1577
1578         list_for_each_entry(p, &hdev->le_conn_params, list) {
1579                 if (p->conn) {
1580                         hci_conn_drop(p->conn);
1581                         hci_conn_put(p->conn);
1582                         p->conn = NULL;
1583                 }
1584                 list_del_init(&p->action);
1585         }
1586
1587         BT_DBG("All LE pending actions cleared");
1588 }
1589
1590 static int hci_dev_do_close(struct hci_dev *hdev)
1591 {
1592         BT_DBG("%s %p", hdev->name, hdev);
1593
1594         cancel_delayed_work(&hdev->power_off);
1595
1596         hci_req_cancel(hdev, ENODEV);
1597         hci_req_lock(hdev);
1598
1599         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1600                 cancel_delayed_work_sync(&hdev->cmd_timer);
1601                 hci_req_unlock(hdev);
1602                 return 0;
1603         }
1604
1605         /* Flush RX and TX work items */
1606         flush_work(&hdev->tx_work);
1607         flush_work(&hdev->rx_work);
1608
1609         if (hdev->discov_timeout > 0) {
1610                 cancel_delayed_work(&hdev->discov_off);
1611                 hdev->discov_timeout = 0;
1612                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1613                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1614         }
1615
1616         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1617                 cancel_delayed_work(&hdev->service_cache);
1618
1619         cancel_delayed_work_sync(&hdev->le_scan_disable);
1620         cancel_delayed_work_sync(&hdev->le_scan_restart);
1621
1622         if (test_bit(HCI_MGMT, &hdev->dev_flags))
1623                 cancel_delayed_work_sync(&hdev->rpa_expired);
1624
1625         /* Avoid potential lockdep warnings from the *_flush() calls by
1626          * ensuring the workqueue is empty up front.
1627          */
1628         drain_workqueue(hdev->workqueue);
1629
1630         hci_dev_lock(hdev);
1631
1632         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1633
1634         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1635                 if (hdev->dev_type == HCI_BREDR)
1636                         mgmt_powered(hdev, 0);
1637         }
1638
1639         hci_inquiry_cache_flush(hdev);
1640         hci_pend_le_actions_clear(hdev);
1641         hci_conn_hash_flush(hdev);
1642         hci_dev_unlock(hdev);
1643
1644         smp_unregister(hdev);
1645
1646         hci_notify(hdev, HCI_DEV_DOWN);
1647
1648         if (hdev->flush)
1649                 hdev->flush(hdev);
1650
1651         /* Reset device */
1652         skb_queue_purge(&hdev->cmd_q);
1653         atomic_set(&hdev->cmd_cnt, 1);
1654         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1655             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1656             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1657                 set_bit(HCI_INIT, &hdev->flags);
1658                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1659                 clear_bit(HCI_INIT, &hdev->flags);
1660         }
1661
1662         /* Flush cmd work */
1663         flush_work(&hdev->cmd_work);
1664
1665         /* Drop queues */
1666         skb_queue_purge(&hdev->rx_q);
1667         skb_queue_purge(&hdev->cmd_q);
1668         skb_queue_purge(&hdev->raw_q);
1669
1670         /* Drop last sent command */
1671         if (hdev->sent_cmd) {
1672                 cancel_delayed_work_sync(&hdev->cmd_timer);
1673                 kfree_skb(hdev->sent_cmd);
1674                 hdev->sent_cmd = NULL;
1675         }
1676
1677         kfree_skb(hdev->recv_evt);
1678         hdev->recv_evt = NULL;
1679
1680         /* After this point our queues are empty
1681          * and no tasks are scheduled. */
1682         hdev->close(hdev);
1683
1684         /* Clear flags */
1685         hdev->flags &= BIT(HCI_RAW);
1686         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1687
1688         /* Controller radio is available but is currently powered down */
1689         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1690
1691         memset(hdev->eir, 0, sizeof(hdev->eir));
1692         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1693         bacpy(&hdev->random_addr, BDADDR_ANY);
1694
1695         hci_req_unlock(hdev);
1696
1697         hci_dev_put(hdev);
1698         return 0;
1699 }
1700
1701 int hci_dev_close(__u16 dev)
1702 {
1703         struct hci_dev *hdev;
1704         int err;
1705
1706         hdev = hci_dev_get(dev);
1707         if (!hdev)
1708                 return -ENODEV;
1709
1710         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1711                 err = -EBUSY;
1712                 goto done;
1713         }
1714
1715         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1716                 cancel_delayed_work(&hdev->power_off);
1717
1718         err = hci_dev_do_close(hdev);
1719
1720 done:
1721         hci_dev_put(hdev);
1722         return err;
1723 }
1724
1725 static int hci_dev_do_reset(struct hci_dev *hdev)
1726 {
1727         int ret;
1728
1729         BT_DBG("%s %p", hdev->name, hdev);
1730
1731         hci_req_lock(hdev);
1732
1733         /* Drop queues */
1734         skb_queue_purge(&hdev->rx_q);
1735         skb_queue_purge(&hdev->cmd_q);
1736
1737         /* Avoid potential lockdep warnings from the *_flush() calls by
1738          * ensuring the workqueue is empty up front.
1739          */
1740         drain_workqueue(hdev->workqueue);
1741
1742         hci_dev_lock(hdev);
1743         hci_inquiry_cache_flush(hdev);
1744         hci_conn_hash_flush(hdev);
1745         hci_dev_unlock(hdev);
1746
1747         if (hdev->flush)
1748                 hdev->flush(hdev);
1749
1750         atomic_set(&hdev->cmd_cnt, 1);
1751         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1752
1753         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1754
1755         hci_req_unlock(hdev);
1756         return ret;
1757 }
1758
1759 int hci_dev_reset(__u16 dev)
1760 {
1761         struct hci_dev *hdev;
1762         int err;
1763
1764         hdev = hci_dev_get(dev);
1765         if (!hdev)
1766                 return -ENODEV;
1767
1768         if (!test_bit(HCI_UP, &hdev->flags)) {
1769                 err = -ENETDOWN;
1770                 goto done;
1771         }
1772
1773         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1774                 err = -EBUSY;
1775                 goto done;
1776         }
1777
1778         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1779                 err = -EOPNOTSUPP;
1780                 goto done;
1781         }
1782
1783         err = hci_dev_do_reset(hdev);
1784
1785 done:
1786         hci_dev_put(hdev);
1787         return err;
1788 }
1789
1790 int hci_dev_reset_stat(__u16 dev)
1791 {
1792         struct hci_dev *hdev;
1793         int ret = 0;
1794
1795         hdev = hci_dev_get(dev);
1796         if (!hdev)
1797                 return -ENODEV;
1798
1799         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1800                 ret = -EBUSY;
1801                 goto done;
1802         }
1803
1804         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1805                 ret = -EOPNOTSUPP;
1806                 goto done;
1807         }
1808
1809         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1810
1811 done:
1812         hci_dev_put(hdev);
1813         return ret;
1814 }
1815
1816 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1817 {
1818         bool conn_changed, discov_changed;
1819
1820         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1821
1822         if ((scan & SCAN_PAGE))
1823                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1824                                                  &hdev->dev_flags);
1825         else
1826                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1827                                                   &hdev->dev_flags);
1828
1829         if ((scan & SCAN_INQUIRY)) {
1830                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1831                                                    &hdev->dev_flags);
1832         } else {
1833                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1834                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1835                                                     &hdev->dev_flags);
1836         }
1837
1838         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1839                 return;
1840
1841         if (conn_changed || discov_changed) {
1842                 /* In case this was disabled through mgmt */
1843                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1844
1845                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1846                         mgmt_update_adv_data(hdev);
1847
1848                 mgmt_new_settings(hdev);
1849         }
1850 }
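
/* Editorial note: dr.dev_opt for HCISETSCAN carries the raw
 * Write_Scan_Enable parameter, so the mapping handled above is
 * (assuming the usual SCAN_* encoding):
 *
 *	SCAN_DISABLED (0x00)		-> neither flag
 *	SCAN_INQUIRY  (0x01)		-> HCI_DISCOVERABLE
 *	SCAN_PAGE     (0x02)		-> HCI_CONNECTABLE
 *	SCAN_PAGE | SCAN_INQUIRY	-> both flags set
 */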
1851
1852 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1853 {
1854         struct hci_dev *hdev;
1855         struct hci_dev_req dr;
1856         int err = 0;
1857
1858         if (copy_from_user(&dr, arg, sizeof(dr)))
1859                 return -EFAULT;
1860
1861         hdev = hci_dev_get(dr.dev_id);
1862         if (!hdev)
1863                 return -ENODEV;
1864
1865         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1866                 err = -EBUSY;
1867                 goto done;
1868         }
1869
1870         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1871                 err = -EOPNOTSUPP;
1872                 goto done;
1873         }
1874
1875         if (hdev->dev_type != HCI_BREDR) {
1876                 err = -EOPNOTSUPP;
1877                 goto done;
1878         }
1879
1880         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1881                 err = -EOPNOTSUPP;
1882                 goto done;
1883         }
1884
1885         switch (cmd) {
1886         case HCISETAUTH:
1887                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1888                                    HCI_INIT_TIMEOUT);
1889                 break;
1890
1891         case HCISETENCRYPT:
1892                 if (!lmp_encrypt_capable(hdev)) {
1893                         err = -EOPNOTSUPP;
1894                         break;
1895                 }
1896
1897                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1898                         /* Auth must be enabled first */
1899                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1900                                            HCI_INIT_TIMEOUT);
1901                         if (err)
1902                                 break;
1903                 }
1904
1905                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1906                                    HCI_INIT_TIMEOUT);
1907                 break;
1908
1909         case HCISETSCAN:
1910                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1911                                    HCI_INIT_TIMEOUT);
1912
1913                 /* Ensure that the connectable and discoverable states
1914                  * get correctly modified as this was a non-mgmt change.
1915                  */
1916                 if (!err)
1917                         hci_update_scan_state(hdev, dr.dev_opt);
1918                 break;
1919
1920         case HCISETLINKPOL:
1921                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1922                                    HCI_INIT_TIMEOUT);
1923                 break;
1924
1925         case HCISETLINKMODE:
1926                 hdev->link_mode = ((__u16) dr.dev_opt) &
1927                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1928                 break;
1929
1930         case HCISETPTYPE:
1931                 hdev->pkt_type = (__u16) dr.dev_opt;
1932                 break;
1933
1934         case HCISETACLMTU:
1935                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1936                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1937                 break;
1938
1939         case HCISETSCOMTU:
1940                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1941                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1942                 break;
1943
1944         default:
1945                 err = -EINVAL;
1946                 break;
1947         }
1948
1949 done:
1950         hci_dev_put(hdev);
1951         return err;
1952 }
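
/* Editorial example (assumes a little-endian host, which the legacy
 * __u16-pointer arithmetic above implies): userspace packs the
 * HCISETACLMTU argument as
 *
 *	dr.dev_opt = (acl_mtu << 16) | acl_pkts;
 *
 * so the "+ 1" halfword yields the MTU and the "+ 0" halfword the
 * packet count. HCISETSCOMTU uses the same layout.
 */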
1953
1954 int hci_get_dev_list(void __user *arg)
1955 {
1956         struct hci_dev *hdev;
1957         struct hci_dev_list_req *dl;
1958         struct hci_dev_req *dr;
1959         int n = 0, size, err;
1960         __u16 dev_num;
1961
1962         if (get_user(dev_num, (__u16 __user *) arg))
1963                 return -EFAULT;
1964
1965         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1966                 return -EINVAL;
1967
1968         size = sizeof(*dl) + dev_num * sizeof(*dr);
1969
1970         dl = kzalloc(size, GFP_KERNEL);
1971         if (!dl)
1972                 return -ENOMEM;
1973
1974         dr = dl->dev_req;
1975
1976         read_lock(&hci_dev_list_lock);
1977         list_for_each_entry(hdev, &hci_dev_list, list) {
1978                 unsigned long flags = hdev->flags;
1979
1980                 /* When auto-off is configured, it means the transport
1981                  * is running, but in that case still indicate that the
1982                  * device is actually down.
1983                  */
1984                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1985                         flags &= ~BIT(HCI_UP);
1986
1987                 (dr + n)->dev_id  = hdev->id;
1988                 (dr + n)->dev_opt = flags;
1989
1990                 if (++n >= dev_num)
1991                         break;
1992         }
1993         read_unlock(&hci_dev_list_lock);
1994
1995         dl->dev_num = n;
1996         size = sizeof(*dl) + n * sizeof(*dr);
1997
1998         err = copy_to_user(arg, dl, size);
1999         kfree(dl);
2000
2001         return err ? -EFAULT : 0;
2002 }
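
/* Illustrative userspace counterpart (editorial): the HCIGETDEVLIST
 * ioctl handled here is typically issued as
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = malloc(sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *	ioctl(hci_sock, HCIGETDEVLIST, dl);
 *
 * with hci_sock being an open raw HCI socket.
 */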
2003
2004 int hci_get_dev_info(void __user *arg)
2005 {
2006         struct hci_dev *hdev;
2007         struct hci_dev_info di;
2008         unsigned long flags;
2009         int err = 0;
2010
2011         if (copy_from_user(&di, arg, sizeof(di)))
2012                 return -EFAULT;
2013
2014         hdev = hci_dev_get(di.dev_id);
2015         if (!hdev)
2016                 return -ENODEV;
2017
2018         /* When auto-off is configured, it means the transport
2019          * is running, but in that case still indicate that the
2020          * device is actually down.
2021          */
2022         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2023                 flags = hdev->flags & ~BIT(HCI_UP);
2024         else
2025                 flags = hdev->flags;
2026
2027         strcpy(di.name, hdev->name);
2028         di.bdaddr   = hdev->bdaddr;
2029         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2030         di.flags    = flags;
2031         di.pkt_type = hdev->pkt_type;
2032         if (lmp_bredr_capable(hdev)) {
2033                 di.acl_mtu  = hdev->acl_mtu;
2034                 di.acl_pkts = hdev->acl_pkts;
2035                 di.sco_mtu  = hdev->sco_mtu;
2036                 di.sco_pkts = hdev->sco_pkts;
2037         } else {
2038                 di.acl_mtu  = hdev->le_mtu;
2039                 di.acl_pkts = hdev->le_pkts;
2040                 di.sco_mtu  = 0;
2041                 di.sco_pkts = 0;
2042         }
2043         di.link_policy = hdev->link_policy;
2044         di.link_mode   = hdev->link_mode;
2045
2046         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2047         memcpy(&di.features, &hdev->features, sizeof(di.features));
2048
2049         if (copy_to_user(arg, &di, sizeof(di)))
2050                 err = -EFAULT;
2051
2052         hci_dev_put(hdev);
2053
2054         return err;
2055 }
2056
2057 /* ---- Interface to HCI drivers ---- */
2058
2059 static int hci_rfkill_set_block(void *data, bool blocked)
2060 {
2061         struct hci_dev *hdev = data;
2062
2063         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2064
2065         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2066                 return -EBUSY;
2067
2068         if (blocked) {
2069                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2070                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2071                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2072                         hci_dev_do_close(hdev);
2073         } else {
2074                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2075         }
2076
2077         return 0;
2078 }
2079
2080 static const struct rfkill_ops hci_rfkill_ops = {
2081         .set_block = hci_rfkill_set_block,
2082 };
2083
2084 static void hci_power_on(struct work_struct *work)
2085 {
2086         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2087         int err;
2088
2089         BT_DBG("%s", hdev->name);
2090
2091         err = hci_dev_do_open(hdev);
2092         if (err < 0) {
2093                 hci_dev_lock(hdev);
2094                 mgmt_set_powered_failed(hdev, err);
2095                 hci_dev_unlock(hdev);
2096                 return;
2097         }
2098
2099         /* During the HCI setup phase, a few error conditions are
2100          * ignored and they need to be checked now. If they are still
2101          * valid, it is important to turn the device back off.
2102          */
2103         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2104             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2105             (hdev->dev_type == HCI_BREDR &&
2106              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2107              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2108                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2109                 hci_dev_do_close(hdev);
2110         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2111                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2112                                    HCI_AUTO_OFF_TIMEOUT);
2113         }
2114
2115         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2116                 /* For unconfigured devices, set the HCI_RAW flag
2117                  * so that userspace can easily identify them.
2118                  */
2119                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2120                         set_bit(HCI_RAW, &hdev->flags);
2121
2122                 /* For fully configured devices, this will send
2123                  * the Index Added event. For unconfigured devices,
2124                  * it will send the Unconfigured Index Added event.
2125                  *
2126                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2127                  * and no event will be sent.
2128                  */
2129                 mgmt_index_added(hdev);
2130         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2131                 /* Now that the controller is configured, it is
2132                  * important to clear the HCI_RAW flag.
2133                  */
2134                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2135                         clear_bit(HCI_RAW, &hdev->flags);
2136
2137                 /* Powering on the controller with HCI_CONFIG set only
2138                  * happens with the transition from unconfigured to
2139                  * configured. This will send the Index Added event.
2140                  */
2141                 mgmt_index_added(hdev);
2142         }
2143 }
2144
2145 static void hci_power_off(struct work_struct *work)
2146 {
2147         struct hci_dev *hdev = container_of(work, struct hci_dev,
2148                                             power_off.work);
2149
2150         BT_DBG("%s", hdev->name);
2151
2152         hci_dev_do_close(hdev);
2153 }
2154
2155 static void hci_error_reset(struct work_struct *work)
2156 {
2157         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2158
2159         BT_DBG("%s", hdev->name);
2160
2161         if (hdev->hw_error)
2162                 hdev->hw_error(hdev, hdev->hw_error_code);
2163         else
2164                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2165                        hdev->hw_error_code);
2166
2167         if (hci_dev_do_close(hdev))
2168                 return;
2169
2170         hci_dev_do_open(hdev);
2171 }
2172
2173 static void hci_discov_off(struct work_struct *work)
2174 {
2175         struct hci_dev *hdev;
2176
2177         hdev = container_of(work, struct hci_dev, discov_off.work);
2178
2179         BT_DBG("%s", hdev->name);
2180
2181         mgmt_discoverable_timeout(hdev);
2182 }
2183
2184 void hci_uuids_clear(struct hci_dev *hdev)
2185 {
2186         struct bt_uuid *uuid, *tmp;
2187
2188         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2189                 list_del(&uuid->list);
2190                 kfree(uuid);
2191         }
2192 }
2193
2194 void hci_link_keys_clear(struct hci_dev *hdev)
2195 {
2196         struct link_key *key;
2197
2198         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2199                 list_del_rcu(&key->list);
2200                 kfree_rcu(key, rcu);
2201         }
2202 }
2203
2204 void hci_smp_ltks_clear(struct hci_dev *hdev)
2205 {
2206         struct smp_ltk *k;
2207
2208         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2209                 list_del_rcu(&k->list);
2210                 kfree_rcu(k, rcu);
2211         }
2212 }
2213
2214 void hci_smp_irks_clear(struct hci_dev *hdev)
2215 {
2216         struct smp_irk *k;
2217
2218         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2219                 list_del_rcu(&k->list);
2220                 kfree_rcu(k, rcu);
2221         }
2222 }
2223
2224 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2225 {
2226         struct link_key *k;
2227
2228         rcu_read_lock();
2229         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2230                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2231                         rcu_read_unlock();
2232                         return k;
2233                 }
2234         }
2235         rcu_read_unlock();
2236
2237         return NULL;
2238 }
2239
2240 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2241                                u8 key_type, u8 old_key_type)
2242 {
2243         /* Legacy key */
2244         if (key_type < 0x03)
2245                 return true;
2246
2247         /* Debug keys are insecure so don't store them persistently */
2248         if (key_type == HCI_LK_DEBUG_COMBINATION)
2249                 return false;
2250
2251         /* Changed combination key and there's no previous one */
2252         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2253                 return false;
2254
2255         /* Security mode 3 case */
2256         if (!conn)
2257                 return true;
2258
2259         /* BR/EDR key derived using SC from an LE link */
2260         if (conn->type == LE_LINK)
2261                 return true;
2262
2263         /* Neither local nor remote side had no-bonding as a requirement */
2264         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2265                 return true;
2266
2267         /* Local side had dedicated bonding as a requirement */
2268         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2269                 return true;
2270
2271         /* Remote side had dedicated bonding as a requirement */
2272         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2273                 return true;
2274
2275         /* If none of the above criteria match, then don't store the key
2276          * persistently */
2277         return false;
2278 }
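
/* Editorial decoding of the auth_type checks above: 0x00/0x01 are No
 * Bonding (without/with MITM), 0x02/0x03 Dedicated Bonding and
 * 0x04/0x05 General Bonding, which is why "> 0x01" means some form of
 * bonding was requested by that side.
 */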
2279
2280 static u8 ltk_role(u8 type)
2281 {
2282         if (type == SMP_LTK)
2283                 return HCI_ROLE_MASTER;
2284
2285         return HCI_ROLE_SLAVE;
2286 }
2287
2288 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2289                              u8 addr_type, u8 role)
2290 {
2291         struct smp_ltk *k;
2292
2293         rcu_read_lock();
2294         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2295                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2296                         continue;
2297
2298                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2299                         rcu_read_unlock();
2300                         return k;
2301                 }
2302         }
2303         rcu_read_unlock();
2304
2305         return NULL;
2306 }
2307
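/* Editorial summary: look up an IRK by resolvable private address. The
 * first pass only compares against RPAs that were already resolved and
 * cached in irk->rpa; only if that fails does the second pass call
 * smp_irk_matches() to redo the crypto for every stored IRK, caching
 * the matching RPA for later lookups.
 */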
2308 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2309 {
2310         struct smp_irk *irk;
2311
2312         rcu_read_lock();
2313         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2314                 if (!bacmp(&irk->rpa, rpa)) {
2315                         rcu_read_unlock();
2316                         return irk;
2317                 }
2318         }
2319
2320         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2321                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2322                         bacpy(&irk->rpa, rpa);
2323                         rcu_read_unlock();
2324                         return irk;
2325                 }
2326         }
2327         rcu_read_unlock();
2328
2329         return NULL;
2330 }
2331
2332 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2333                                      u8 addr_type)
2334 {
2335         struct smp_irk *irk;
2336
2337         /* Identity Address must be public or static random */
2338         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2339                 return NULL;
2340
2341         rcu_read_lock();
2342         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2343                 if (addr_type == irk->addr_type &&
2344                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2345                         rcu_read_unlock();
2346                         return irk;
2347                 }
2348         }
2349         rcu_read_unlock();
2350
2351         return NULL;
2352 }
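
/* Editorial note on the b[5] test above: bdaddr_t is stored in
 * little-endian order, so b[5] is the most significant address byte.
 * Static random identity addresses must have the two top bits set to
 * 0b11, which is why anything else (e.g. an RPA, top bits 0b01) is
 * rejected before the list walk.
 */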
2353
2354 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2355                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2356                                   u8 pin_len, bool *persistent)
2357 {
2358         struct link_key *key, *old_key;
2359         u8 old_key_type;
2360
2361         old_key = hci_find_link_key(hdev, bdaddr);
2362         if (old_key) {
2363                 old_key_type = old_key->type;
2364                 key = old_key;
2365         } else {
2366                 old_key_type = conn ? conn->key_type : 0xff;
2367                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2368                 if (!key)
2369                         return NULL;
2370                 list_add_rcu(&key->list, &hdev->link_keys);
2371         }
2372
2373         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2374
2375         /* Some buggy controller combinations generate a changed
2376          * combination key for legacy pairing even when there's no
2377          * previous key */
2378         if (type == HCI_LK_CHANGED_COMBINATION &&
2379             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2380                 type = HCI_LK_COMBINATION;
2381                 if (conn)
2382                         conn->key_type = type;
2383         }
2384
2385         bacpy(&key->bdaddr, bdaddr);
2386         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2387         key->pin_len = pin_len;
2388
2389         if (type == HCI_LK_CHANGED_COMBINATION)
2390                 key->type = old_key_type;
2391         else
2392                 key->type = type;
2393
2394         if (persistent)
2395                 *persistent = hci_persistent_key(hdev, conn, type,
2396                                                  old_key_type);
2397
2398         return key;
2399 }
2400
2401 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2402                             u8 addr_type, u8 type, u8 authenticated,
2403                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2404 {
2405         struct smp_ltk *key, *old_key;
2406         u8 role = ltk_role(type);
2407
2408         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2409         if (old_key)
2410                 key = old_key;
2411         else {
2412                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2413                 if (!key)
2414                         return NULL;
2415                 list_add_rcu(&key->list, &hdev->long_term_keys);
2416         }
2417
2418         bacpy(&key->bdaddr, bdaddr);
2419         key->bdaddr_type = addr_type;
2420         memcpy(key->val, tk, sizeof(key->val));
2421         key->authenticated = authenticated;
2422         key->ediv = ediv;
2423         key->rand = rand;
2424         key->enc_size = enc_size;
2425         key->type = type;
2426
2427         return key;
2428 }
2429
2430 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2431                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2432 {
2433         struct smp_irk *irk;
2434
2435         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2436         if (!irk) {
2437                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2438                 if (!irk)
2439                         return NULL;
2440
2441                 bacpy(&irk->bdaddr, bdaddr);
2442                 irk->addr_type = addr_type;
2443
2444                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2445         }
2446
2447         memcpy(irk->val, val, 16);
2448         bacpy(&irk->rpa, rpa);
2449
2450         return irk;
2451 }
2452
2453 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2454 {
2455         struct link_key *key;
2456
2457         key = hci_find_link_key(hdev, bdaddr);
2458         if (!key)
2459                 return -ENOENT;
2460
2461         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2462
2463         list_del_rcu(&key->list);
2464         kfree_rcu(key, rcu);
2465
2466         return 0;
2467 }
2468
2469 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2470 {
2471         struct smp_ltk *k;
2472         int removed = 0;
2473
2474         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2475                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2476                         continue;
2477
2478                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2479
2480                 list_del_rcu(&k->list);
2481                 kfree_rcu(k, rcu);
2482                 removed++;
2483         }
2484
2485         return removed ? 0 : -ENOENT;
2486 }
2487
2488 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2489 {
2490         struct smp_irk *k;
2491
2492         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2493                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2494                         continue;
2495
2496                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2497
2498                 list_del_rcu(&k->list);
2499                 kfree_rcu(k, rcu);
2500         }
2501 }
2502
2503 /* HCI command timer function */
2504 static void hci_cmd_timeout(struct work_struct *work)
2505 {
2506         struct hci_dev *hdev = container_of(work, struct hci_dev,
2507                                             cmd_timer.work);
2508
2509         if (hdev->sent_cmd) {
2510                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2511                 u16 opcode = __le16_to_cpu(sent->opcode);
2512
2513                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2514         } else {
2515                 BT_ERR("%s command tx timeout", hdev->name);
2516         }
2517
2518         atomic_set(&hdev->cmd_cnt, 1);
2519         queue_work(hdev->workqueue, &hdev->cmd_work);
2520 }
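
/* Editorial note: resetting cmd_cnt to 1 restores the command credit so
 * that hci_cmd_work can submit the next queued command even though the
 * controller never completed the one that timed out.
 */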
2521
2522 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2523                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2524 {
2525         struct oob_data *data;
2526
2527         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2528                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2529                         continue;
2530                 if (data->bdaddr_type != bdaddr_type)
2531                         continue;
2532                 return data;
2533         }
2534
2535         return NULL;
2536 }
2537
2538 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2539                                u8 bdaddr_type)
2540 {
2541         struct oob_data *data;
2542
2543         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2544         if (!data)
2545                 return -ENOENT;
2546
2547         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2548
2549         list_del(&data->list);
2550         kfree(data);
2551
2552         return 0;
2553 }
2554
2555 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2556 {
2557         struct oob_data *data, *n;
2558
2559         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2560                 list_del(&data->list);
2561                 kfree(data);
2562         }
2563 }
2564
2565 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2566                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2567                             u8 *hash256, u8 *rand256)
2568 {
2569         struct oob_data *data;
2570
2571         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2572         if (!data) {
2573                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2574                 if (!data)
2575                         return -ENOMEM;
2576
2577                 bacpy(&data->bdaddr, bdaddr);
2578                 data->bdaddr_type = bdaddr_type;
2579                 list_add(&data->list, &hdev->remote_oob_data);
2580         }
2581
2582         if (hash192 && rand192) {
2583                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2584                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2585                 if (hash256 && rand256)
2586                         data->present = 0x03;
2587         } else {
2588                 memset(data->hash192, 0, sizeof(data->hash192));
2589                 memset(data->rand192, 0, sizeof(data->rand192));
2590                 if (hash256 && rand256)
2591                         data->present = 0x02;
2592                 else
2593                         data->present = 0x00;
2594         }
2595
2596         if (hash256 && rand256) {
2597                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2598                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2599         } else {
2600                 memset(data->hash256, 0, sizeof(data->hash256));
2601                 memset(data->rand256, 0, sizeof(data->rand256));
2602                 if (hash192 && rand192)
2603                         data->present = 0x01;
2604         }
2605
2606         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2607
2608         return 0;
2609 }
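
/* For reference (editorial): data->present ends up as a bitmask of the
 * OOB value pairs supplied, bit 0 for P-192 and bit 1 for P-256. For
 * example, a caller providing only the P-256 values,
 *
 *	hci_add_remote_oob_data(hdev, bdaddr, bdaddr_type,
 *				NULL, NULL, hash256, rand256);
 *
 * leaves present == 0x02.
 */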
2610
2611 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2612                                          bdaddr_t *bdaddr, u8 type)
2613 {
2614         struct bdaddr_list *b;
2615
2616         list_for_each_entry(b, bdaddr_list, list) {
2617                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2618                         return b;
2619         }
2620
2621         return NULL;
2622 }
2623
2624 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2625 {
2626         struct list_head *p, *n;
2627
2628         list_for_each_safe(p, n, bdaddr_list) {
2629                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2630
2631                 list_del(p);
2632                 kfree(b);
2633         }
2634 }
2635
2636 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2637 {
2638         struct bdaddr_list *entry;
2639
2640         if (!bacmp(bdaddr, BDADDR_ANY))
2641                 return -EBADF;
2642
2643         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2644                 return -EEXIST;
2645
2646         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2647         if (!entry)
2648                 return -ENOMEM;
2649
2650         bacpy(&entry->bdaddr, bdaddr);
2651         entry->bdaddr_type = type;
2652
2653         list_add(&entry->list, list);
2654
2655         return 0;
2656 }
2657
2658 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2659 {
2660         struct bdaddr_list *entry;
2661
2662         if (!bacmp(bdaddr, BDADDR_ANY)) {
2663                 hci_bdaddr_list_clear(list);
2664                 return 0;
2665         }
2666
2667         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2668         if (!entry)
2669                 return -ENOENT;
2670
2671         list_del(&entry->list);
2672         kfree(entry);
2673
2674         return 0;
2675 }
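
/* Editorial note: BDADDR_ANY is a wildcard for deletion only, e.g.
 *
 *	hci_bdaddr_list_del(&hdev->whitelist, BDADDR_ANY, 0);
 *
 * flushes the whole list, while hci_bdaddr_list_add() rejects it
 * outright with -EBADF.
 */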
2676
2677 /* This function requires the caller holds hdev->lock */
2678 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2679                                                bdaddr_t *addr, u8 addr_type)
2680 {
2681         struct hci_conn_params *params;
2682
2683         /* The conn params list only contains identity addresses */
2684         if (!hci_is_identity_address(addr, addr_type))
2685                 return NULL;
2686
2687         list_for_each_entry(params, &hdev->le_conn_params, list) {
2688                 if (bacmp(&params->addr, addr) == 0 &&
2689                     params->addr_type == addr_type) {
2690                         return params;
2691                 }
2692         }
2693
2694         return NULL;
2695 }
2696
2697 /* This function requires the caller holds hdev->lock */
2698 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2699                                                   bdaddr_t *addr, u8 addr_type)
2700 {
2701         struct hci_conn_params *param;
2702
2703         /* The list only contains identity addresses */
2704         if (!hci_is_identity_address(addr, addr_type))
2705                 return NULL;
2706
2707         list_for_each_entry(param, list, action) {
2708                 if (bacmp(&param->addr, addr) == 0 &&
2709                     param->addr_type == addr_type)
2710                         return param;
2711         }
2712
2713         return NULL;
2714 }
2715
2716 /* This function requires the caller holds hdev->lock */
2717 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2718                                             bdaddr_t *addr, u8 addr_type)
2719 {
2720         struct hci_conn_params *params;
2721
2722         if (!hci_is_identity_address(addr, addr_type))
2723                 return NULL;
2724
2725         params = hci_conn_params_lookup(hdev, addr, addr_type);
2726         if (params)
2727                 return params;
2728
2729         params = kzalloc(sizeof(*params), GFP_KERNEL);
2730         if (!params) {
2731                 BT_ERR("Out of memory");
2732                 return NULL;
2733         }
2734
2735         bacpy(&params->addr, addr);
2736         params->addr_type = addr_type;
2737
2738         list_add(&params->list, &hdev->le_conn_params);
2739         INIT_LIST_HEAD(&params->action);
2740
2741         params->conn_min_interval = hdev->le_conn_min_interval;
2742         params->conn_max_interval = hdev->le_conn_max_interval;
2743         params->conn_latency = hdev->le_conn_latency;
2744         params->supervision_timeout = hdev->le_supv_timeout;
2745         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2746
2747         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2748
2749         return params;
2750 }
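
/* Illustrative use (editorial; hdev->lock held as required): a caller
 * that wants a peer auto-connected could do something like
 *
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (params)
 *		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 */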
2751
2752 static void hci_conn_params_free(struct hci_conn_params *params)
2753 {
2754         if (params->conn) {
2755                 hci_conn_drop(params->conn);
2756                 hci_conn_put(params->conn);
2757         }
2758
2759         list_del(&params->action);
2760         list_del(&params->list);
2761         kfree(params);
2762 }
2763
2764 /* This function requires the caller holds hdev->lock */
2765 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2766 {
2767         struct hci_conn_params *params;
2768
2769         params = hci_conn_params_lookup(hdev, addr, addr_type);
2770         if (!params)
2771                 return;
2772
2773         hci_conn_params_free(params);
2774
2775         hci_update_background_scan(hdev);
2776
2777         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2778 }
2779
2780 /* This function requires the caller holds hdev->lock */
2781 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2782 {
2783         struct hci_conn_params *params, *tmp;
2784
2785         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2786                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2787                         continue;
2788                 list_del(&params->list);
2789                 kfree(params);
2790         }
2791
2792         BT_DBG("All LE disabled connection parameters were removed");
2793 }
2794
2795 /* This function requires the caller holds hdev->lock */
2796 void hci_conn_params_clear_all(struct hci_dev *hdev)
2797 {
2798         struct hci_conn_params *params, *tmp;
2799
2800         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2801                 hci_conn_params_free(params);
2802
2803         hci_update_background_scan(hdev);
2804
2805         BT_DBG("All LE connection parameters were removed");
2806 }
2807
2808 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2809 {
2810         if (status) {
2811                 BT_ERR("Failed to start inquiry: status %d", status);
2812
2813                 hci_dev_lock(hdev);
2814                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2815                 hci_dev_unlock(hdev);
2816                 return;
2817         }
2818 }
2819
2820 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2821                                           u16 opcode)
2822 {
2823         /* General inquiry access code (GIAC) */
2824         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2825         struct hci_request req;
2826         struct hci_cp_inquiry cp;
2827         int err;
2828
2829         if (status) {
2830                 BT_ERR("Failed to disable LE scanning: status %d", status);
2831                 return;
2832         }
2833
2834         hdev->discovery.scan_start = 0;
2835
2836         switch (hdev->discovery.type) {
2837         case DISCOV_TYPE_LE:
2838                 hci_dev_lock(hdev);
2839                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2840                 hci_dev_unlock(hdev);
2841                 break;
2842
2843         case DISCOV_TYPE_INTERLEAVED:
2844                 hci_req_init(&req, hdev);
2845
2846                 memset(&cp, 0, sizeof(cp));
2847                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2848                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2849                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2850
2851                 hci_dev_lock(hdev);
2852
2853                 hci_inquiry_cache_flush(hdev);
2854
2855                 err = hci_req_run(&req, inquiry_complete);
2856                 if (err) {
2857                         BT_ERR("Inquiry request failed: err %d", err);
2858                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2859                 }
2860
2861                 hci_dev_unlock(hdev);
2862                 break;
2863         }
2864 }
2865
2866 static void le_scan_disable_work(struct work_struct *work)
2867 {
2868         struct hci_dev *hdev = container_of(work, struct hci_dev,
2869                                             le_scan_disable.work);
2870         struct hci_request req;
2871         int err;
2872
2873         BT_DBG("%s", hdev->name);
2874
2875         cancel_delayed_work_sync(&hdev->le_scan_restart);
2876
2877         hci_req_init(&req, hdev);
2878
2879         hci_req_add_le_scan_disable(&req);
2880
2881         err = hci_req_run(&req, le_scan_disable_work_complete);
2882         if (err)
2883                 BT_ERR("Disable LE scanning request failed: err %d", err);
2884 }
2885
2886 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
2887                                           u16 opcode)
2888 {
2889         unsigned long timeout, duration, scan_start, now;
2890
2891         BT_DBG("%s", hdev->name);
2892
2893         if (status) {
2894                 BT_ERR("Failed to restart LE scan: status %d", status);
2895                 return;
2896         }
2897
2898         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2899             !hdev->discovery.scan_start)
2900                 return;
2901
2902         /* When the scan was started, hdev->le_scan_disable was queued to
2903          * run 'duration' after scan_start. During scan restart that work
2904          * was canceled, so we queue it again here with the remaining
2905          * timeout to make sure the scan does not run indefinitely.
2906          */
2907         duration = hdev->discovery.scan_duration;
2908         scan_start = hdev->discovery.scan_start;
2909         now = jiffies;
2910         if (now - scan_start <= duration) {
2911                 int elapsed;
2912
2913                 if (now >= scan_start)
2914                         elapsed = now - scan_start;
2915                 else
2916                         elapsed = ULONG_MAX - scan_start + now;
2917
2918                 timeout = duration - elapsed;
2919         } else {
2920                 timeout = 0;
2921         }
2922         queue_delayed_work(hdev->workqueue,
2923                            &hdev->le_scan_disable, timeout);
2924 }
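
/* Worked example for the wraparound branch above (editorial): with
 * scan_start == ULONG_MAX - 5 and now == 10, elapsed is computed as
 * 5 + 10 = 15 ticks. Strictly the wrap itself consumes one tick (the
 * true distance is 16, which plain unsigned "now - scan_start" would
 * yield), but at jiffies resolution the one-tick difference is
 * harmless.
 */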
2925
2926 static void le_scan_restart_work(struct work_struct *work)
2927 {
2928         struct hci_dev *hdev = container_of(work, struct hci_dev,
2929                                             le_scan_restart.work);
2930         struct hci_request req;
2931         struct hci_cp_le_set_scan_enable cp;
2932         int err;
2933
2934         BT_DBG("%s", hdev->name);
2935
2936         /* If the controller is not scanning, we are done. */
2937         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2938                 return;
2939
2940         hci_req_init(&req, hdev);
2941
2942         hci_req_add_le_scan_disable(&req);
2943
2944         memset(&cp, 0, sizeof(cp));
2945         cp.enable = LE_SCAN_ENABLE;
2946         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2947         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2948
2949         err = hci_req_run(&req, le_scan_restart_work_complete);
2950         if (err)
2951                 BT_ERR("Restart LE scan request failed: err %d", err);
2952 }
2953
2954 /* Copy the Identity Address of the controller.
2955  *
2956  * If the controller has a public BD_ADDR, then by default use that one.
2957  * If this is an LE-only controller without a public address, default to
2958  * the static random address.
2959  *
2960  * For debugging purposes it is possible to force controllers with a
2961  * public address to use the static random address instead.
2962  *
2963  * If BR/EDR has been disabled on a dual-mode controller and
2964  * userspace has configured a static address, then that address
2965  * becomes the identity address instead of the public BR/EDR address.
2966  */
2967 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2968                                u8 *bdaddr_type)
2969 {
2970         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
2971             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2972             (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2973              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2974                 bacpy(bdaddr, &hdev->static_addr);
2975                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2976         } else {
2977                 bacpy(bdaddr, &hdev->bdaddr);
2978                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2979         }
2980 }
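
/* Illustrative use (editorial): a caller needing the controller's
 * identity address would simply do
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 */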
2981
2982 /* Alloc HCI device */
2983 struct hci_dev *hci_alloc_dev(void)
2984 {
2985         struct hci_dev *hdev;
2986
2987         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2988         if (!hdev)
2989                 return NULL;
2990
2991         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2992         hdev->esco_type = (ESCO_HV1);
2993         hdev->link_mode = (HCI_LM_ACCEPT);
2994         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2995         hdev->io_capability = 0x03;     /* No Input No Output */
2996         hdev->manufacturer = 0xffff;    /* Default to internal use */
2997         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2998         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2999
3000         hdev->sniff_max_interval = 800;
3001         hdev->sniff_min_interval = 80;
3002
3003         hdev->le_adv_channel_map = 0x07;
3004         hdev->le_adv_min_interval = 0x0800;
3005         hdev->le_adv_max_interval = 0x0800;
3006         hdev->le_scan_interval = 0x0060;
3007         hdev->le_scan_window = 0x0030;
3008         hdev->le_conn_min_interval = 0x0028;
3009         hdev->le_conn_max_interval = 0x0038;
3010         hdev->le_conn_latency = 0x0000;
3011         hdev->le_supv_timeout = 0x002a;
3012         hdev->le_def_tx_len = 0x001b;
3013         hdev->le_def_tx_time = 0x0148;
3014         hdev->le_max_tx_len = 0x001b;
3015         hdev->le_max_tx_time = 0x0148;
3016         hdev->le_max_rx_len = 0x001b;
3017         hdev->le_max_rx_time = 0x0148;
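
        /* Editorial note: the LE defaults above are in controller units,
         * e.g. scan interval 0x0060 = 96 * 0.625 ms = 60 ms, scan window
         * 0x0030 = 30 ms, connection interval 0x0028-0x0038 = 50-70 ms,
         * and supervision timeout 0x002a = 42 * 10 ms = 420 ms.
         */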
3018
3019         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3020         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3021         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3022         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3023
3024         mutex_init(&hdev->lock);
3025         mutex_init(&hdev->req_lock);
3026
3027         INIT_LIST_HEAD(&hdev->mgmt_pending);
3028         INIT_LIST_HEAD(&hdev->blacklist);
3029         INIT_LIST_HEAD(&hdev->whitelist);
3030         INIT_LIST_HEAD(&hdev->uuids);
3031         INIT_LIST_HEAD(&hdev->link_keys);
3032         INIT_LIST_HEAD(&hdev->long_term_keys);
3033         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3034         INIT_LIST_HEAD(&hdev->remote_oob_data);
3035         INIT_LIST_HEAD(&hdev->le_white_list);
3036         INIT_LIST_HEAD(&hdev->le_conn_params);
3037         INIT_LIST_HEAD(&hdev->pend_le_conns);
3038         INIT_LIST_HEAD(&hdev->pend_le_reports);
3039         INIT_LIST_HEAD(&hdev->conn_hash.list);
3040
3041         INIT_WORK(&hdev->rx_work, hci_rx_work);
3042         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3043         INIT_WORK(&hdev->tx_work, hci_tx_work);
3044         INIT_WORK(&hdev->power_on, hci_power_on);
3045         INIT_WORK(&hdev->error_reset, hci_error_reset);
3046
3047         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3048         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3049         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3050         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3051
3052         skb_queue_head_init(&hdev->rx_q);
3053         skb_queue_head_init(&hdev->cmd_q);
3054         skb_queue_head_init(&hdev->raw_q);
3055
3056         init_waitqueue_head(&hdev->req_wait_q);
3057
3058         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3059
3060         hci_init_sysfs(hdev);
3061         discovery_init(hdev);
3062
3063         return hdev;
3064 }
3065 EXPORT_SYMBOL(hci_alloc_dev);
3066
3067 /* Free HCI device */
3068 void hci_free_dev(struct hci_dev *hdev)
3069 {
3070         /* will free via device release */
3071         put_device(&hdev->dev);
3072 }
3073 EXPORT_SYMBOL(hci_free_dev);
3074
3075 /* Register HCI device */
3076 int hci_register_dev(struct hci_dev *hdev)
3077 {
3078         int id, error;
3079
3080         if (!hdev->open || !hdev->close || !hdev->send)
3081                 return -EINVAL;
3082
3083         /* Do not allow HCI_AMP devices to register at index 0,
3084          * so the index can be used as the AMP controller ID.
3085          */
3086         switch (hdev->dev_type) {
3087         case HCI_BREDR:
3088                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3089                 break;
3090         case HCI_AMP:
3091                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3092                 break;
3093         default:
3094                 return -EINVAL;
3095         }
3096
3097         if (id < 0)
3098                 return id;
3099
3100         sprintf(hdev->name, "hci%d", id);
3101         hdev->id = id;
3102
3103         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3104
3105         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3106                                           WQ_MEM_RECLAIM, 1, hdev->name);
3107         if (!hdev->workqueue) {
3108                 error = -ENOMEM;
3109                 goto err;
3110         }
3111
3112         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3113                                               WQ_MEM_RECLAIM, 1, hdev->name);
3114         if (!hdev->req_workqueue) {
3115                 destroy_workqueue(hdev->workqueue);
3116                 error = -ENOMEM;
3117                 goto err;
3118         }
3119
3120         if (!IS_ERR_OR_NULL(bt_debugfs))
3121                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3122
3123         dev_set_name(&hdev->dev, "%s", hdev->name);
3124
3125         error = device_add(&hdev->dev);
3126         if (error < 0)
3127                 goto err_wqueue;
3128
3129         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3130                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3131                                     hdev);
3132         if (hdev->rfkill) {
3133                 if (rfkill_register(hdev->rfkill) < 0) {
3134                         rfkill_destroy(hdev->rfkill);
3135                         hdev->rfkill = NULL;
3136                 }
3137         }
3138
3139         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3140                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3141
3142         set_bit(HCI_SETUP, &hdev->dev_flags);
3143         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3144
3145         if (hdev->dev_type == HCI_BREDR) {
3146                 /* Assume BR/EDR support until proven otherwise (such as
3147                  * through reading supported features during init).
3148                  */
3149                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3150         }
3151
3152         write_lock(&hci_dev_list_lock);
3153         list_add(&hdev->list, &hci_dev_list);
3154         write_unlock(&hci_dev_list_lock);
3155
3156         /* Devices that are marked for raw-only usage are unconfigured
3157          * and should not be included in normal operation.
3158          */
3159         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3160                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3161
3162         hci_notify(hdev, HCI_DEV_REG);
3163         hci_dev_hold(hdev);
3164
3165         queue_work(hdev->req_workqueue, &hdev->power_on);
3166
3167         return id;
3168
3169 err_wqueue:
3170         destroy_workqueue(hdev->workqueue);
3171         destroy_workqueue(hdev->req_workqueue);
3172 err:
3173         ida_simple_remove(&hci_index_ida, hdev->id);
3174
3175         return error;
3176 }
3177 EXPORT_SYMBOL(hci_register_dev);
3178
3179 /* Unregister HCI device */
3180 void hci_unregister_dev(struct hci_dev *hdev)
3181 {
3182         int i, id;
3183
3184         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3185
3186         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3187
3188         id = hdev->id;
3189
3190         write_lock(&hci_dev_list_lock);
3191         list_del(&hdev->list);
3192         write_unlock(&hci_dev_list_lock);
3193
3194         hci_dev_do_close(hdev);
3195
3196         for (i = 0; i < NUM_REASSEMBLY; i++)
3197                 kfree_skb(hdev->reassembly[i]);
3198
3199         cancel_work_sync(&hdev->power_on);
3200
3201         if (!test_bit(HCI_INIT, &hdev->flags) &&
3202             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3203             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
3204                 hci_dev_lock(hdev);
3205                 mgmt_index_removed(hdev);
3206                 hci_dev_unlock(hdev);
3207         }
3208
3209         /* mgmt_index_removed should take care of emptying the
3210          * pending list. */
3211         BUG_ON(!list_empty(&hdev->mgmt_pending));
3212
3213         hci_notify(hdev, HCI_DEV_UNREG);
3214
3215         if (hdev->rfkill) {
3216                 rfkill_unregister(hdev->rfkill);
3217                 rfkill_destroy(hdev->rfkill);
3218         }
3219
3220         device_del(&hdev->dev);
3221
3222         debugfs_remove_recursive(hdev->debugfs);
3223
3224         destroy_workqueue(hdev->workqueue);
3225         destroy_workqueue(hdev->req_workqueue);
3226
3227         hci_dev_lock(hdev);
3228         hci_bdaddr_list_clear(&hdev->blacklist);
3229         hci_bdaddr_list_clear(&hdev->whitelist);
3230         hci_uuids_clear(hdev);
3231         hci_link_keys_clear(hdev);
3232         hci_smp_ltks_clear(hdev);
3233         hci_smp_irks_clear(hdev);
3234         hci_remote_oob_data_clear(hdev);
3235         hci_bdaddr_list_clear(&hdev->le_white_list);
3236         hci_conn_params_clear_all(hdev);
3237         hci_discovery_filter_clear(hdev);
3238         hci_dev_unlock(hdev);
3239
3240         hci_dev_put(hdev);
3241
3242         ida_simple_remove(&hci_index_ida, id);
3243 }
3244 EXPORT_SYMBOL(hci_unregister_dev);
3245
3246 /* Suspend HCI device */
3247 int hci_suspend_dev(struct hci_dev *hdev)
3248 {
3249         hci_notify(hdev, HCI_DEV_SUSPEND);
3250         return 0;
3251 }
3252 EXPORT_SYMBOL(hci_suspend_dev);
3253
3254 /* Resume HCI device */
3255 int hci_resume_dev(struct hci_dev *hdev)
3256 {
3257         hci_notify(hdev, HCI_DEV_RESUME);
3258         return 0;
3259 }
3260 EXPORT_SYMBOL(hci_resume_dev);
3261
3262 /* Reset HCI device */
3263 int hci_reset_dev(struct hci_dev *hdev)
3264 {
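        /* Synthesized HCI Hardware Error event: event code, parameter
         * total length (1 byte) and a generic hardware code of 0x00.
         */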
3265         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3266         struct sk_buff *skb;
3267
3268         skb = bt_skb_alloc(3, GFP_ATOMIC);
3269         if (!skb)
3270                 return -ENOMEM;
3271
3272         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3273         memcpy(skb_put(skb, 3), hw_err, 3);
3274
3275         /* Send Hardware Error to upper stack */
3276         return hci_recv_frame(hdev, skb);
3277 }
3278 EXPORT_SYMBOL(hci_reset_dev);
3279
3280 /* Receive frame from HCI drivers */
3281 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3282 {
3283         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3284                       !test_bit(HCI_INIT, &hdev->flags))) {
3285                 kfree_skb(skb);
3286                 return -ENXIO;
3287         }
3288
3289         /* Incoming skb */
3290         bt_cb(skb)->incoming = 1;
3291
3292         /* Time stamp */
3293         __net_timestamp(skb);
3294
3295         skb_queue_tail(&hdev->rx_q, skb);
3296         queue_work(hdev->workqueue, &hdev->rx_work);
3297
3298         return 0;
3299 }
3300 EXPORT_SYMBOL(hci_recv_frame);
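
/* Illustrative sketch only: a driver whose transport delivers complete
 * packets hands them to the core like this, with pkt_type taken from
 * the transport framing (e.g. HCI_EVENT_PKT):
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 *
 * On error the skb has already been freed by hci_recv_frame().
 */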
3301
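/* Reassemble a packet of the given type from a raw byte stream.  One
 * skb per reassembly slot is kept in hdev->reassembly[]: the packet
 * header is collected first, then scb->expect is reloaded with the
 * payload length taken from that header.  Returns the number of input
 * bytes that were not consumed, or a negative error.
 */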
3302 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3303                           int count, __u8 index)
3304 {
3305         int len = 0;
3306         int hlen = 0;
3307         int remain = count;
3308         struct sk_buff *skb;
3309         struct bt_skb_cb *scb;
3310
3311         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3312             index >= NUM_REASSEMBLY)
3313                 return -EILSEQ;
3314
3315         skb = hdev->reassembly[index];
3316
3317         if (!skb) {
3318                 switch (type) {
3319                 case HCI_ACLDATA_PKT:
3320                         len = HCI_MAX_FRAME_SIZE;
3321                         hlen = HCI_ACL_HDR_SIZE;
3322                         break;
3323                 case HCI_EVENT_PKT:
3324                         len = HCI_MAX_EVENT_SIZE;
3325                         hlen = HCI_EVENT_HDR_SIZE;
3326                         break;
3327                 case HCI_SCODATA_PKT:
3328                         len = HCI_MAX_SCO_SIZE;
3329                         hlen = HCI_SCO_HDR_SIZE;
3330                         break;
3331                 }
3332
3333                 skb = bt_skb_alloc(len, GFP_ATOMIC);
3334                 if (!skb)
3335                         return -ENOMEM;
3336
3337                 scb = (void *) skb->cb;
3338                 scb->expect = hlen;
3339                 scb->pkt_type = type;
3340
3341                 hdev->reassembly[index] = skb;
3342         }
3343
3344         while (count) {
3345                 scb = (void *) skb->cb;
3346                 len = min_t(uint, scb->expect, count);
3347
3348                 memcpy(skb_put(skb, len), data, len);
3349
3350                 count -= len;
3351                 data += len;
3352                 scb->expect -= len;
3353                 remain = count;
3354
3355                 switch (type) {
3356                 case HCI_EVENT_PKT:
3357                         if (skb->len == HCI_EVENT_HDR_SIZE) {
3358                                 struct hci_event_hdr *h = hci_event_hdr(skb);
3359                                 scb->expect = h->plen;
3360
3361                                 if (skb_tailroom(skb) < scb->expect) {
3362                                         kfree_skb(skb);
3363                                         hdev->reassembly[index] = NULL;
3364                                         return -ENOMEM;
3365                                 }
3366                         }
3367                         break;
3368
3369                 case HCI_ACLDATA_PKT:
3370                         if (skb->len == HCI_ACL_HDR_SIZE) {
3371                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3372                                 scb->expect = __le16_to_cpu(h->dlen);
3373
3374                                 if (skb_tailroom(skb) < scb->expect) {
3375                                         kfree_skb(skb);
3376                                         hdev->reassembly[index] = NULL;
3377                                         return -ENOMEM;
3378                                 }
3379                         }
3380                         break;
3381
3382                 case HCI_SCODATA_PKT:
3383                         if (skb->len == HCI_SCO_HDR_SIZE) {
3384                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3385                                 scb->expect = h->dlen;
3386
3387                                 if (skb_tailroom(skb) < scb->expect) {
3388                                         kfree_skb(skb);
3389                                         hdev->reassembly[index] = NULL;
3390                                         return -ENOMEM;
3391                                 }
3392                         }
3393                         break;
3394                 }
3395
3396                 if (scb->expect == 0) {
3397                         /* Complete frame */
3398
3399                         bt_cb(skb)->pkt_type = type;
3400                         hci_recv_frame(hdev, skb);
3401
3402                         hdev->reassembly[index] = NULL;
3403                         return remain;
3404                 }
3405         }
3406
3407         return remain;
3408 }
3409
3410 #define STREAM_REASSEMBLY 0
3411
3412 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3413 {
3414         int type;
3415         int rem = 0;
3416
3417         while (count) {
3418                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3419
3420                 if (!skb) {
3421                         struct { char type; } *pkt;
3422
3423                         /* Start of the frame */
3424                         pkt = data;
3425                         type = pkt->type;
3426
3427                         data++;
3428                         count--;
3429                 } else {
3430                         type = bt_cb(skb)->pkt_type;
                }
3431
3432                 rem = hci_reassembly(hdev, type, data, count,
3433                                      STREAM_REASSEMBLY);
3434                 if (rem < 0)
3435                         return rem;
3436
3437                 data += (count - rem);
3438                 count = rem;
3439         }
3440
3441         return rem;
3442 }
3443 EXPORT_SYMBOL(hci_recv_stream_fragment);
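
/* Illustrative sketch only: a UART-style driver feeds whatever bytes
 * arrived on the wire straight into the core; the leading packet type
 * byte of each frame selects the reassembly handling:
 *
 *	err = hci_recv_stream_fragment(hdev, buf, len);
 *	if (err < 0)
 *		BT_ERR("Frame reassembly failed (%d)", err);
 */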
3444
3445 /* ---- Interface to upper protocols ---- */
3446
3447 int hci_register_cb(struct hci_cb *cb)
3448 {
3449         BT_DBG("%p name %s", cb, cb->name);
3450
3451         write_lock(&hci_cb_list_lock);
3452         list_add(&cb->list, &hci_cb_list);
3453         write_unlock(&hci_cb_list_lock);
3454
3455         return 0;
3456 }
3457 EXPORT_SYMBOL(hci_register_cb);
3458
3459 int hci_unregister_cb(struct hci_cb *cb)
3460 {
3461         BT_DBG("%p name %s", cb, cb->name);
3462
3463         write_lock(&hci_cb_list_lock);
3464         list_del(&cb->list);
3465         write_unlock(&hci_cb_list_lock);
3466
3467         return 0;
3468 }
3469 EXPORT_SYMBOL(hci_unregister_cb);
3470
3471 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3472 {
3473         int err;
3474
3475         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3476
3477         /* Time stamp */
3478         __net_timestamp(skb);
3479
3480         /* Send copy to monitor */
3481         hci_send_to_monitor(hdev, skb);
3482
3483         if (atomic_read(&hdev->promisc)) {
3484                 /* Send copy to the sockets */
3485                 hci_send_to_sock(hdev, skb);
3486         }
3487
3488         /* Get rid of skb owner, prior to sending to the driver. */
3489         skb_orphan(skb);
3490
3491         err = hdev->send(hdev, skb);
3492         if (err < 0) {
3493                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3494                 kfree_skb(skb);
3495         }
3496 }
3497
3498 bool hci_req_pending(struct hci_dev *hdev)
3499 {
3500         return hdev->req_status == HCI_REQ_PEND;
3501 }
3502
3503 /* Send HCI command */
3504 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3505                  const void *param)
3506 {
3507         struct sk_buff *skb;
3508
3509         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3510
3511         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3512         if (!skb) {
3513                 BT_ERR("%s no memory for command", hdev->name);
3514                 return -ENOMEM;
3515         }
3516
3517         /* Stand-alone HCI commands must be flagged as
3518          * single-command requests.
3519          */
3520         bt_cb(skb)->req.start = true;
3521
3522         skb_queue_tail(&hdev->cmd_q, skb);
3523         queue_work(hdev->workqueue, &hdev->cmd_work);
3524
3525         return 0;
3526 }
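
/* Illustrative sketch only: queueing a parameterless HCI_OP_RESET
 * command.  The reply is delivered asynchronously through the event
 * processing in hci_event_packet():
 *
 *	err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */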
3527
3528 /* Get data from the previously sent command */
3529 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3530 {
3531         struct hci_command_hdr *hdr;
3532
3533         if (!hdev->sent_cmd)
3534                 return NULL;
3535
3536         hdr = (void *) hdev->sent_cmd->data;
3537
3538         if (hdr->opcode != cpu_to_le16(opcode))
3539                 return NULL;
3540
3541         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3542
3543         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3544 }
3545
3546 /* Send ACL data */
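/* The 16-bit handle field of the ACL header carries the 12-bit
 * connection handle in its low bits and the packet boundary/broadcast
 * flags in the top four bits (see hci_handle_pack()).
 */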
3547 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3548 {
3549         struct hci_acl_hdr *hdr;
3550         int len = skb->len;
3551
3552         skb_push(skb, HCI_ACL_HDR_SIZE);
3553         skb_reset_transport_header(skb);
3554         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3555         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3556         hdr->dlen   = cpu_to_le16(len);
3557 }
3558
3559 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3560                           struct sk_buff *skb, __u16 flags)
3561 {
3562         struct hci_conn *conn = chan->conn;
3563         struct hci_dev *hdev = conn->hdev;
3564         struct sk_buff *list;
3565
3566         skb->len = skb_headlen(skb);
3567         skb->data_len = 0;
3568
3569         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3570
3571         switch (hdev->dev_type) {
3572         case HCI_BREDR:
3573                 hci_add_acl_hdr(skb, conn->handle, flags);
3574                 break;
3575         case HCI_AMP:
3576                 hci_add_acl_hdr(skb, chan->handle, flags);
3577                 break;
3578         default:
3579                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3580                 return;
3581         }
3582
3583         list = skb_shinfo(skb)->frag_list;
3584         if (!list) {
3585                 /* Non fragmented */
3586                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3587
3588                 skb_queue_tail(queue, skb);
3589         } else {
3590                 /* Fragmented */
3591                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3592
3593                 skb_shinfo(skb)->frag_list = NULL;
3594
3595                 /* Queue all fragments atomically. We need to use spin_lock_bh
3596                  * here because of 6LoWPAN links, as there this function is
3597                  * called from softirq and using normal spin lock could cause
3598                  * deadlocks.
3599                  */
3600                 spin_lock_bh(&queue->lock);
3601
3602                 __skb_queue_tail(queue, skb);
3603
3604                 flags &= ~ACL_START;
3605                 flags |= ACL_CONT;
3606                 do {
3607                         skb = list;
                        list = list->next;
3608
3609                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3610                         hci_add_acl_hdr(skb, conn->handle, flags);
3611
3612                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3613
3614                         __skb_queue_tail(queue, skb);
3615                 } while (list);
3616
3617                 spin_unlock_bh(&queue->lock);
3618         }
3619 }
3620
3621 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3622 {
3623         struct hci_dev *hdev = chan->conn->hdev;
3624
3625         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3626
3627         hci_queue_acl(chan, &chan->data_q, skb, flags);
3628
3629         queue_work(hdev->workqueue, &hdev->tx_work);
3630 }
3631
3632 /* Send SCO data */
3633 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3634 {
3635         struct hci_dev *hdev = conn->hdev;
3636         struct hci_sco_hdr hdr;
3637
3638         BT_DBG("%s len %d", hdev->name, skb->len);
3639
3640         hdr.handle = cpu_to_le16(conn->handle);
3641         hdr.dlen   = skb->len;
3642
3643         skb_push(skb, HCI_SCO_HDR_SIZE);
3644         skb_reset_transport_header(skb);
3645         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3646
3647         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3648
3649         skb_queue_tail(&conn->data_q, skb);
3650         queue_work(hdev->workqueue, &hdev->tx_work);
3651 }
3652
3653 /* ---- HCI TX task (outgoing data) ---- */
3654
3655 /* HCI Connection scheduler */
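/* Pick the connection of the given link type that has queued data and
 * the least amount of outstanding packets, and grant it a fair share
 * of the currently free controller buffers (cnt divided by the number
 * of ready connections, at least one).
 */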
3656 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3657                                      int *quote)
3658 {
3659         struct hci_conn_hash *h = &hdev->conn_hash;
3660         struct hci_conn *conn = NULL, *c;
3661         unsigned int num = 0, min = ~0;
3662
3663         /* We don't have to lock device here. Connections are always
3664          * added and removed with TX task disabled. */
3665
3666         rcu_read_lock();
3667
3668         list_for_each_entry_rcu(c, &h->list, list) {
3669                 if (c->type != type || skb_queue_empty(&c->data_q))
3670                         continue;
3671
3672                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3673                         continue;
3674
3675                 num++;
3676
3677                 if (c->sent < min) {
3678                         min  = c->sent;
3679                         conn = c;
3680                 }
3681
3682                 if (hci_conn_num(hdev, type) == num)
3683                         break;
3684         }
3685
3686         rcu_read_unlock();
3687
3688         if (conn) {
3689                 int cnt, q;
3690
3691                 switch (conn->type) {
3692                 case ACL_LINK:
3693                         cnt = hdev->acl_cnt;
3694                         break;
3695                 case SCO_LINK:
3696                 case ESCO_LINK:
3697                         cnt = hdev->sco_cnt;
3698                         break;
3699                 case LE_LINK:
3700                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3701                         break;
3702                 default:
3703                         cnt = 0;
3704                         BT_ERR("Unknown link type");
3705                 }
3706
3707                 q = cnt / num;
3708                 *quote = q ? q : 1;
3709         } else {
3710                 *quote = 0;
        }
3711
3712         BT_DBG("conn %p quote %d", conn, *quote);
3713         return conn;
3714 }
3715
3716 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3717 {
3718         struct hci_conn_hash *h = &hdev->conn_hash;
3719         struct hci_conn *c;
3720
3721         BT_ERR("%s link tx timeout", hdev->name);
3722
3723         rcu_read_lock();
3724
3725         /* Kill stalled connections */
3726         list_for_each_entry_rcu(c, &h->list, list) {
3727                 if (c->type == type && c->sent) {
3728                         BT_ERR("%s killing stalled connection %pMR",
3729                                hdev->name, &c->dst);
3730                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3731                 }
3732         }
3733
3734         rcu_read_unlock();
3735 }
3736
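/* Channel scheduler: like hci_low_sent(), but operates on HCI channels
 * and is priority aware.  Only channels whose queued skbs are at the
 * highest priority currently seen compete; among those, the channel of
 * the connection with the least outstanding data wins the quote.
 */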
3737 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3738                                       int *quote)
3739 {
3740         struct hci_conn_hash *h = &hdev->conn_hash;
3741         struct hci_chan *chan = NULL;
3742         unsigned int num = 0, min = ~0, cur_prio = 0;
3743         struct hci_conn *conn;
3744         int cnt, q, conn_num = 0;
3745
3746         BT_DBG("%s", hdev->name);
3747
3748         rcu_read_lock();
3749
3750         list_for_each_entry_rcu(conn, &h->list, list) {
3751                 struct hci_chan *tmp;
3752
3753                 if (conn->type != type)
3754                         continue;
3755
3756                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3757                         continue;
3758
3759                 conn_num++;
3760
3761                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3762                         struct sk_buff *skb;
3763
3764                         if (skb_queue_empty(&tmp->data_q))
3765                                 continue;
3766
3767                         skb = skb_peek(&tmp->data_q);
3768                         if (skb->priority < cur_prio)
3769                                 continue;
3770
3771                         if (skb->priority > cur_prio) {
3772                                 num = 0;
3773                                 min = ~0;
3774                                 cur_prio = skb->priority;
3775                         }
3776
3777                         num++;
3778
3779                         if (conn->sent < min) {
3780                                 min  = conn->sent;
3781                                 chan = tmp;
3782                         }
3783                 }
3784
3785                 if (hci_conn_num(hdev, type) == conn_num)
3786                         break;
3787         }
3788
3789         rcu_read_unlock();
3790
3791         if (!chan)
3792                 return NULL;
3793
3794         switch (chan->conn->type) {
3795         case ACL_LINK:
3796                 cnt = hdev->acl_cnt;
3797                 break;
3798         case AMP_LINK:
3799                 cnt = hdev->block_cnt;
3800                 break;
3801         case SCO_LINK:
3802         case ESCO_LINK:
3803                 cnt = hdev->sco_cnt;
3804                 break;
3805         case LE_LINK:
3806                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3807                 break;
3808         default:
3809                 cnt = 0;
3810                 BT_ERR("Unknown link type");
3811         }
3812
3813         q = cnt / num;
3814         *quote = q ? q : 1;
3815         BT_DBG("chan %p quote %d", chan, *quote);
3816         return chan;
3817 }
3818
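/* Priority aging: after a scheduling round that actually sent data,
 * promote any channel that did not get to transmit to HCI_PRIO_MAX - 1
 * so that high-priority traffic cannot starve it forever.
 */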
3819 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3820 {
3821         struct hci_conn_hash *h = &hdev->conn_hash;
3822         struct hci_conn *conn;
3823         int num = 0;
3824
3825         BT_DBG("%s", hdev->name);
3826
3827         rcu_read_lock();
3828
3829         list_for_each_entry_rcu(conn, &h->list, list) {
3830                 struct hci_chan *chan;
3831
3832                 if (conn->type != type)
3833                         continue;
3834
3835                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3836                         continue;
3837
3838                 num++;
3839
3840                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3841                         struct sk_buff *skb;
3842
3843                         if (chan->sent) {
3844                                 chan->sent = 0;
3845                                 continue;
3846                         }
3847
3848                         if (skb_queue_empty(&chan->data_q))
3849                                 continue;
3850
3851                         skb = skb_peek(&chan->data_q);
3852                         if (skb->priority >= HCI_PRIO_MAX - 1)
3853                                 continue;
3854
3855                         skb->priority = HCI_PRIO_MAX - 1;
3856
3857                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3858                                skb->priority);
3859                 }
3860
3861                 if (hci_conn_num(hdev, type) == num)
3862                         break;
3863         }
3864
3865         rcu_read_unlock();
3867 }
3868
3869 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3870 {
3871         /* Calculate count of blocks used by this packet */
3872         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3873 }
3874
3875 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3876 {
3877         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
3878                 /* ACL tx timeout must be longer than maximum
3879                  * link supervision timeout (40.9 seconds) */
3880                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3881                                        HCI_ACL_TX_TIMEOUT))
3882                         hci_link_tx_to(hdev, ACL_LINK);
3883         }
3884 }
3885
3886 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3887 {
3888         unsigned int cnt = hdev->acl_cnt;
3889         struct hci_chan *chan;
3890         struct sk_buff *skb;
3891         int quote;
3892
3893         __check_timeout(hdev, cnt);
3894
3895         while (hdev->acl_cnt &&
3896                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3897                 u32 priority = (skb_peek(&chan->data_q))->priority;
3898                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3899                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3900                                skb->len, skb->priority);
3901
3902                         /* Stop if priority has changed */
3903                         if (skb->priority < priority)
3904                                 break;
3905
3906                         skb = skb_dequeue(&chan->data_q);
3907
3908                         hci_conn_enter_active_mode(chan->conn,
3909                                                    bt_cb(skb)->force_active);
3910
3911                         hci_send_frame(hdev, skb);
3912                         hdev->acl_last_tx = jiffies;
3913
3914                         hdev->acl_cnt--;
3915                         chan->sent++;
3916                         chan->conn->sent++;
3917                 }
3918         }
3919
3920         if (cnt != hdev->acl_cnt)
3921                 hci_prio_recalculate(hdev, ACL_LINK);
3922 }
3923
3924 static void hci_sched_acl_blk(struct hci_dev *hdev)
3925 {
3926         unsigned int cnt = hdev->block_cnt;
3927         struct hci_chan *chan;
3928         struct sk_buff *skb;
3929         int quote;
3930         u8 type;
3931
3932         __check_timeout(hdev, cnt);
3933
3934         BT_DBG("%s", hdev->name);
3935
3936         if (hdev->dev_type == HCI_AMP)
3937                 type = AMP_LINK;
3938         else
3939                 type = ACL_LINK;
3940
3941         while (hdev->block_cnt > 0 &&
3942                (chan = hci_chan_sent(hdev, type, &quote))) {
3943                 u32 priority = (skb_peek(&chan->data_q))->priority;
3944                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3945                         int blocks;
3946
3947                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3948                                skb->len, skb->priority);
3949
3950                         /* Stop if priority has changed */
3951                         if (skb->priority < priority)
3952                                 break;
3953
3954                         skb = skb_dequeue(&chan->data_q);
3955
3956                         blocks = __get_blocks(hdev, skb);
3957                         if (blocks > hdev->block_cnt)
3958                                 return;
3959
3960                         hci_conn_enter_active_mode(chan->conn,
3961                                                    bt_cb(skb)->force_active);
3962
3963                         hci_send_frame(hdev, skb);
3964                         hdev->acl_last_tx = jiffies;
3965
3966                         hdev->block_cnt -= blocks;
3967                         quote -= blocks;
3968
3969                         chan->sent += blocks;
3970                         chan->conn->sent += blocks;
3971                 }
3972         }
3973
3974         if (cnt != hdev->block_cnt)
3975                 hci_prio_recalculate(hdev, type);
3976 }
3977
3978 static void hci_sched_acl(struct hci_dev *hdev)
3979 {
3980         BT_DBG("%s", hdev->name);
3981
3982         /* No ACL link over BR/EDR controller */
3983         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3984                 return;
3985
3986         /* No AMP link over AMP controller */
3987         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3988                 return;
3989
3990         switch (hdev->flow_ctl_mode) {
3991         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3992                 hci_sched_acl_pkt(hdev);
3993                 break;
3994
3995         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3996                 hci_sched_acl_blk(hdev);
3997                 break;
3998         }
3999 }
4000
4001 /* Schedule SCO */
4002 static void hci_sched_sco(struct hci_dev *hdev)
4003 {
4004         struct hci_conn *conn;
4005         struct sk_buff *skb;
4006         int quote;
4007
4008         BT_DBG("%s", hdev->name);
4009
4010         if (!hci_conn_num(hdev, SCO_LINK))
4011                 return;
4012
4013         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4014                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4015                         BT_DBG("skb %p len %d", skb, skb->len);
4016                         hci_send_frame(hdev, skb);
4017
4018                         conn->sent++;
4019                         if (conn->sent == ~0)
4020                                 conn->sent = 0;
4021                 }
4022         }
4023 }
4024
4025 static void hci_sched_esco(struct hci_dev *hdev)
4026 {
4027         struct hci_conn *conn;
4028         struct sk_buff *skb;
4029         int quote;
4030
4031         BT_DBG("%s", hdev->name);
4032
4033         if (!hci_conn_num(hdev, ESCO_LINK))
4034                 return;
4035
4036         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4037                                                      &quote))) {
4038                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4039                         BT_DBG("skb %p len %d", skb, skb->len);
4040                         hci_send_frame(hdev, skb);
4041
4042                         conn->sent++;
4043                         if (conn->sent == ~0)
4044                                 conn->sent = 0;
4045                 }
4046         }
4047 }
4048
4049 static void hci_sched_le(struct hci_dev *hdev)
4050 {
4051         struct hci_chan *chan;
4052         struct sk_buff *skb;
4053         int quote, cnt, tmp;
4054
4055         BT_DBG("%s", hdev->name);
4056
4057         if (!hci_conn_num(hdev, LE_LINK))
4058                 return;
4059
4060         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4061                 /* LE tx timeout must be longer than maximum
4062                  * link supervision timeout (40.9 seconds) */
4063                 if (!hdev->le_cnt && hdev->le_pkts &&
4064                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4065                         hci_link_tx_to(hdev, LE_LINK);
4066         }
4067
4068         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4069         tmp = cnt;
4070         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4071                 u32 priority = (skb_peek(&chan->data_q))->priority;
4072                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4073                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4074                                skb->len, skb->priority);
4075
4076                         /* Stop if priority has changed */
4077                         if (skb->priority < priority)
4078                                 break;
4079
4080                         skb = skb_dequeue(&chan->data_q);
4081
4082                         hci_send_frame(hdev, skb);
4083                         hdev->le_last_tx = jiffies;
4084
4085                         cnt--;
4086                         chan->sent++;
4087                         chan->conn->sent++;
4088                 }
4089         }
4090
4091         if (hdev->le_pkts)
4092                 hdev->le_cnt = cnt;
4093         else
4094                 hdev->acl_cnt = cnt;
4095
4096         if (cnt != tmp)
4097                 hci_prio_recalculate(hdev, LE_LINK);
4098 }
4099
4100 static void hci_tx_work(struct work_struct *work)
4101 {
4102         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4103         struct sk_buff *skb;
4104
4105         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4106                hdev->sco_cnt, hdev->le_cnt);
4107
4108         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4109                 /* Schedule queues and send stuff to HCI driver */
4110                 hci_sched_acl(hdev);
4111                 hci_sched_sco(hdev);
4112                 hci_sched_esco(hdev);
4113                 hci_sched_le(hdev);
4114         }
4115
4116         /* Send queued raw (unknown type) packets */
4117         while ((skb = skb_dequeue(&hdev->raw_q)))
4118                 hci_send_frame(hdev, skb);
4119 }
4120
4121 /* ----- HCI RX task (incoming data processing) ----- */
4122
4123 /* ACL data packet */
4124 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4125 {
4126         struct hci_acl_hdr *hdr = (void *) skb->data;
4127         struct hci_conn *conn;
4128         __u16 handle, flags;
4129
4130         skb_pull(skb, HCI_ACL_HDR_SIZE);
4131
4132         handle = __le16_to_cpu(hdr->handle);
4133         flags  = hci_flags(handle);
4134         handle = hci_handle(handle);
4135
4136         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4137                handle, flags);
4138
4139         hdev->stat.acl_rx++;
4140
4141         hci_dev_lock(hdev);
4142         conn = hci_conn_hash_lookup_handle(hdev, handle);
4143         hci_dev_unlock(hdev);
4144
4145         if (conn) {
4146                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4147
4148                 /* Send to upper protocol */
4149                 l2cap_recv_acldata(conn, skb, flags);
4150                 return;
4151         }
4152
4153         BT_ERR("%s ACL packet for unknown connection handle %d",
4154                hdev->name, handle);
4155
4156         kfree_skb(skb);
4157 }
4158
4159 /* SCO data packet */
4160 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4161 {
4162         struct hci_sco_hdr *hdr = (void *) skb->data;
4163         struct hci_conn *conn;
4164         __u16 handle;
4165
4166         skb_pull(skb, HCI_SCO_HDR_SIZE);
4167
4168         handle = __le16_to_cpu(hdr->handle);
4169
4170         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4171
4172         hdev->stat.sco_rx++;
4173
4174         hci_dev_lock(hdev);
4175         conn = hci_conn_hash_lookup_handle(hdev, handle);
4176         hci_dev_unlock(hdev);
4177
4178         if (conn) {
4179                 /* Send to upper protocol */
4180                 sco_recv_scodata(conn, skb);
4181                 return;
4182         }
4183
4184         BT_ERR("%s SCO packet for unknown connection handle %d",
4185                hdev->name, handle);
4186
4187         kfree_skb(skb);
4188 }
4189
4190 static bool hci_req_is_complete(struct hci_dev *hdev)
4191 {
4192         struct sk_buff *skb;
4193
4194         skb = skb_peek(&hdev->cmd_q);
4195         if (!skb)
4196                 return true;
4197
4198         return bt_cb(skb)->req.start;
4199 }
4200
4201 static void hci_resend_last(struct hci_dev *hdev)
4202 {
4203         struct hci_command_hdr *sent;
4204         struct sk_buff *skb;
4205         u16 opcode;
4206
4207         if (!hdev->sent_cmd)
4208                 return;
4209
4210         sent = (void *) hdev->sent_cmd->data;
4211         opcode = __le16_to_cpu(sent->opcode);
4212         if (opcode == HCI_OP_RESET)
4213                 return;
4214
4215         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4216         if (!skb)
4217                 return;
4218
4219         skb_queue_head(&hdev->cmd_q, skb);
4220         queue_work(hdev->workqueue, &hdev->cmd_work);
4221 }
4222
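/* Called from the event processing code when a command completes: if
 * the request that the command was part of is now finished (last
 * command done, or a command failed), drop any remaining queued
 * commands of that request and invoke the request's completion
 * callback exactly once.
 */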
4223 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4224 {
4225         hci_req_complete_t req_complete = NULL;
4226         struct sk_buff *skb;
4227         unsigned long flags;
4228
4229         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4230
4231         /* If the completed command doesn't match the last one that was
4232          * sent we need to do special handling of it.
4233          */
4234         if (!hci_sent_cmd_data(hdev, opcode)) {
4235                 /* Some CSR based controllers generate a spontaneous
4236                  * reset complete event during init and any pending
4237                  * command will never be completed. In such a case we
4238                  * need to resend whatever was the last sent
4239                  * command.
4240                  */
4241                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4242                         hci_resend_last(hdev);
4243
4244                 return;
4245         }
4246
4247         /* If the command succeeded and there are still more commands in
4248          * this request, the request is not yet complete.
4249          */
4250         if (!status && !hci_req_is_complete(hdev))
4251                 return;
4252
4253         /* If this was the last command in a request, the complete
4254          * callback is found in hdev->sent_cmd instead of the
4255          * command queue (hdev->cmd_q).
4256          */
4257         if (hdev->sent_cmd) {
4258                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4259
4260                 if (req_complete) {
4261                         /* We must set the complete callback to NULL to
4262                          * avoid calling the callback more than once if
4263                          * this function gets called again.
4264                          */
4265                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
4266
4267                         goto call_complete;
4268                 }
4269         }
4270
4271         /* Remove all pending commands belonging to this request */
4272         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4273         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4274                 if (bt_cb(skb)->req.start) {
4275                         __skb_queue_head(&hdev->cmd_q, skb);
4276                         break;
4277                 }
4278
4279                 req_complete = bt_cb(skb)->req.complete;
4280                 kfree_skb(skb);
4281         }
4282         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4283
4284 call_complete:
4285         if (req_complete)
4286                 req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
4287 }
4288
4289 static void hci_rx_work(struct work_struct *work)
4290 {
4291         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4292         struct sk_buff *skb;
4293
4294         BT_DBG("%s", hdev->name);
4295
4296         while ((skb = skb_dequeue(&hdev->rx_q))) {
4297                 /* Send copy to monitor */
4298                 hci_send_to_monitor(hdev, skb);
4299
4300                 if (atomic_read(&hdev->promisc)) {
4301                         /* Send copy to the sockets */
4302                         hci_send_to_sock(hdev, skb);
4303                 }
4304
4305                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4306                         kfree_skb(skb);
4307                         continue;
4308                 }
4309
4310                 if (test_bit(HCI_INIT, &hdev->flags)) {
4311                         /* Don't process data packets in this state. */
4312                         switch (bt_cb(skb)->pkt_type) {
4313                         case HCI_ACLDATA_PKT:
4314                         case HCI_SCODATA_PKT:
4315                                 kfree_skb(skb);
4316                                 continue;
4317                         }
4318                 }
4319
4320                 /* Process frame */
4321                 switch (bt_cb(skb)->pkt_type) {
4322                 case HCI_EVENT_PKT:
4323                         BT_DBG("%s Event packet", hdev->name);
4324                         hci_event_packet(hdev, skb);
4325                         break;
4326
4327                 case HCI_ACLDATA_PKT:
4328                         BT_DBG("%s ACL data packet", hdev->name);
4329                         hci_acldata_packet(hdev, skb);
4330                         break;
4331
4332                 case HCI_SCODATA_PKT:
4333                         BT_DBG("%s SCO data packet", hdev->name);
4334                         hci_scodata_packet(hdev, skb);
4335                         break;
4336
4337                 default:
4338                         kfree_skb(skb);
4339                         break;
4340                 }
4341         }
4342 }
4343
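/* Command scheduler: hdev->cmd_cnt tracks how many commands the
 * controller is currently willing to accept (refilled from the ncmd
 * field of Command Complete/Command Status events).  A clone of each
 * sent command is kept in hdev->sent_cmd so that replies can be
 * matched and, if necessary, the command can be retransmitted.
 */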
4344 static void hci_cmd_work(struct work_struct *work)
4345 {
4346         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4347         struct sk_buff *skb;
4348
4349         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4350                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4351
4352         /* Send queued commands */
4353         if (atomic_read(&hdev->cmd_cnt)) {
4354                 skb = skb_dequeue(&hdev->cmd_q);
4355                 if (!skb)
4356                         return;
4357
4358                 kfree_skb(hdev->sent_cmd);
4359
4360                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4361                 if (hdev->sent_cmd) {
4362                         atomic_dec(&hdev->cmd_cnt);
4363                         hci_send_frame(hdev, skb);
4364                         if (test_bit(HCI_RESET, &hdev->flags))
4365                                 cancel_delayed_work(&hdev->cmd_timer);
4366                         else
4367                                 schedule_delayed_work(&hdev->cmd_timer,
4368                                                       HCI_CMD_TIMEOUT);
4369                 } else {
4370                         skb_queue_head(&hdev->cmd_q, skb);
4371                         queue_work(hdev->workqueue, &hdev->cmd_work);
4372                 }
4373         }
4374 }