Bluetooth: Auto connection and power on
[cascardo/linux.git] net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dev_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
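
/* Usage sketch (assumes debugfs mounted at /sys/kernel/debug and a
 * controller named hci0; paths differ per setup):
 *
 *      echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing "Y" while the device is up sends HCI_OP_ENABLE_DUT_MODE to the
 * controller; writing "N" leaves Device Under Test mode by resetting it
 * with HCI_OP_RESET.
 */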

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored with the byte
                 * order reversed relative to the big-endian form that
                 * the %pUb modifier expects, so reverse them here.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;
        struct sk_buff *skb;
        __u8 mode;
        int err;

        if (val != 0 && val != 1)
                return -EINVAL;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        hci_req_lock(hdev);
        mode = val;
        skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
                             &mode, HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_lock(hdev);
        hdev->ssp_debug_mode = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->ssp_debug_mode;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
                        ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dev_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

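/* Sniff interval values are expressed in baseband slots of 0.625 ms, and
 * the HCI specification allows only even values, hence the "val % 2"
 * checks below.
 */
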
static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t *addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
                addr = &hdev->static_addr;
                addr_type = ADDR_LE_DEV_RANDOM;
        } else {
                addr = &hdev->bdaddr;
                addr_type = ADDR_LE_DEV_PUBLIC;
        }

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           8, ltk->rand, 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

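/* LE connection interval values are in units of 1.25 ms, so the range
 * 0x0006-0x0c80 enforced below corresponds to 7.5 ms - 4 s.
 */
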
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

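/* The LE advertising channel map is a 3-bit field: bit 0 enables
 * channel 37, bit 1 channel 38 and bit 2 channel 39, so valid values
 * range from 0x01 to 0x07 and at least one channel must be enabled.
 */
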
static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
                            size_t count, loff_t *position)
{
        struct hci_dev *hdev = fp->private_data;
        bool enable;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));

        if (copy_from_user(buf, user_buffer, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';

        if (strtobool(buf, &enable) < 0)
                return -EINVAL;

        if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

        return count;
}

static const struct file_operations lowpan_debugfs_fops = {
        .open           = simple_open,
        .read           = lowpan_read,
        .write          = lowpan_write,
        .llseek         = default_llseek,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
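
/* A minimal usage sketch for __hci_cmd_sync(), mirroring the pattern in
 * dut_mode_write() above (the caller holds hci_req_lock):
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      err = -bt_to_errno(skb->data[0]);
 *      kfree_skb(skb);
 *
 * skb->data[0] is the status byte of the returned Command Complete event.
 */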

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
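
/* A typical caller passes a request builder such as hci_reset_req()
 * below, e.g. (timeout value illustrative):
 *
 *      hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 */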

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
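        /* (0x7d00 = 32000 slots of 0.625 ms = 20 seconds) */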
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

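/* Select the Inquiry Mode parameter for HCI_OP_WRITE_INQUIRY_MODE:
 * 0x00 = standard inquiry results, 0x01 = inquiry results with RSSI,
 * 0x02 = inquiry results with RSSI or extended inquiry results. The
 * manufacturer/revision checks below cover controllers that handle
 * RSSI inquiry results without advertising the LMP feature bit.
 */
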
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
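                /* 0x1f enables the first five LE meta events: Connection
                 * Complete, Advertising Report, Connection Update
                 * Complete, Read Remote Used Features Complete and Long
                 * Term Key Request.
                 */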
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However, some controllers list
                 * max_page as 0 as long as SSP has not been enabled.
                 * To achieve proper debugging output, force max_page to
                 * at least 1.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev))
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it is
         * marked as supported. If it is not supported, assume that the
         * controller does not actually support stored link keys, which
         * makes this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev))
                hci_set_le_support(req);

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if ((lmp_sc_capable(hdev) ||
             test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
            test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        /* HCI_BREDR covers single-mode LE and BR/EDR controllers as
         * well as dual-mode BR/EDR/LE ones. AMP controllers only need
         * the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* Only create debugfs entries during the initial setup
         * phase and not every time the controller gets powered on.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;

        debugfs_create_file("features", 0444, hdev->debugfs, hdev,
                            &features_fops);
        debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
                           &hdev->manufacturer);
        debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
        debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
        debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
                            &blacklist_fops);
        debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

        if (lmp_bredr_capable(hdev)) {
                debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
                                    hdev, &inquiry_cache_fops);
                debugfs_create_file("link_keys", 0400, hdev->debugfs,
                                    hdev, &link_keys_fops);
                debugfs_create_file("dev_class", 0444, hdev->debugfs,
                                    hdev, &dev_class_fops);
                debugfs_create_file("voice_setting", 0444, hdev->debugfs,
                                    hdev, &voice_setting_fops);
        }

        if (lmp_ssp_capable(hdev)) {
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);
                debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
                                    hdev, &ssp_debug_mode_fops);
                debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
                                    hdev, &force_sc_support_fops);
                debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
                                    hdev, &sc_only_mode_fops);
        }

        if (lmp_sniff_capable(hdev)) {
                debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
                                    hdev, &idle_timeout_fops);
                debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_min_interval_fops);
                debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_max_interval_fops);
        }

        if (lmp_le_capable(hdev)) {
                debugfs_create_file("identity", 0400, hdev->debugfs,
                                    hdev, &identity_fops);
                debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
                                    hdev, &rpa_timeout_fops);
                debugfs_create_file("random_address", 0444, hdev->debugfs,
                                    hdev, &random_address_fops);
                debugfs_create_file("static_address", 0444, hdev->debugfs,
                                    hdev, &static_address_fops);

                /* For controllers with a public address, provide a debug
                 * option to force the usage of the configured static
                 * address. By default the public address is used.
                 */
                if (bacmp(&hdev->bdaddr, BDADDR_ANY))
                        debugfs_create_file("force_static_address", 0644,
                                            hdev->debugfs, hdev,
                                            &force_static_address_fops);

                debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
                                  &hdev->le_white_list_size);
                debugfs_create_file("identity_resolving_keys", 0400,
                                    hdev->debugfs, hdev,
                                    &identity_resolving_keys_fops);
                debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
                                    hdev, &long_term_keys_fops);
                debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
                                    hdev, &conn_min_interval_fops);
                debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
                                    hdev, &conn_max_interval_fops);
                debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1694                                     hdev, &adv_channel_map_fops);
1695                 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1696                                     &lowpan_debugfs_fops);
1697         }
1698
1699         return 0;
1700 }
1701
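/* The next four request helpers each wrap a single HCI command. They
 * back the HCISETSCAN, HCISETAUTH/HCISETENCRYPT and HCISETLINKPOL
 * ioctls dispatched from hci_dev_cmd() further below.
 */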
1702 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1703 {
1704         __u8 scan = opt;
1705
1706         BT_DBG("%s %x", req->hdev->name, scan);
1707
1708         /* Inquiry and Page scans */
1709         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1710 }
1711
1712 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1713 {
1714         __u8 auth = opt;
1715
1716         BT_DBG("%s %x", req->hdev->name, auth);
1717
1718         /* Authentication */
1719         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1720 }
1721
1722 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1723 {
1724         __u8 encrypt = opt;
1725
1726         BT_DBG("%s %x", req->hdev->name, encrypt);
1727
1728         /* Encryption */
1729         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1730 }
1731
1732 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1733 {
1734         __le16 policy = cpu_to_le16(opt);
1735
1736         BT_DBG("%s %x", req->hdev->name, policy);
1737
1738         /* Default link policy */
1739         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1740 }
1741
1742 /* Get HCI device by index.
1743  * Device is held on return. */
1744 struct hci_dev *hci_dev_get(int index)
1745 {
1746         struct hci_dev *hdev = NULL, *d;
1747
1748         BT_DBG("%d", index);
1749
1750         if (index < 0)
1751                 return NULL;
1752
1753         read_lock(&hci_dev_list_lock);
1754         list_for_each_entry(d, &hci_dev_list, list) {
1755                 if (d->id == index) {
1756                         hdev = hci_dev_hold(d);
1757                         break;
1758                 }
1759         }
1760         read_unlock(&hci_dev_list_lock);
1761         return hdev;
1762 }
1763
1764 /* ---- Inquiry support ---- */
1765
1766 bool hci_discovery_active(struct hci_dev *hdev)
1767 {
1768         struct discovery_state *discov = &hdev->discovery;
1769
1770         switch (discov->state) {
1771         case DISCOVERY_FINDING:
1772         case DISCOVERY_RESOLVING:
1773                 return true;
1774
1775         default:
1776                 return false;
1777         }
1778 }
1779
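/* Advance the discovery state machine. Entering DISCOVERY_STOPPED
 * re-evaluates the background scan and (unless discovery never left
 * DISCOVERY_STARTING) signals userspace via mgmt_discovering(hdev, 0),
 * while entering DISCOVERY_FINDING signals mgmt_discovering(hdev, 1).
 */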
1780 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1781 {
1782         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1783
1784         if (hdev->discovery.state == state)
1785                 return;
1786
1787         switch (state) {
1788         case DISCOVERY_STOPPED:
1789                 hci_update_background_scan(hdev);
1790
1791                 if (hdev->discovery.state != DISCOVERY_STARTING)
1792                         mgmt_discovering(hdev, 0);
1793                 break;
1794         case DISCOVERY_STARTING:
1795                 break;
1796         case DISCOVERY_FINDING:
1797                 mgmt_discovering(hdev, 1);
1798                 break;
1799         case DISCOVERY_RESOLVING:
1800                 break;
1801         case DISCOVERY_STOPPING:
1802                 break;
1803         }
1804
1805         hdev->discovery.state = state;
1806 }
1807
1808 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1809 {
1810         struct discovery_state *cache = &hdev->discovery;
1811         struct inquiry_entry *p, *n;
1812
1813         list_for_each_entry_safe(p, n, &cache->all, all) {
1814                 list_del(&p->all);
1815                 kfree(p);
1816         }
1817
1818         INIT_LIST_HEAD(&cache->unknown);
1819         INIT_LIST_HEAD(&cache->resolve);
1820 }
1821
1822 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1823                                                bdaddr_t *bdaddr)
1824 {
1825         struct discovery_state *cache = &hdev->discovery;
1826         struct inquiry_entry *e;
1827
1828         BT_DBG("cache %p, %pMR", cache, bdaddr);
1829
1830         list_for_each_entry(e, &cache->all, all) {
1831                 if (!bacmp(&e->data.bdaddr, bdaddr))
1832                         return e;
1833         }
1834
1835         return NULL;
1836 }
1837
1838 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1839                                                        bdaddr_t *bdaddr)
1840 {
1841         struct discovery_state *cache = &hdev->discovery;
1842         struct inquiry_entry *e;
1843
1844         BT_DBG("cache %p, %pMR", cache, bdaddr);
1845
1846         list_for_each_entry(e, &cache->unknown, list) {
1847                 if (!bacmp(&e->data.bdaddr, bdaddr))
1848                         return e;
1849         }
1850
1851         return NULL;
1852 }
1853
1854 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1855                                                        bdaddr_t *bdaddr,
1856                                                        int state)
1857 {
1858         struct discovery_state *cache = &hdev->discovery;
1859         struct inquiry_entry *e;
1860
1861         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1862
1863         list_for_each_entry(e, &cache->resolve, list) {
1864                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1865                         return e;
1866                 if (!bacmp(&e->data.bdaddr, bdaddr))
1867                         return e;
1868         }
1869
1870         return NULL;
1871 }
1872
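/* Re-insert an entry into the name-resolve list, keeping it ordered by
 * signal strength: the smaller the absolute RSSI, the stronger the
 * signal and the earlier the name gets resolved. Entries whose
 * resolution is already in flight (NAME_PENDING) are skipped over.
 */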
1873 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1874                                       struct inquiry_entry *ie)
1875 {
1876         struct discovery_state *cache = &hdev->discovery;
1877         struct list_head *pos = &cache->resolve;
1878         struct inquiry_entry *p;
1879
1880         list_del(&ie->list);
1881
1882         list_for_each_entry(p, &cache->resolve, list) {
1883                 if (p->name_state != NAME_PENDING &&
1884                     abs(p->data.rssi) >= abs(ie->data.rssi))
1885                         break;
1886                 pos = &p->list;
1887         }
1888
1889         list_add(&ie->list, pos);
1890 }
1891
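/* Insert a fresh inquiry result into the cache or refresh an existing
 * entry. Returns true when the remote name is already known (no name
 * resolution needed) and false when the name is still unknown or the
 * entry could not be allocated.
 */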
1892 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1893                               bool name_known, bool *ssp)
1894 {
1895         struct discovery_state *cache = &hdev->discovery;
1896         struct inquiry_entry *ie;
1897
1898         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1899
1900         hci_remove_remote_oob_data(hdev, &data->bdaddr);
1901
1902         if (ssp)
1903                 *ssp = data->ssp_mode;
1904
1905         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1906         if (ie) {
1907                 if (ie->data.ssp_mode && ssp)
1908                         *ssp = true;
1909
1910                 if (ie->name_state == NAME_NEEDED &&
1911                     data->rssi != ie->data.rssi) {
1912                         ie->data.rssi = data->rssi;
1913                         hci_inquiry_cache_update_resolve(hdev, ie);
1914                 }
1915
1916                 goto update;
1917         }
1918
1919         /* Entry not in the cache. Add a new one. */
1920         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1921         if (!ie)
1922                 return false;
1923
1924         list_add(&ie->all, &cache->all);
1925
1926         if (name_known) {
1927                 ie->name_state = NAME_KNOWN;
1928         } else {
1929                 ie->name_state = NAME_NOT_KNOWN;
1930                 list_add(&ie->list, &cache->unknown);
1931         }
1932
1933 update:
1934         if (name_known && ie->name_state != NAME_KNOWN &&
1935             ie->name_state != NAME_PENDING) {
1936                 ie->name_state = NAME_KNOWN;
1937                 list_del(&ie->list);
1938         }
1939
1940         memcpy(&ie->data, data, sizeof(*data));
1941         ie->timestamp = jiffies;
1942         cache->timestamp = jiffies;
1943
1944         if (ie->name_state == NAME_NOT_KNOWN)
1945                 return false;
1946
1947         return true;
1948 }
1949
1950 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1951 {
1952         struct discovery_state *cache = &hdev->discovery;
1953         struct inquiry_info *info = (struct inquiry_info *) buf;
1954         struct inquiry_entry *e;
1955         int copied = 0;
1956
1957         list_for_each_entry(e, &cache->all, all) {
1958                 struct inquiry_data *data = &e->data;
1959
1960                 if (copied >= num)
1961                         break;
1962
1963                 bacpy(&info->bdaddr, &data->bdaddr);
1964                 info->pscan_rep_mode    = data->pscan_rep_mode;
1965                 info->pscan_period_mode = data->pscan_period_mode;
1966                 info->pscan_mode        = data->pscan_mode;
1967                 memcpy(info->dev_class, data->dev_class, 3);
1968                 info->clock_offset      = data->clock_offset;
1969
1970                 info++;
1971                 copied++;
1972         }
1973
1974         BT_DBG("cache %p, copied %d", cache, copied);
1975         return copied;
1976 }
1977
1978 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1979 {
1980         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1981         struct hci_dev *hdev = req->hdev;
1982         struct hci_cp_inquiry cp;
1983
1984         BT_DBG("%s", hdev->name);
1985
1986         if (test_bit(HCI_INQUIRY, &hdev->flags))
1987                 return;
1988
1989         /* Start Inquiry */
1990         memcpy(&cp.lap, &ir->lap, 3);
1991         cp.length  = ir->length;
1992         cp.num_rsp = ir->num_rsp;
1993         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1994 }
1995
1996 static int wait_inquiry(void *word)
1997 {
1998         schedule();
1999         return signal_pending(current);
2000 }
2001
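/* Handler for the HCIINQUIRY ioctl. A rough userspace sketch (for
 * illustration only; the BlueZ library is the canonical user) passes a
 * buffer that begins with struct hci_inquiry_req and leaves room for
 * the returned inquiry_info records right behind it:
 *
 *	buf = malloc(sizeof(*ir) + 255 * sizeof(struct inquiry_info));
 *	ir = buf;
 *	ir->dev_id  = dev_id;
 *	ir->flags   = IREQ_CACHE_FLUSH;	// force a fresh inquiry
 *	memcpy(ir->lap, giac, 3);	// GIAC, 0x33 0x8b 0x9e
 *	ir->length  = 8;		// inquiry length in 1.28s units
 *	ir->num_rsp = 0;		// 0 means unlimited (capped at 255)
 *	ioctl(dd, HCIINQUIRY, (unsigned long) buf);
 */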
2002 int hci_inquiry(void __user *arg)
2003 {
2004         __u8 __user *ptr = arg;
2005         struct hci_inquiry_req ir;
2006         struct hci_dev *hdev;
2007         int err = 0, do_inquiry = 0, max_rsp;
2008         long timeo;
2009         __u8 *buf;
2010
2011         if (copy_from_user(&ir, ptr, sizeof(ir)))
2012                 return -EFAULT;
2013
2014         hdev = hci_dev_get(ir.dev_id);
2015         if (!hdev)
2016                 return -ENODEV;
2017
2018         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2019                 err = -EBUSY;
2020                 goto done;
2021         }
2022
2023         if (hdev->dev_type != HCI_BREDR) {
2024                 err = -EOPNOTSUPP;
2025                 goto done;
2026         }
2027
2028         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2029                 err = -EOPNOTSUPP;
2030                 goto done;
2031         }
2032
2033         hci_dev_lock(hdev);
2034         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2035             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2036                 hci_inquiry_cache_flush(hdev);
2037                 do_inquiry = 1;
2038         }
2039         hci_dev_unlock(hdev);
2040
2041         timeo = ir.length * msecs_to_jiffies(2000);
2042
2043         if (do_inquiry) {
2044                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2045                                    timeo);
2046                 if (err < 0)
2047                         goto done;
2048
2049                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2050                  * cleared). If it is interrupted by a signal, return -EINTR.
2051                  */
2052                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2053                                 TASK_INTERRUPTIBLE))
2054                         return -EINTR;
2055         }
2056
2057         /* For an unlimited number of responses, use a buffer with
2058          * 255 entries.
2059          */
2060         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2061
2062         /* inquiry_cache_dump() can't sleep. Therefore we allocate a
2063          * temporary buffer and then copy it to user space.
2064          */
2065         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2066         if (!buf) {
2067                 err = -ENOMEM;
2068                 goto done;
2069         }
2070
2071         hci_dev_lock(hdev);
2072         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2073         hci_dev_unlock(hdev);
2074
2075         BT_DBG("num_rsp %d", ir.num_rsp);
2076
2077         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2078                 ptr += sizeof(ir);
2079                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2080                                  ir.num_rsp))
2081                         err = -EFAULT;
2082         } else
2083                 err = -EFAULT;
2084
2085         kfree(buf);
2086
2087 done:
2088         hci_dev_put(hdev);
2089         return err;
2090 }
2091
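/* Bring the device up: call the driver's open(), run the vendor
 * setup() callback once during the HCI_SETUP phase, and then perform
 * the staged __hci_init() sequence unless the device is raw or bound
 * to a user channel. On failure everything is torn down again before
 * returning.
 */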
2092 static int hci_dev_do_open(struct hci_dev *hdev)
2093 {
2094         int ret = 0;
2095
2096         BT_DBG("%s %p", hdev->name, hdev);
2097
2098         hci_req_lock(hdev);
2099
2100         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2101                 ret = -ENODEV;
2102                 goto done;
2103         }
2104
2105         if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2106                 /* Check for rfkill but allow the HCI setup stage to
2107                  * proceed (which in itself doesn't cause any RF activity).
2108                  */
2109                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2110                         ret = -ERFKILL;
2111                         goto done;
2112                 }
2113
2114                 /* Check for valid public address or a configured static
2115          * random address, but let the HCI setup proceed to
2116                  * be able to determine if there is a public address
2117                  * or not.
2118                  *
2119                  * In case of user channel usage, it is not important
2120                  * if a public address or static random address is
2121                  * available.
2122                  *
2123                  * This check is only valid for BR/EDR controllers
2124                  * since AMP controllers do not have an address.
2125                  */
2126                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2127                     hdev->dev_type == HCI_BREDR &&
2128                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2129                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2130                         ret = -EADDRNOTAVAIL;
2131                         goto done;
2132                 }
2133         }
2134
2135         if (test_bit(HCI_UP, &hdev->flags)) {
2136                 ret = -EALREADY;
2137                 goto done;
2138         }
2139
2140         if (hdev->open(hdev)) {
2141                 ret = -EIO;
2142                 goto done;
2143         }
2144
2145         atomic_set(&hdev->cmd_cnt, 1);
2146         set_bit(HCI_INIT, &hdev->flags);
2147
2148         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2149                 ret = hdev->setup(hdev);
2150
2151         if (!ret) {
2152                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2153                         set_bit(HCI_RAW, &hdev->flags);
2154
2155                 if (!test_bit(HCI_RAW, &hdev->flags) &&
2156                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2157                         ret = __hci_init(hdev);
2158         }
2159
2160         clear_bit(HCI_INIT, &hdev->flags);
2161
2162         if (!ret) {
2163                 hci_dev_hold(hdev);
2164                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2165                 set_bit(HCI_UP, &hdev->flags);
2166                 hci_notify(hdev, HCI_DEV_UP);
2167                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2168                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2169                     hdev->dev_type == HCI_BREDR) {
2170                         hci_dev_lock(hdev);
2171                         mgmt_powered(hdev, 1);
2172                         hci_dev_unlock(hdev);
2173                 }
2174         } else {
2175                 /* Init failed, cleanup */
2176                 flush_work(&hdev->tx_work);
2177                 flush_work(&hdev->cmd_work);
2178                 flush_work(&hdev->rx_work);
2179
2180                 skb_queue_purge(&hdev->cmd_q);
2181                 skb_queue_purge(&hdev->rx_q);
2182
2183                 if (hdev->flush)
2184                         hdev->flush(hdev);
2185
2186                 if (hdev->sent_cmd) {
2187                         kfree_skb(hdev->sent_cmd);
2188                         hdev->sent_cmd = NULL;
2189                 }
2190
2191                 hdev->close(hdev);
2192                 hdev->flags = 0;
2193         }
2194
2195 done:
2196         hci_req_unlock(hdev);
2197         return ret;
2198 }
2199
2200 /* ---- HCI ioctl helpers ---- */
2201
2202 int hci_dev_open(__u16 dev)
2203 {
2204         struct hci_dev *hdev;
2205         int err;
2206
2207         hdev = hci_dev_get(dev);
2208         if (!hdev)
2209                 return -ENODEV;
2210
2211         /* We need to ensure that no other power on/off work is pending
2212          * before proceeding to call hci_dev_do_open. This is
2213          * particularly important if the setup procedure has not yet
2214          * completed.
2215          */
2216         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2217                 cancel_delayed_work(&hdev->power_off);
2218
2219         /* After this call it is guaranteed that the setup procedure
2220          * has finished. This means that error conditions like RFKILL
2221          * or no valid public or static random address apply.
2222          */
2223         flush_workqueue(hdev->req_workqueue);
2224
2225         err = hci_dev_do_open(hdev);
2226
2227         hci_dev_put(hdev);
2228
2229         return err;
2230 }
2231
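/* Tear the device down: cancel timers and delayed work, flush the work
 * queues, drop all queued frames and, for non-raw devices with the
 * HCI_QUIRK_RESET_ON_CLOSE quirk, send a final HCI_Reset before
 * calling the driver's close().
 */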
2232 static int hci_dev_do_close(struct hci_dev *hdev)
2233 {
2234         BT_DBG("%s %p", hdev->name, hdev);
2235
2236         cancel_delayed_work(&hdev->power_off);
2237
2238         hci_req_cancel(hdev, ENODEV);
2239         hci_req_lock(hdev);
2240
2241         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2242                 del_timer_sync(&hdev->cmd_timer);
2243                 hci_req_unlock(hdev);
2244                 return 0;
2245         }
2246
2247         /* Flush RX and TX works */
2248         flush_work(&hdev->tx_work);
2249         flush_work(&hdev->rx_work);
2250
2251         if (hdev->discov_timeout > 0) {
2252                 cancel_delayed_work(&hdev->discov_off);
2253                 hdev->discov_timeout = 0;
2254                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2255                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2256         }
2257
2258         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2259                 cancel_delayed_work(&hdev->service_cache);
2260
2261         cancel_delayed_work_sync(&hdev->le_scan_disable);
2262
2263         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2264                 cancel_delayed_work_sync(&hdev->rpa_expired);
2265
2266         hci_dev_lock(hdev);
2267         hci_inquiry_cache_flush(hdev);
2268         hci_conn_hash_flush(hdev);
2269         hci_pend_le_conns_clear(hdev);
2270         hci_dev_unlock(hdev);
2271
2272         hci_notify(hdev, HCI_DEV_DOWN);
2273
2274         if (hdev->flush)
2275                 hdev->flush(hdev);
2276
2277         /* Reset device */
2278         skb_queue_purge(&hdev->cmd_q);
2279         atomic_set(&hdev->cmd_cnt, 1);
2280         if (!test_bit(HCI_RAW, &hdev->flags) &&
2281             !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2282             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2283                 set_bit(HCI_INIT, &hdev->flags);
2284                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2285                 clear_bit(HCI_INIT, &hdev->flags);
2286         }
2287
2288         /* Flush cmd work */
2289         flush_work(&hdev->cmd_work);
2290
2291         /* Drop queues */
2292         skb_queue_purge(&hdev->rx_q);
2293         skb_queue_purge(&hdev->cmd_q);
2294         skb_queue_purge(&hdev->raw_q);
2295
2296         /* Drop last sent command */
2297         if (hdev->sent_cmd) {
2298                 del_timer_sync(&hdev->cmd_timer);
2299                 kfree_skb(hdev->sent_cmd);
2300                 hdev->sent_cmd = NULL;
2301         }
2302
2303         kfree_skb(hdev->recv_evt);
2304         hdev->recv_evt = NULL;
2305
2306         /* After this point our queues are empty
2307          * and no tasks are scheduled. */
2308         hdev->close(hdev);
2309
2310         /* Clear flags */
2311         hdev->flags = 0;
2312         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2313
2314         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2315                 if (hdev->dev_type == HCI_BREDR) {
2316                         hci_dev_lock(hdev);
2317                         mgmt_powered(hdev, 0);
2318                         hci_dev_unlock(hdev);
2319                 }
2320         }
2321
2322         /* Controller radio is available but is currently powered down */
2323         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2324
2325         memset(hdev->eir, 0, sizeof(hdev->eir));
2326         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2327         bacpy(&hdev->random_addr, BDADDR_ANY);
2328
2329         hci_req_unlock(hdev);
2330
2331         hci_dev_put(hdev);
2332         return 0;
2333 }
2334
2335 int hci_dev_close(__u16 dev)
2336 {
2337         struct hci_dev *hdev;
2338         int err;
2339
2340         hdev = hci_dev_get(dev);
2341         if (!hdev)
2342                 return -ENODEV;
2343
2344         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2345                 err = -EBUSY;
2346                 goto done;
2347         }
2348
2349         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2350                 cancel_delayed_work(&hdev->power_off);
2351
2352         err = hci_dev_do_close(hdev);
2353
2354 done:
2355         hci_dev_put(hdev);
2356         return err;
2357 }
2358
2359 int hci_dev_reset(__u16 dev)
2360 {
2361         struct hci_dev *hdev;
2362         int ret = 0;
2363
2364         hdev = hci_dev_get(dev);
2365         if (!hdev)
2366                 return -ENODEV;
2367
2368         hci_req_lock(hdev);
2369
2370         if (!test_bit(HCI_UP, &hdev->flags)) {
2371                 ret = -ENETDOWN;
2372                 goto done;
2373         }
2374
2375         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2376                 ret = -EBUSY;
2377                 goto done;
2378         }
2379
2380         /* Drop queues */
2381         skb_queue_purge(&hdev->rx_q);
2382         skb_queue_purge(&hdev->cmd_q);
2383
2384         hci_dev_lock(hdev);
2385         hci_inquiry_cache_flush(hdev);
2386         hci_conn_hash_flush(hdev);
2387         hci_dev_unlock(hdev);
2388
2389         if (hdev->flush)
2390                 hdev->flush(hdev);
2391
2392         atomic_set(&hdev->cmd_cnt, 1);
2393         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2394
2395         if (!test_bit(HCI_RAW, &hdev->flags))
2396                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2397
2398 done:
2399         hci_req_unlock(hdev);
2400         hci_dev_put(hdev);
2401         return ret;
2402 }
2403
2404 int hci_dev_reset_stat(__u16 dev)
2405 {
2406         struct hci_dev *hdev;
2407         int ret = 0;
2408
2409         hdev = hci_dev_get(dev);
2410         if (!hdev)
2411                 return -ENODEV;
2412
2413         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2414                 ret = -EBUSY;
2415                 goto done;
2416         }
2417
2418         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2419
2420 done:
2421         hci_dev_put(hdev);
2422         return ret;
2423 }
2424
2425 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2426 {
2427         struct hci_dev *hdev;
2428         struct hci_dev_req dr;
2429         int err = 0;
2430
2431         if (copy_from_user(&dr, arg, sizeof(dr)))
2432                 return -EFAULT;
2433
2434         hdev = hci_dev_get(dr.dev_id);
2435         if (!hdev)
2436                 return -ENODEV;
2437
2438         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2439                 err = -EBUSY;
2440                 goto done;
2441         }
2442
2443         if (hdev->dev_type != HCI_BREDR) {
2444                 err = -EOPNOTSUPP;
2445                 goto done;
2446         }
2447
2448         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2449                 err = -EOPNOTSUPP;
2450                 goto done;
2451         }
2452
2453         switch (cmd) {
2454         case HCISETAUTH:
2455                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2456                                    HCI_INIT_TIMEOUT);
2457                 break;
2458
2459         case HCISETENCRYPT:
2460                 if (!lmp_encrypt_capable(hdev)) {
2461                         err = -EOPNOTSUPP;
2462                         break;
2463                 }
2464
2465                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2466                         /* Auth must be enabled first */
2467                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2468                                            HCI_INIT_TIMEOUT);
2469                         if (err)
2470                                 break;
2471                 }
2472
2473                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2474                                    HCI_INIT_TIMEOUT);
2475                 break;
2476
2477         case HCISETSCAN:
2478                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2479                                    HCI_INIT_TIMEOUT);
2480                 break;
2481
2482         case HCISETLINKPOL:
2483                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2484                                    HCI_INIT_TIMEOUT);
2485                 break;
2486
2487         case HCISETLINKMODE:
2488                 hdev->link_mode = ((__u16) dr.dev_opt) &
2489                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2490                 break;
2491
2492         case HCISETPTYPE:
2493                 hdev->pkt_type = (__u16) dr.dev_opt;
2494                 break;
2495
2496         case HCISETACLMTU:
2497                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2498                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2499                 break;
2500
2501         case HCISETSCOMTU:
2502                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2503                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2504                 break;
2505
2506         default:
2507                 err = -EINVAL;
2508                 break;
2509         }
2510
2511 done:
2512         hci_dev_put(hdev);
2513         return err;
2514 }
2515
2516 int hci_get_dev_list(void __user *arg)
2517 {
2518         struct hci_dev *hdev;
2519         struct hci_dev_list_req *dl;
2520         struct hci_dev_req *dr;
2521         int n = 0, size, err;
2522         __u16 dev_num;
2523
2524         if (get_user(dev_num, (__u16 __user *) arg))
2525                 return -EFAULT;
2526
2527         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2528                 return -EINVAL;
2529
2530         size = sizeof(*dl) + dev_num * sizeof(*dr);
2531
2532         dl = kzalloc(size, GFP_KERNEL);
2533         if (!dl)
2534                 return -ENOMEM;
2535
2536         dr = dl->dev_req;
2537
2538         read_lock(&hci_dev_list_lock);
2539         list_for_each_entry(hdev, &hci_dev_list, list) {
2540                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2541                         cancel_delayed_work(&hdev->power_off);
2542
2543                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2544                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2545
2546                 (dr + n)->dev_id  = hdev->id;
2547                 (dr + n)->dev_opt = hdev->flags;
2548
2549                 if (++n >= dev_num)
2550                         break;
2551         }
2552         read_unlock(&hci_dev_list_lock);
2553
2554         dl->dev_num = n;
2555         size = sizeof(*dl) + n * sizeof(*dr);
2556
2557         err = copy_to_user(arg, dl, size);
2558         kfree(dl);
2559
2560         return err ? -EFAULT : 0;
2561 }
2562
2563 int hci_get_dev_info(void __user *arg)
2564 {
2565         struct hci_dev *hdev;
2566         struct hci_dev_info di;
2567         int err = 0;
2568
2569         if (copy_from_user(&di, arg, sizeof(di)))
2570                 return -EFAULT;
2571
2572         hdev = hci_dev_get(di.dev_id);
2573         if (!hdev)
2574                 return -ENODEV;
2575
2576         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2577                 cancel_delayed_work_sync(&hdev->power_off);
2578
2579         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2580                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2581
2582         strcpy(di.name, hdev->name);
2583         di.bdaddr   = hdev->bdaddr;
2584         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2585         di.flags    = hdev->flags;
2586         di.pkt_type = hdev->pkt_type;
2587         if (lmp_bredr_capable(hdev)) {
2588                 di.acl_mtu  = hdev->acl_mtu;
2589                 di.acl_pkts = hdev->acl_pkts;
2590                 di.sco_mtu  = hdev->sco_mtu;
2591                 di.sco_pkts = hdev->sco_pkts;
2592         } else {
2593                 di.acl_mtu  = hdev->le_mtu;
2594                 di.acl_pkts = hdev->le_pkts;
2595                 di.sco_mtu  = 0;
2596                 di.sco_pkts = 0;
2597         }
2598         di.link_policy = hdev->link_policy;
2599         di.link_mode   = hdev->link_mode;
2600
2601         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2602         memcpy(&di.features, &hdev->features, sizeof(di.features));
2603
2604         if (copy_to_user(arg, &di, sizeof(di)))
2605                 err = -EFAULT;
2606
2607         hci_dev_put(hdev);
2608
2609         return err;
2610 }
2611
2612 /* ---- Interface to HCI drivers ---- */
2613
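/* rfkill callback: blocking the radio closes the device immediately
 * unless it is still in the HCI_SETUP phase, in which case only
 * HCI_RFKILLED is set and hci_dev_do_open() will refuse later power-on
 * attempts with -ERFKILL.
 */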
2614 static int hci_rfkill_set_block(void *data, bool blocked)
2615 {
2616         struct hci_dev *hdev = data;
2617
2618         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2619
2620         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2621                 return -EBUSY;
2622
2623         if (blocked) {
2624                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2625                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2626                         hci_dev_do_close(hdev);
2627         } else {
2628                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2629         }
2630
2631         return 0;
2632 }
2633
2634 static const struct rfkill_ops hci_rfkill_ops = {
2635         .set_block = hci_rfkill_set_block,
2636 };
2637
2638 static void hci_power_on(struct work_struct *work)
2639 {
2640         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2641         int err;
2642
2643         BT_DBG("%s", hdev->name);
2644
2645         err = hci_dev_do_open(hdev);
2646         if (err < 0) {
2647                 mgmt_set_powered_failed(hdev, err);
2648                 return;
2649         }
2650
2651         /* During the HCI setup phase, a few error conditions are
2652          * ignored and they need to be checked now. If they are still
2653          * valid, it is important to turn the device back off.
2654          */
2655         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2656             (hdev->dev_type == HCI_BREDR &&
2657              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2658              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2659                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2660                 hci_dev_do_close(hdev);
2661         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2662                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2663                                    HCI_AUTO_OFF_TIMEOUT);
2664         }
2665
2666         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2667                 mgmt_index_added(hdev);
2668 }
2669
2670 static void hci_power_off(struct work_struct *work)
2671 {
2672         struct hci_dev *hdev = container_of(work, struct hci_dev,
2673                                             power_off.work);
2674
2675         BT_DBG("%s", hdev->name);
2676
2677         hci_dev_do_close(hdev);
2678 }
2679
2680 static void hci_discov_off(struct work_struct *work)
2681 {
2682         struct hci_dev *hdev;
2683
2684         hdev = container_of(work, struct hci_dev, discov_off.work);
2685
2686         BT_DBG("%s", hdev->name);
2687
2688         mgmt_discoverable_timeout(hdev);
2689 }
2690
2691 void hci_uuids_clear(struct hci_dev *hdev)
2692 {
2693         struct bt_uuid *uuid, *tmp;
2694
2695         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2696                 list_del(&uuid->list);
2697                 kfree(uuid);
2698         }
2699 }
2700
2701 void hci_link_keys_clear(struct hci_dev *hdev)
2702 {
2703         struct list_head *p, *n;
2704
2705         list_for_each_safe(p, n, &hdev->link_keys) {
2706                 struct link_key *key;
2707
2708                 key = list_entry(p, struct link_key, list);
2709
2710                 list_del(p);
2711                 kfree(key);
2712         }
2713 }
2714
2715 void hci_smp_ltks_clear(struct hci_dev *hdev)
2716 {
2717         struct smp_ltk *k, *tmp;
2718
2719         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2720                 list_del(&k->list);
2721                 kfree(k);
2722         }
2723 }
2724
2725 void hci_smp_irks_clear(struct hci_dev *hdev)
2726 {
2727         struct smp_irk *k, *tmp;
2728
2729         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2730                 list_del(&k->list);
2731                 kfree(k);
2732         }
2733 }
2734
2735 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2736 {
2737         struct link_key *k;
2738
2739         list_for_each_entry(k, &hdev->link_keys, list)
2740                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2741                         return k;
2742
2743         return NULL;
2744 }
2745
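/* Decide whether a BR/EDR link key should be stored persistently.
 * Legacy keys are always kept and debug keys never are; for the
 * remaining types the bonding requirements negotiated by the two sides
 * decide, as spelled out rule by rule below.
 */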
2746 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2747                                u8 key_type, u8 old_key_type)
2748 {
2749         /* Legacy key */
2750         if (key_type < 0x03)
2751                 return true;
2752
2753         /* Debug keys are insecure so don't store them persistently */
2754         if (key_type == HCI_LK_DEBUG_COMBINATION)
2755                 return false;
2756
2757         /* Changed combination key and there's no previous one */
2758         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2759                 return false;
2760
2761         /* Security mode 3 case */
2762         if (!conn)
2763                 return true;
2764
2765         /* Neither local nor remote side had no-bonding as a requirement */
2766         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2767                 return true;
2768
2769         /* Local side had dedicated bonding as a requirement */
2770         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2771                 return true;
2772
2773         /* Remote side had dedicated bonding as a requirement */
2774         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2775                 return true;
2776
2777         /* If none of the above criteria match, then don't store the key
2778          * persistently */
2779         return false;
2780 }
2781
2782 static bool ltk_type_master(u8 type)
2783 {
2784         if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2785                 return true;
2786
2787         return false;
2788 }
2789
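/* Look up an LTK by its EDIV and Rand values, as delivered in an LE
 * Long Term Key Request event, additionally matching on whether the
 * key is a master-role key (see ltk_type_master() above).
 */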
2790 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2791                              bool master)
2792 {
2793         struct smp_ltk *k;
2794
2795         list_for_each_entry(k, &hdev->long_term_keys, list) {
2796                 if (k->ediv != ediv ||
2797                     memcmp(rand, k->rand, sizeof(k->rand)))
2798                         continue;
2799
2800                 if (ltk_type_master(k->type) != master)
2801                         continue;
2802
2803                 return k;
2804         }
2805
2806         return NULL;
2807 }
2808
2809 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2810                                      u8 addr_type, bool master)
2811 {
2812         struct smp_ltk *k;
2813
2814         list_for_each_entry(k, &hdev->long_term_keys, list)
2815                 if (addr_type == k->bdaddr_type &&
2816                     bacmp(bdaddr, &k->bdaddr) == 0 &&
2817                     ltk_type_master(k->type) == master)
2818                         return k;
2819
2820         return NULL;
2821 }
2822
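/* Resolve a Resolvable Private Address to its IRK. The first pass only
 * compares against RPAs that have been resolved and cached before; the
 * second pass does the actual AES work via smp_irk_matches() and, on a
 * match, caches the RPA in the IRK for the next lookup.
 */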
2823 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2824 {
2825         struct smp_irk *irk;
2826
2827         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2828                 if (!bacmp(&irk->rpa, rpa))
2829                         return irk;
2830         }
2831
2832         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2833                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2834                         bacpy(&irk->rpa, rpa);
2835                         return irk;
2836                 }
2837         }
2838
2839         return NULL;
2840 }
2841
2842 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2843                                      u8 addr_type)
2844 {
2845         struct smp_irk *irk;
2846
2847         /* Identity Address must be public or static random */
2848         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2849                 return NULL;
2850
2851         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2852                 if (addr_type == irk->addr_type &&
2853                     bacmp(bdaddr, &irk->bdaddr) == 0)
2854                         return irk;
2855         }
2856
2857         return NULL;
2858 }
2859
2860 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2861                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2862 {
2863         struct link_key *key, *old_key;
2864         u8 old_key_type;
2865         bool persistent;
2866
2867         old_key = hci_find_link_key(hdev, bdaddr);
2868         if (old_key) {
2869                 old_key_type = old_key->type;
2870                 key = old_key;
2871         } else {
2872                 old_key_type = conn ? conn->key_type : 0xff;
2873                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2874                 if (!key)
2875                         return -ENOMEM;
2876                 list_add(&key->list, &hdev->link_keys);
2877         }
2878
2879         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2880
2881         /* Some buggy controller combinations generate a changed
2882          * combination key for legacy pairing even when there's no
2883          * previous key */
2884         if (type == HCI_LK_CHANGED_COMBINATION &&
2885             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2886                 type = HCI_LK_COMBINATION;
2887                 if (conn)
2888                         conn->key_type = type;
2889         }
2890
2891         bacpy(&key->bdaddr, bdaddr);
2892         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2893         key->pin_len = pin_len;
2894
2895         if (type == HCI_LK_CHANGED_COMBINATION)
2896                 key->type = old_key_type;
2897         else
2898                 key->type = type;
2899
2900         if (!new_key)
2901                 return 0;
2902
2903         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2904
2905         mgmt_new_link_key(hdev, key, persistent);
2906
2907         if (conn)
2908                 conn->flush_key = !persistent;
2909
2910         return 0;
2911 }
2912
2913 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2914                             u8 addr_type, u8 type, u8 authenticated,
2915                             u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
2916 {
2917         struct smp_ltk *key, *old_key;
2918         bool master = ltk_type_master(type);
2919
2920         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
2921         if (old_key)
2922                 key = old_key;
2923         else {
2924                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2925                 if (!key)
2926                         return NULL;
2927                 list_add(&key->list, &hdev->long_term_keys);
2928         }
2929
2930         bacpy(&key->bdaddr, bdaddr);
2931         key->bdaddr_type = addr_type;
2932         memcpy(key->val, tk, sizeof(key->val));
2933         key->authenticated = authenticated;
2934         key->ediv = ediv;
2935         key->enc_size = enc_size;
2936         key->type = type;
2937         memcpy(key->rand, rand, sizeof(key->rand));
2938
2939         return key;
2940 }
2941
2942 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2943                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2944 {
2945         struct smp_irk *irk;
2946
2947         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2948         if (!irk) {
2949                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2950                 if (!irk)
2951                         return NULL;
2952
2953                 bacpy(&irk->bdaddr, bdaddr);
2954                 irk->addr_type = addr_type;
2955
2956                 list_add(&irk->list, &hdev->identity_resolving_keys);
2957         }
2958
2959         memcpy(irk->val, val, 16);
2960         bacpy(&irk->rpa, rpa);
2961
2962         return irk;
2963 }
2964
2965 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2966 {
2967         struct link_key *key;
2968
2969         key = hci_find_link_key(hdev, bdaddr);
2970         if (!key)
2971                 return -ENOENT;
2972
2973         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2974
2975         list_del(&key->list);
2976         kfree(key);
2977
2978         return 0;
2979 }
2980
2981 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2982 {
2983         struct smp_ltk *k, *tmp;
2984         int removed = 0;
2985
2986         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2987                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2988                         continue;
2989
2990                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2991
2992                 list_del(&k->list);
2993                 kfree(k);
2994                 removed++;
2995         }
2996
2997         return removed ? 0 : -ENOENT;
2998 }
2999
3000 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3001 {
3002         struct smp_irk *k, *tmp;
3003
3004         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3005                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3006                         continue;
3007
3008                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3009
3010                 list_del(&k->list);
3011                 kfree(k);
3012         }
3013 }
3014
3015 /* HCI command timer function */
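/* It fires when the controller has not answered the last issued command
 * in time. Restoring the command credit and re-queueing cmd_work keeps
 * the command queue from staying wedged behind the lost command.
 */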
3016 static void hci_cmd_timeout(unsigned long arg)
3017 {
3018         struct hci_dev *hdev = (void *) arg;
3019
3020         if (hdev->sent_cmd) {
3021                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3022                 u16 opcode = __le16_to_cpu(sent->opcode);
3023
3024                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3025         } else {
3026                 BT_ERR("%s command tx timeout", hdev->name);
3027         }
3028
3029         atomic_set(&hdev->cmd_cnt, 1);
3030         queue_work(hdev->workqueue, &hdev->cmd_work);
3031 }
3032
3033 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3034                                           bdaddr_t *bdaddr)
3035 {
3036         struct oob_data *data;
3037
3038         list_for_each_entry(data, &hdev->remote_oob_data, list)
3039                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3040                         return data;
3041
3042         return NULL;
3043 }
3044
3045 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3046 {
3047         struct oob_data *data;
3048
3049         data = hci_find_remote_oob_data(hdev, bdaddr);
3050         if (!data)
3051                 return -ENOENT;
3052
3053         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3054
3055         list_del(&data->list);
3056         kfree(data);
3057
3058         return 0;
3059 }
3060
3061 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3062 {
3063         struct oob_data *data, *n;
3064
3065         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3066                 list_del(&data->list);
3067                 kfree(data);
3068         }
3069 }
3070
3071 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3072                             u8 *hash, u8 *randomizer)
3073 {
3074         struct oob_data *data;
3075
3076         data = hci_find_remote_oob_data(hdev, bdaddr);
3077         if (!data) {
3078                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3079                 if (!data)
3080                         return -ENOMEM;
3081
3082                 bacpy(&data->bdaddr, bdaddr);
3083                 list_add(&data->list, &hdev->remote_oob_data);
3084         }
3085
3086         memcpy(data->hash192, hash, sizeof(data->hash192));
3087         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3088
3089         memset(data->hash256, 0, sizeof(data->hash256));
3090         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3091
3092         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3093
3094         return 0;
3095 }
3096
3097 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3098                                 u8 *hash192, u8 *randomizer192,
3099                                 u8 *hash256, u8 *randomizer256)
3100 {
3101         struct oob_data *data;
3102
3103         data = hci_find_remote_oob_data(hdev, bdaddr);
3104         if (!data) {
3105                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3106                 if (!data)
3107                         return -ENOMEM;
3108
3109                 bacpy(&data->bdaddr, bdaddr);
3110                 list_add(&data->list, &hdev->remote_oob_data);
3111         }
3112
3113         memcpy(data->hash192, hash192, sizeof(data->hash192));
3114         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3115
3116         memcpy(data->hash256, hash256, sizeof(data->hash256));
3117         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3118
3119         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3120
3121         return 0;
3122 }
3123
3124 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3125                                          bdaddr_t *bdaddr, u8 type)
3126 {
3127         struct bdaddr_list *b;
3128
3129         list_for_each_entry(b, &hdev->blacklist, list) {
3130                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3131                         return b;
3132         }
3133
3134         return NULL;
3135 }
3136
3137 void hci_blacklist_clear(struct hci_dev *hdev)
3138 {
3139         struct list_head *p, *n;
3140
3141         list_for_each_safe(p, n, &hdev->blacklist) {
3142                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3143
3144                 list_del(p);
3145                 kfree(b);
3146         }
3147 }
3148
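/* Add a device to the blacklist. Returns -EBADF for BDADDR_ANY,
 * -EEXIST if the address is already blacklisted and -ENOMEM on
 * allocation failure; on success userspace is notified through
 * mgmt_device_blocked().
 */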
3149 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3150 {
3151         struct bdaddr_list *entry;
3152
3153         if (!bacmp(bdaddr, BDADDR_ANY))
3154                 return -EBADF;
3155
3156         if (hci_blacklist_lookup(hdev, bdaddr, type))
3157                 return -EEXIST;
3158
3159         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3160         if (!entry)
3161                 return -ENOMEM;
3162
3163         bacpy(&entry->bdaddr, bdaddr);
3164         entry->bdaddr_type = type;
3165
3166         list_add(&entry->list, &hdev->blacklist);
3167
3168         return mgmt_device_blocked(hdev, bdaddr, type);
3169 }
3170
3171 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3172 {
3173         struct bdaddr_list *entry;
3174
3175         if (!bacmp(bdaddr, BDADDR_ANY)) {
3176                 hci_blacklist_clear(hdev);
3177                 return 0;
3178         }
3179
3180         entry = hci_blacklist_lookup(hdev, bdaddr, type);
3181         if (!entry)
3182                 return -ENOENT;
3183
3184         list_del(&entry->list);
3185         kfree(entry);
3186
3187         return mgmt_device_unblocked(hdev, bdaddr, type);
3188 }
3189
3190 /* This function requires the caller holds hdev->lock */
3191 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3192                                                bdaddr_t *addr, u8 addr_type)
3193 {
3194         struct hci_conn_params *params;
3195
3196         list_for_each_entry(params, &hdev->le_conn_params, list) {
3197                 if (bacmp(&params->addr, addr) == 0 &&
3198                     params->addr_type == addr_type) {
3199                         return params;
3200                 }
3201         }
3202
3203         return NULL;
3204 }
3205
3206 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3207 {
3208         struct hci_conn *conn;
3209
3210         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3211         if (!conn)
3212                 return false;
3213
3214         if (conn->dst_type != type)
3215                 return false;
3216
3217         if (conn->state != BT_CONNECTED)
3218                 return false;
3219
3220         return true;
3221 }
3222
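/* Set or update the connection parameters and auto-connect action for
 * an LE device. HCI_AUTO_CONN_ALWAYS puts the device on the pending
 * connection list (unless it is already connected), which kicks the
 * background scan; DISABLED and LINK_LOSS take it off again. A caller
 * sketch with illustrative values only:
 *
 *	hci_dev_lock(hdev);
 *	hci_conn_params_add(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC,
 *			    HCI_AUTO_CONN_ALWAYS,
 *			    hdev->le_conn_min_interval,
 *			    hdev->le_conn_max_interval);
 *	hci_dev_unlock(hdev);
 */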
3223 /* This function requires the caller holds hdev->lock */
3224 void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3225                          u8 auto_connect, u16 conn_min_interval,
3226                          u16 conn_max_interval)
3227 {
3228         struct hci_conn_params *params;
3229
3230         params = hci_conn_params_lookup(hdev, addr, addr_type);
3231         if (params)
3232                 goto update;
3233
3234         params = kzalloc(sizeof(*params), GFP_KERNEL);
3235         if (!params) {
3236                 BT_ERR("Out of memory");
3237                 return;
3238         }
3239
3240         bacpy(&params->addr, addr);
3241         params->addr_type = addr_type;
3242
3243         list_add(&params->list, &hdev->le_conn_params);
3244
3245 update:
3246         params->conn_min_interval = conn_min_interval;
3247         params->conn_max_interval = conn_max_interval;
3248         params->auto_connect = auto_connect;
3249
3250         switch (auto_connect) {
3251         case HCI_AUTO_CONN_DISABLED:
3252         case HCI_AUTO_CONN_LINK_LOSS:
3253                 hci_pend_le_conn_del(hdev, addr, addr_type);
3254                 break;
3255         case HCI_AUTO_CONN_ALWAYS:
3256                 if (!is_connected(hdev, addr, addr_type))
3257                         hci_pend_le_conn_add(hdev, addr, addr_type);
3258                 break;
3259         }
3260
3261         BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3262                "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3263                conn_min_interval, conn_max_interval);
3264 }
3265
3266 /* This function requires the caller holds hdev->lock */
3267 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3268 {
3269         struct hci_conn_params *params;
3270
3271         params = hci_conn_params_lookup(hdev, addr, addr_type);
3272         if (!params)
3273                 return;
3274
3275         hci_pend_le_conn_del(hdev, addr, addr_type);
3276
3277         list_del(&params->list);
3278         kfree(params);
3279
3280         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3281 }
3282
3283 /* This function requires the caller holds hdev->lock */
3284 void hci_conn_params_clear(struct hci_dev *hdev)
3285 {
3286         struct hci_conn_params *params, *tmp;
3287
3288         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3289                 list_del(&params->list);
3290                 kfree(params);
3291         }
3292
3293         BT_DBG("All LE connection parameters were removed");
3294 }
3295
3296 /* This function requires the caller holds hdev->lock */
3297 struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3298                                             bdaddr_t *addr, u8 addr_type)
3299 {
3300         struct bdaddr_list *entry;
3301
3302         list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3303                 if (bacmp(&entry->bdaddr, addr) == 0 &&
3304                     entry->bdaddr_type == addr_type)
3305                         return entry;
3306         }
3307
3308         return NULL;
3309 }
3310
3311 /* This function requires the caller holds hdev->lock */
3312 void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3313 {
3314         struct bdaddr_list *entry;
3315
3316         entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3317         if (entry)
3318                 goto done;
3319
3320         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3321         if (!entry) {
3322                 BT_ERR("Out of memory");
3323                 return;
3324         }
3325
3326         bacpy(&entry->bdaddr, addr);
3327         entry->bdaddr_type = addr_type;
3328
3329         list_add(&entry->list, &hdev->pend_le_conns);
3330
3331         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3332
3333 done:
3334         hci_update_background_scan(hdev);
3335 }
3336
3337 /* This function requires the caller holds hdev->lock */
3338 void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3339 {
3340         struct bdaddr_list *entry;
3341
3342         entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3343         if (!entry)
3344                 goto done;
3345
3346         list_del(&entry->list);
3347         kfree(entry);
3348
3349         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3350
3351 done:
3352         hci_update_background_scan(hdev);
3353 }
3354
3355 /* This function requires the caller holds hdev->lock */
3356 void hci_pend_le_conns_clear(struct hci_dev *hdev)
3357 {
3358         struct bdaddr_list *entry, *tmp;
3359
3360         list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3361                 list_del(&entry->list);
3362                 kfree(entry);
3363         }
3364
3365         BT_DBG("All LE pending connections cleared");
3366 }
3367
3368 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3369 {
3370         if (status) {
3371                 BT_ERR("Failed to start inquiry: status %d", status);
3372
3373                 hci_dev_lock(hdev);
3374                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3375                 hci_dev_unlock(hdev);
3376                 return;
3377         }
3378 }
3379
3380 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3381 {
3382         /* General inquiry access code (GIAC) */
3383         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3384         struct hci_request req;
3385         struct hci_cp_inquiry cp;
3386         int err;
3387
3388         if (status) {
3389                 BT_ERR("Failed to disable LE scanning: status %d", status);
3390                 return;
3391         }
3392
3393         switch (hdev->discovery.type) {
3394         case DISCOV_TYPE_LE:
3395                 hci_dev_lock(hdev);
3396                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3397                 hci_dev_unlock(hdev);
3398                 break;
3399
3400         case DISCOV_TYPE_INTERLEAVED:
3401                 hci_req_init(&req, hdev);
3402
3403                 memset(&cp, 0, sizeof(cp));
3404                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3405                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3406                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3407
3408                 hci_dev_lock(hdev);
3409
3410                 hci_inquiry_cache_flush(hdev);
3411
3412                 err = hci_req_run(&req, inquiry_complete);
3413                 if (err) {
3414                         BT_ERR("Inquiry request failed: err %d", err);
3415                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3416                 }
3417
3418                 hci_dev_unlock(hdev);
3419                 break;
3420         }
3421 }
3422
3423 static void le_scan_disable_work(struct work_struct *work)
3424 {
3425         struct hci_dev *hdev = container_of(work, struct hci_dev,
3426                                             le_scan_disable.work);
3427         struct hci_request req;
3428         int err;
3429
3430         BT_DBG("%s", hdev->name);
3431
3432         hci_req_init(&req, hdev);
3433
3434         hci_req_add_le_scan_disable(&req);
3435
3436         err = hci_req_run(&req, le_scan_disable_work_complete);
3437         if (err)
3438                 BT_ERR("Disable LE scanning request failed: err %d", err);
3439 }
3440
3441 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3442                               u8 *own_addr_type)
3443 {
3444         struct hci_dev *hdev = req->hdev;
3445         int err;
3446
3447         /* If privacy is enabled, use a resolvable private address. If
3448          * the current RPA has expired, or something other than the
3449          * current RPA is in use, generate a new one.
3450          */
3451         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3452                 int to;
3453
3454                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3455
3456                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3457                     !bacmp(&hdev->random_addr, &hdev->rpa))
3458                         return 0;
3459
3460                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3461                 if (err < 0) {
3462                         BT_ERR("%s failed to generate new RPA", hdev->name);
3463                         return err;
3464                 }
3465
3466                 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &hdev->rpa);
3467
3468                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3469                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3470
3471                 return 0;
3472         }
3473
3474         /* In case privacy is required without a resolvable private address,
3475          * use an unresolvable private address. This is useful for active
3476          * scanning and non-connectable advertising.
3477          */
3478         if (require_privacy) {
3479                 bdaddr_t urpa;
3480
3481                 get_random_bytes(&urpa, 6);
3482                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3483
3484                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3485                 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &urpa);
3486                 return 0;
3487         }
3488
3489         /* If forcing static address is in use or there is no public
3490          * address, use the static address as the random address (but
3491          * skip the HCI command if the current random address is
3492          * already the static one).
3493          */
3494         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3495             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3496                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3497                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3498                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3499                                     &hdev->static_addr);
3500                 return 0;
3501         }
3502
3503         /* Neither privacy nor a static address is being used, so use a
3504          * public address.
3505          */
3506         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3507
3508         return 0;
3509 }
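
/* Illustrative sketch of a caller (hypothetical request builder): the
 * helper queues any needed HCI_OP_LE_SET_RANDOM_ADDR command on the
 * request and reports which own address type to use afterwards:
 *
 *	struct hci_request req;
 *	u8 own_addr_type;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	err = hci_update_random_address(&req, false, &own_addr_type);
 *	if (!err)
 *		... build scan/advertising commands using own_addr_type ...
 */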
3510
3511 /* Alloc HCI device */
3512 struct hci_dev *hci_alloc_dev(void)
3513 {
3514         struct hci_dev *hdev;
3515
3516         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3517         if (!hdev)
3518                 return NULL;
3519
3520         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3521         hdev->esco_type = (ESCO_HV1);
3522         hdev->link_mode = (HCI_LM_ACCEPT);
3523         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3524         hdev->io_capability = 0x03;     /* No Input No Output */
3525         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3526         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3527
3528         hdev->sniff_max_interval = 800;
3529         hdev->sniff_min_interval = 80;
3530
3531         hdev->le_adv_channel_map = 0x07;
3532         hdev->le_scan_interval = 0x0060;
3533         hdev->le_scan_window = 0x0030;
3534         hdev->le_conn_min_interval = 0x0028;
3535         hdev->le_conn_max_interval = 0x0038;
3536
3537         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3538
3539         mutex_init(&hdev->lock);
3540         mutex_init(&hdev->req_lock);
3541
3542         INIT_LIST_HEAD(&hdev->mgmt_pending);
3543         INIT_LIST_HEAD(&hdev->blacklist);
3544         INIT_LIST_HEAD(&hdev->uuids);
3545         INIT_LIST_HEAD(&hdev->link_keys);
3546         INIT_LIST_HEAD(&hdev->long_term_keys);
3547         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3548         INIT_LIST_HEAD(&hdev->remote_oob_data);
3549         INIT_LIST_HEAD(&hdev->le_conn_params);
3550         INIT_LIST_HEAD(&hdev->pend_le_conns);
3551         INIT_LIST_HEAD(&hdev->conn_hash.list);
3552
3553         INIT_WORK(&hdev->rx_work, hci_rx_work);
3554         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3555         INIT_WORK(&hdev->tx_work, hci_tx_work);
3556         INIT_WORK(&hdev->power_on, hci_power_on);
3557
3558         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3559         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3560         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3561
3562         skb_queue_head_init(&hdev->rx_q);
3563         skb_queue_head_init(&hdev->cmd_q);
3564         skb_queue_head_init(&hdev->raw_q);
3565
3566         init_waitqueue_head(&hdev->req_wait_q);
3567
3568         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
3569
3570         hci_init_sysfs(hdev);
3571         discovery_init(hdev);
3572
3573         return hdev;
3574 }
3575 EXPORT_SYMBOL(hci_alloc_dev);
3576
3577 /* Free HCI device */
3578 void hci_free_dev(struct hci_dev *hdev)
3579 {
3580         /* Will be freed via the device release callback */
3581         put_device(&hdev->dev);
3582 }
3583 EXPORT_SYMBOL(hci_free_dev);
3584
3585 /* Register HCI device */
3586 int hci_register_dev(struct hci_dev *hdev)
3587 {
3588         int id, error;
3589
3590         if (!hdev->open || !hdev->close)
3591                 return -EINVAL;
3592
3593         /* Do not allow HCI_AMP devices to register at index 0,
3594          * so the index can be used as the AMP controller ID.
3595          */
3596         switch (hdev->dev_type) {
3597         case HCI_BREDR:
3598                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3599                 break;
3600         case HCI_AMP:
3601                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3602                 break;
3603         default:
3604                 return -EINVAL;
3605         }
3606
3607         if (id < 0)
3608                 return id;
3609
3610         sprintf(hdev->name, "hci%d", id);
3611         hdev->id = id;
3612
3613         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3614
3615         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3616                                           WQ_MEM_RECLAIM, 1, hdev->name);
3617         if (!hdev->workqueue) {
3618                 error = -ENOMEM;
3619                 goto err;
3620         }
3621
3622         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3623                                               WQ_MEM_RECLAIM, 1, hdev->name);
3624         if (!hdev->req_workqueue) {
3625                 destroy_workqueue(hdev->workqueue);
3626                 error = -ENOMEM;
3627                 goto err;
3628         }
3629
3630         if (!IS_ERR_OR_NULL(bt_debugfs))
3631                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3632
3633         dev_set_name(&hdev->dev, "%s", hdev->name);
3634
3635         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3636                                                CRYPTO_ALG_ASYNC);
3637         if (IS_ERR(hdev->tfm_aes)) {
3638                 BT_ERR("Unable to create crypto context");
3639                 error = PTR_ERR(hdev->tfm_aes);
3640                 hdev->tfm_aes = NULL;
3641                 goto err_wqueue;
3642         }
3643
3644         error = device_add(&hdev->dev);
3645         if (error < 0)
3646                 goto err_tfm;
3647
3648         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3649                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3650                                     hdev);
3651         if (hdev->rfkill) {
3652                 if (rfkill_register(hdev->rfkill) < 0) {
3653                         rfkill_destroy(hdev->rfkill);
3654                         hdev->rfkill = NULL;
3655                 }
3656         }
3657
3658         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3659                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3660
3661         set_bit(HCI_SETUP, &hdev->dev_flags);
3662         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3663
3664         if (hdev->dev_type == HCI_BREDR) {
3665                 /* Assume BR/EDR support until proven otherwise (such as
3666                  * through reading supported features during init).
3667                  */
3668                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3669         }
3670
3671         write_lock(&hci_dev_list_lock);
3672         list_add(&hdev->list, &hci_dev_list);
3673         write_unlock(&hci_dev_list_lock);
3674
3675         hci_notify(hdev, HCI_DEV_REG);
3676         hci_dev_hold(hdev);
3677
3678         queue_work(hdev->req_workqueue, &hdev->power_on);
3679
3680         return id;
3681
3682 err_tfm:
3683         crypto_free_blkcipher(hdev->tfm_aes);
3684 err_wqueue:
3685         destroy_workqueue(hdev->workqueue);
3686         destroy_workqueue(hdev->req_workqueue);
3687 err:
3688         ida_simple_remove(&hci_index_ida, hdev->id);
3689
3690         return error;
3691 }
3692 EXPORT_SYMBOL(hci_register_dev);
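
/* Illustrative driver-side sketch (hypothetical transport driver,
 * callback names and priv are placeholders); open and close are
 * checked at registration, and send is needed to actually transmit:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_UART;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	hci_set_drvdata(hdev, priv);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */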
3693
3694 /* Unregister HCI device */
3695 void hci_unregister_dev(struct hci_dev *hdev)
3696 {
3697         int i, id;
3698
3699         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3700
3701         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3702
3703         id = hdev->id;
3704
3705         write_lock(&hci_dev_list_lock);
3706         list_del(&hdev->list);
3707         write_unlock(&hci_dev_list_lock);
3708
3709         hci_dev_do_close(hdev);
3710
3711         for (i = 0; i < NUM_REASSEMBLY; i++)
3712                 kfree_skb(hdev->reassembly[i]);
3713
3714         cancel_work_sync(&hdev->power_on);
3715
3716         if (!test_bit(HCI_INIT, &hdev->flags) &&
3717             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
3718                 hci_dev_lock(hdev);
3719                 mgmt_index_removed(hdev);
3720                 hci_dev_unlock(hdev);
3721         }
3722
3723         /* mgmt_index_removed should take care of emptying the
3724          * pending list. */
3725         BUG_ON(!list_empty(&hdev->mgmt_pending));
3726
3727         hci_notify(hdev, HCI_DEV_UNREG);
3728
3729         if (hdev->rfkill) {
3730                 rfkill_unregister(hdev->rfkill);
3731                 rfkill_destroy(hdev->rfkill);
3732         }
3733
3734         if (hdev->tfm_aes)
3735                 crypto_free_blkcipher(hdev->tfm_aes);
3736
3737         device_del(&hdev->dev);
3738
3739         debugfs_remove_recursive(hdev->debugfs);
3740
3741         destroy_workqueue(hdev->workqueue);
3742         destroy_workqueue(hdev->req_workqueue);
3743
3744         hci_dev_lock(hdev);
3745         hci_blacklist_clear(hdev);
3746         hci_uuids_clear(hdev);
3747         hci_link_keys_clear(hdev);
3748         hci_smp_ltks_clear(hdev);
3749         hci_smp_irks_clear(hdev);
3750         hci_remote_oob_data_clear(hdev);
3751         hci_conn_params_clear(hdev);
3752         hci_pend_le_conns_clear(hdev);
3753         hci_dev_unlock(hdev);
3754
3755         hci_dev_put(hdev);
3756
3757         ida_simple_remove(&hci_index_ida, id);
3758 }
3759 EXPORT_SYMBOL(hci_unregister_dev);
3760
3761 /* Suspend HCI device */
3762 int hci_suspend_dev(struct hci_dev *hdev)
3763 {
3764         hci_notify(hdev, HCI_DEV_SUSPEND);
3765         return 0;
3766 }
3767 EXPORT_SYMBOL(hci_suspend_dev);
3768
3769 /* Resume HCI device */
3770 int hci_resume_dev(struct hci_dev *hdev)
3771 {
3772         hci_notify(hdev, HCI_DEV_RESUME);
3773         return 0;
3774 }
3775 EXPORT_SYMBOL(hci_resume_dev);
3776
3777 /* Receive frame from HCI drivers */
3778 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3779 {
3780         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3781                       && !test_bit(HCI_INIT, &hdev->flags))) {
3782                 kfree_skb(skb);
3783                 return -ENXIO;
3784         }
3785
3786         /* Incoming skb */
3787         bt_cb(skb)->incoming = 1;
3788
3789         /* Time stamp */
3790         __net_timestamp(skb);
3791
3792         skb_queue_tail(&hdev->rx_q, skb);
3793         queue_work(hdev->workqueue, &hdev->rx_work);
3794
3795         return 0;
3796 }
3797 EXPORT_SYMBOL(hci_recv_frame);
3798
3799 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3800                           int count, __u8 index)
3801 {
3802         int len = 0;
3803         int hlen = 0;
3804         int remain = count;
3805         struct sk_buff *skb;
3806         struct bt_skb_cb *scb;
3807
3808         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3809             index >= NUM_REASSEMBLY)
3810                 return -EILSEQ;
3811
3812         skb = hdev->reassembly[index];
3813
3814         if (!skb) {
3815                 switch (type) {
3816                 case HCI_ACLDATA_PKT:
3817                         len = HCI_MAX_FRAME_SIZE;
3818                         hlen = HCI_ACL_HDR_SIZE;
3819                         break;
3820                 case HCI_EVENT_PKT:
3821                         len = HCI_MAX_EVENT_SIZE;
3822                         hlen = HCI_EVENT_HDR_SIZE;
3823                         break;
3824                 case HCI_SCODATA_PKT:
3825                         len = HCI_MAX_SCO_SIZE;
3826                         hlen = HCI_SCO_HDR_SIZE;
3827                         break;
3828                 }
3829
3830                 skb = bt_skb_alloc(len, GFP_ATOMIC);
3831                 if (!skb)
3832                         return -ENOMEM;
3833
3834                 scb = (void *) skb->cb;
3835                 scb->expect = hlen;
3836                 scb->pkt_type = type;
3837
3838                 hdev->reassembly[index] = skb;
3839         }
3840
3841         while (count) {
3842                 scb = (void *) skb->cb;
3843                 len = min_t(uint, scb->expect, count);
3844
3845                 memcpy(skb_put(skb, len), data, len);
3846
3847                 count -= len;
3848                 data += len;
3849                 scb->expect -= len;
3850                 remain = count;
3851
3852                 switch (type) {
3853                 case HCI_EVENT_PKT:
3854                         if (skb->len == HCI_EVENT_HDR_SIZE) {
3855                                 struct hci_event_hdr *h = hci_event_hdr(skb);
3856                                 scb->expect = h->plen;
3857
3858                                 if (skb_tailroom(skb) < scb->expect) {
3859                                         kfree_skb(skb);
3860                                         hdev->reassembly[index] = NULL;
3861                                         return -ENOMEM;
3862                                 }
3863                         }
3864                         break;
3865
3866                 case HCI_ACLDATA_PKT:
3867                         if (skb->len  == HCI_ACL_HDR_SIZE) {
3868                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3869                                 scb->expect = __le16_to_cpu(h->dlen);
3870
3871                                 if (skb_tailroom(skb) < scb->expect) {
3872                                         kfree_skb(skb);
3873                                         hdev->reassembly[index] = NULL;
3874                                         return -ENOMEM;
3875                                 }
3876                         }
3877                         break;
3878
3879                 case HCI_SCODATA_PKT:
3880                         if (skb->len == HCI_SCO_HDR_SIZE) {
3881                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3882                                 scb->expect = h->dlen;
3883
3884                                 if (skb_tailroom(skb) < scb->expect) {
3885                                         kfree_skb(skb);
3886                                         hdev->reassembly[index] = NULL;
3887                                         return -ENOMEM;
3888                                 }
3889                         }
3890                         break;
3891                 }
3892
3893                 if (scb->expect == 0) {
3894                         /* Complete frame */
3895
3896                         bt_cb(skb)->pkt_type = type;
3897                         hci_recv_frame(hdev, skb);
3898
3899                         hdev->reassembly[index] = NULL;
3900                         return remain;
3901                 }
3902         }
3903
3904         return remain;
3905 }
3906
3907 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3908 {
3909         int rem = 0;
3910
3911         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3912                 return -EILSEQ;
3913
3914         while (count) {
3915                 rem = hci_reassembly(hdev, type, data, count, type - 1);
3916                 if (rem < 0)
3917                         return rem;
3918
3919                 data += (count - rem);
3920                 count = rem;
3921         }
3922
3923         return rem;
3924 }
3925 EXPORT_SYMBOL(hci_recv_fragment);
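
/* Illustrative sketch (hypothetical USB-style driver): the transport
 * already knows the packet type from its endpoint, so it passes the
 * type explicitly and lets the core reassemble partial frames:
 *
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("%s frame reassembly failed (%d)", hdev->name, err);
 */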
3926
3927 #define STREAM_REASSEMBLY 0
3928
3929 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3930 {
3931         int type;
3932         int rem = 0;
3933
3934         while (count) {
3935                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3936
3937                 if (!skb) {
3938                         struct { char type; } *pkt;
3939
3940                         /* Start of the frame */
3941                         pkt = data;
3942                         type = pkt->type;
3943
3944                         data++;
3945                         count--;
3946                 } else
3947                         type = bt_cb(skb)->pkt_type;
3948
3949                 rem = hci_reassembly(hdev, type, data, count,
3950                                      STREAM_REASSEMBLY);
3951                 if (rem < 0)
3952                         return rem;
3953
3954                 data += (count - rem);
3955                 count = rem;
3956         }
3957
3958         return rem;
3959 }
3960 EXPORT_SYMBOL(hci_recv_stream_fragment);
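
/* Illustrative sketch (hypothetical UART line discipline): here the
 * packet type indicator is the first octet of the byte stream itself,
 * so the receive buffer is handed over unmodified:
 *
 *	err = hci_recv_stream_fragment(hdev, data, count);
 *	if (err < 0)
 *		BT_ERR("%s stream reassembly failed (%d)", hdev->name, err);
 */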
3961
3962 /* ---- Interface to upper protocols ---- */
3963
3964 int hci_register_cb(struct hci_cb *cb)
3965 {
3966         BT_DBG("%p name %s", cb, cb->name);
3967
3968         write_lock(&hci_cb_list_lock);
3969         list_add(&cb->list, &hci_cb_list);
3970         write_unlock(&hci_cb_list_lock);
3971
3972         return 0;
3973 }
3974 EXPORT_SYMBOL(hci_register_cb);
3975
3976 int hci_unregister_cb(struct hci_cb *cb)
3977 {
3978         BT_DBG("%p name %s", cb, cb->name);
3979
3980         write_lock(&hci_cb_list_lock);
3981         list_del(&cb->list);
3982         write_unlock(&hci_cb_list_lock);
3983
3984         return 0;
3985 }
3986 EXPORT_SYMBOL(hci_unregister_cb);
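
/* Illustrative sketch (hypothetical upper protocol, callback name is a
 * placeholder): a protocol registers a struct hci_cb once at module
 * init and removes it on exit; the callbacks then run for connections
 * on every HCI device:
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name		= "my_proto",
 *		.security_cfm	= my_proto_security_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */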
3987
3988 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3989 {
3990         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3991
3992         /* Time stamp */
3993         __net_timestamp(skb);
3994
3995         /* Send copy to monitor */
3996         hci_send_to_monitor(hdev, skb);
3997
3998         if (atomic_read(&hdev->promisc)) {
3999                 /* Send copy to the sockets */
4000                 hci_send_to_sock(hdev, skb);
4001         }
4002
4003         /* Get rid of the skb owner prior to sending it to the driver. */
4004         skb_orphan(skb);
4005
4006         if (hdev->send(hdev, skb) < 0)
4007                 BT_ERR("%s sending frame failed", hdev->name);
4008 }
4009
4010 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4011 {
4012         skb_queue_head_init(&req->cmd_q);
4013         req->hdev = hdev;
4014         req->err = 0;
4015 }
4016
4017 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4018 {
4019         struct hci_dev *hdev = req->hdev;
4020         struct sk_buff *skb;
4021         unsigned long flags;
4022
4023         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4024
4025         /* If an error occurred during request building, remove all HCI
4026          * commands queued on the HCI request queue.
4027          */
4028         if (req->err) {
4029                 skb_queue_purge(&req->cmd_q);
4030                 return req->err;
4031         }
4032
4033         /* Do not allow empty requests */
4034         if (skb_queue_empty(&req->cmd_q))
4035                 return -ENODATA;
4036
4037         skb = skb_peek_tail(&req->cmd_q);
4038         bt_cb(skb)->req.complete = complete;
4039
4040         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4041         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4042         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4043
4044         queue_work(hdev->workqueue, &hdev->cmd_work);
4045
4046         return 0;
4047 }
4048
4049 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4050                                        u32 plen, const void *param)
4051 {
4052         int len = HCI_COMMAND_HDR_SIZE + plen;
4053         struct hci_command_hdr *hdr;
4054         struct sk_buff *skb;
4055
4056         skb = bt_skb_alloc(len, GFP_ATOMIC);
4057         if (!skb)
4058                 return NULL;
4059
4060         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4061         hdr->opcode = cpu_to_le16(opcode);
4062         hdr->plen   = plen;
4063
4064         if (plen)
4065                 memcpy(skb_put(skb, plen), param, plen);
4066
4067         BT_DBG("skb len %d", skb->len);
4068
4069         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4070
4071         return skb;
4072 }
4073
4074 /* Send HCI command */
4075 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4076                  const void *param)
4077 {
4078         struct sk_buff *skb;
4079
4080         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4081
4082         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4083         if (!skb) {
4084                 BT_ERR("%s no memory for command", hdev->name);
4085                 return -ENOMEM;
4086         }
4087
4088         /* Stand-alone HCI commands must be flagged as
4089          * single-command requests.
4090          */
4091         bt_cb(skb)->req.start = true;
4092
4093         skb_queue_tail(&hdev->cmd_q, skb);
4094         queue_work(hdev->workqueue, &hdev->cmd_work);
4095
4096         return 0;
4097 }
4098
4099 /* Queue a command to an asynchronous HCI request */
4100 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4101                     const void *param, u8 event)
4102 {
4103         struct hci_dev *hdev = req->hdev;
4104         struct sk_buff *skb;
4105
4106         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4107
4108         /* If an error occurred during request building, there is no point in
4109          * queueing the HCI command. We can simply return.
4110          */
4111         if (req->err)
4112                 return;
4113
4114         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4115         if (!skb) {
4116                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4117                        hdev->name, opcode);
4118                 req->err = -ENOMEM;
4119                 return;
4120         }
4121
4122         if (skb_queue_empty(&req->cmd_q))
4123                 bt_cb(skb)->req.start = true;
4124
4125         bt_cb(skb)->req.event = event;
4126
4127         skb_queue_tail(&req->cmd_q, skb);
4128 }
4129
4130 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4131                  const void *param)
4132 {
4133         hci_req_add_ev(req, opcode, plen, param, 0);
4134 }
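
/* Illustrative sketch of the request API (hypothetical completion
 * callback): commands are collected on a private queue and only hit
 * hdev->cmd_q once hci_req_run() splices them in atomically:
 *
 *	static void my_req_complete(struct hci_dev *hdev, u8 status)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, my_req_complete);
 *
 * The completion callback runs once, after the last queued command
 * completes or as soon as any command in the request fails.
 */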
4135
4136 /* Get data from the previously sent command */
4137 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4138 {
4139         struct hci_command_hdr *hdr;
4140
4141         if (!hdev->sent_cmd)
4142                 return NULL;
4143
4144         hdr = (void *) hdev->sent_cmd->data;
4145
4146         if (hdr->opcode != cpu_to_le16(opcode))
4147                 return NULL;
4148
4149         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4150
4151         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4152 }
4153
4154 /* Send ACL data */
4155 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4156 {
4157         struct hci_acl_hdr *hdr;
4158         int len = skb->len;
4159
4160         skb_push(skb, HCI_ACL_HDR_SIZE);
4161         skb_reset_transport_header(skb);
4162         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4163         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4164         hdr->dlen   = cpu_to_le16(len);
4165 }
4166
4167 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4168                           struct sk_buff *skb, __u16 flags)
4169 {
4170         struct hci_conn *conn = chan->conn;
4171         struct hci_dev *hdev = conn->hdev;
4172         struct sk_buff *list;
4173
4174         skb->len = skb_headlen(skb);
4175         skb->data_len = 0;
4176
4177         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4178
4179         switch (hdev->dev_type) {
4180         case HCI_BREDR:
4181                 hci_add_acl_hdr(skb, conn->handle, flags);
4182                 break;
4183         case HCI_AMP:
4184                 hci_add_acl_hdr(skb, chan->handle, flags);
4185                 break;
4186         default:
4187                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4188                 return;
4189         }
4190
4191         list = skb_shinfo(skb)->frag_list;
4192         if (!list) {
4193                 /* Non-fragmented */
4194                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4195
4196                 skb_queue_tail(queue, skb);
4197         } else {
4198                 /* Fragmented */
4199                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4200
4201                 skb_shinfo(skb)->frag_list = NULL;
4202
4203                 /* Queue all fragments atomically */
4204                 spin_lock(&queue->lock);
4205
4206                 __skb_queue_tail(queue, skb);
4207
4208                 flags &= ~ACL_START;
4209                 flags |= ACL_CONT;
4210                 do {
4211                         skb = list; list = list->next;
4212
4213                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4214                         hci_add_acl_hdr(skb, conn->handle, flags);
4215
4216                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4217
4218                         __skb_queue_tail(queue, skb);
4219                 } while (list);
4220
4221                 spin_unlock(&queue->lock);
4222         }
4223 }
4224
4225 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4226 {
4227         struct hci_dev *hdev = chan->conn->hdev;
4228
4229         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4230
4231         hci_queue_acl(chan, &chan->data_q, skb, flags);
4232
4233         queue_work(hdev->workqueue, &hdev->tx_work);
4234 }
4235
4236 /* Send SCO data */
4237 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4238 {
4239         struct hci_dev *hdev = conn->hdev;
4240         struct hci_sco_hdr hdr;
4241
4242         BT_DBG("%s len %d", hdev->name, skb->len);
4243
4244         hdr.handle = cpu_to_le16(conn->handle);
4245         hdr.dlen   = skb->len;
4246
4247         skb_push(skb, HCI_SCO_HDR_SIZE);
4248         skb_reset_transport_header(skb);
4249         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4250
4251         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4252
4253         skb_queue_tail(&conn->data_q, skb);
4254         queue_work(hdev->workqueue, &hdev->tx_work);
4255 }
4256
4257 /* ---- HCI TX task (outgoing data) ---- */
4258
4259 /* HCI Connection scheduler */
4260 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4261                                      int *quote)
4262 {
4263         struct hci_conn_hash *h = &hdev->conn_hash;
4264         struct hci_conn *conn = NULL, *c;
4265         unsigned int num = 0, min = ~0;
4266
4267         /* We don't have to lock the device here. Connections are always
4268          * added and removed with the TX task disabled. */
4269
4270         rcu_read_lock();
4271
4272         list_for_each_entry_rcu(c, &h->list, list) {
4273                 if (c->type != type || skb_queue_empty(&c->data_q))
4274                         continue;
4275
4276                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4277                         continue;
4278
4279                 num++;
4280
4281                 if (c->sent < min) {
4282                         min  = c->sent;
4283                         conn = c;
4284                 }
4285
4286                 if (hci_conn_num(hdev, type) == num)
4287                         break;
4288         }
4289
4290         rcu_read_unlock();
4291
4292         if (conn) {
4293                 int cnt, q;
4294
4295                 switch (conn->type) {
4296                 case ACL_LINK:
4297                         cnt = hdev->acl_cnt;
4298                         break;
4299                 case SCO_LINK:
4300                 case ESCO_LINK:
4301                         cnt = hdev->sco_cnt;
4302                         break;
4303                 case LE_LINK:
4304                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4305                         break;
4306                 default:
4307                         cnt = 0;
4308                         BT_ERR("Unknown link type");
4309                 }
4310
4311                 q = cnt / num;
4312                 *quote = q ? q : 1;
4313         } else
4314                 *quote = 0;
4315
4316         BT_DBG("conn %p quote %d", conn, *quote);
4317         return conn;
4318 }
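
/* Example of the fair-share arithmetic above (hypothetical numbers):
 * with hdev->acl_cnt = 10 free ACL slots and num = 3 connections with
 * queued data, q = 10 / 3 = 3, so the connection with the lowest sent
 * count may send up to 3 packets before the scheduler is consulted
 * again. A zero quotient is rounded up to 1 so progress is always made.
 */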
4319
4320 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4321 {
4322         struct hci_conn_hash *h = &hdev->conn_hash;
4323         struct hci_conn *c;
4324
4325         BT_ERR("%s link tx timeout", hdev->name);
4326
4327         rcu_read_lock();
4328
4329         /* Kill stalled connections */
4330         list_for_each_entry_rcu(c, &h->list, list) {
4331                 if (c->type == type && c->sent) {
4332                         BT_ERR("%s killing stalled connection %pMR",
4333                                hdev->name, &c->dst);
4334                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4335                 }
4336         }
4337
4338         rcu_read_unlock();
4339 }
4340
4341 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4342                                       int *quote)
4343 {
4344         struct hci_conn_hash *h = &hdev->conn_hash;
4345         struct hci_chan *chan = NULL;
4346         unsigned int num = 0, min = ~0, cur_prio = 0;
4347         struct hci_conn *conn;
4348         int cnt, q, conn_num = 0;
4349
4350         BT_DBG("%s", hdev->name);
4351
4352         rcu_read_lock();
4353
4354         list_for_each_entry_rcu(conn, &h->list, list) {
4355                 struct hci_chan *tmp;
4356
4357                 if (conn->type != type)
4358                         continue;
4359
4360                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4361                         continue;
4362
4363                 conn_num++;
4364
4365                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4366                         struct sk_buff *skb;
4367
4368                         if (skb_queue_empty(&tmp->data_q))
4369                                 continue;
4370
4371                         skb = skb_peek(&tmp->data_q);
4372                         if (skb->priority < cur_prio)
4373                                 continue;
4374
4375                         if (skb->priority > cur_prio) {
4376                                 num = 0;
4377                                 min = ~0;
4378                                 cur_prio = skb->priority;
4379                         }
4380
4381                         num++;
4382
4383                         if (conn->sent < min) {
4384                                 min  = conn->sent;
4385                                 chan = tmp;
4386                         }
4387                 }
4388
4389                 if (hci_conn_num(hdev, type) == conn_num)
4390                         break;
4391         }
4392
4393         rcu_read_unlock();
4394
4395         if (!chan)
4396                 return NULL;
4397
4398         switch (chan->conn->type) {
4399         case ACL_LINK:
4400                 cnt = hdev->acl_cnt;
4401                 break;
4402         case AMP_LINK:
4403                 cnt = hdev->block_cnt;
4404                 break;
4405         case SCO_LINK:
4406         case ESCO_LINK:
4407                 cnt = hdev->sco_cnt;
4408                 break;
4409         case LE_LINK:
4410                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4411                 break;
4412         default:
4413                 cnt = 0;
4414                 BT_ERR("Unknown link type");
4415         }
4416
4417         q = cnt / num;
4418         *quote = q ? q : 1;
4419         BT_DBG("chan %p quote %d", chan, *quote);
4420         return chan;
4421 }
4422
4423 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4424 {
4425         struct hci_conn_hash *h = &hdev->conn_hash;
4426         struct hci_conn *conn;
4427         int num = 0;
4428
4429         BT_DBG("%s", hdev->name);
4430
4431         rcu_read_lock();
4432
4433         list_for_each_entry_rcu(conn, &h->list, list) {
4434                 struct hci_chan *chan;
4435
4436                 if (conn->type != type)
4437                         continue;
4438
4439                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4440                         continue;
4441
4442                 num++;
4443
4444                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4445                         struct sk_buff *skb;
4446
4447                         if (chan->sent) {
4448                                 chan->sent = 0;
4449                                 continue;
4450                         }
4451
4452                         if (skb_queue_empty(&chan->data_q))
4453                                 continue;
4454
4455                         skb = skb_peek(&chan->data_q);
4456                         if (skb->priority >= HCI_PRIO_MAX - 1)
4457                                 continue;
4458
4459                         skb->priority = HCI_PRIO_MAX - 1;
4460
4461                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4462                                skb->priority);
4463                 }
4464
4465                 if (hci_conn_num(hdev, type) == num)
4466                         break;
4467         }
4468
4469         rcu_read_unlock();
4470 }
4472
4473 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4474 {
4475         /* Calculate count of blocks used by this packet */
4476         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4477 }
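
/* Worked example (hypothetical numbers): with hdev->block_len = 339
 * and a 1021 byte ACL packet (the 4 byte ACL header is included in
 * skb->len), the payload is 1021 - 4 = 1017 bytes, and
 * DIV_ROUND_UP(1017, 339) = 3 controller blocks.
 */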
4478
4479 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4480 {
4481         if (!test_bit(HCI_RAW, &hdev->flags)) {
4482                 /* ACL tx timeout must be longer than the maximum
4483                  * link supervision timeout (40.9 seconds). */
4484                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4485                                        HCI_ACL_TX_TIMEOUT))
4486                         hci_link_tx_to(hdev, ACL_LINK);
4487         }
4488 }
4489
4490 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4491 {
4492         unsigned int cnt = hdev->acl_cnt;
4493         struct hci_chan *chan;
4494         struct sk_buff *skb;
4495         int quote;
4496
4497         __check_timeout(hdev, cnt);
4498
4499         while (hdev->acl_cnt &&
4500                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4501                 u32 priority = (skb_peek(&chan->data_q))->priority;
4502                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4503                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4504                                skb->len, skb->priority);
4505
4506                         /* Stop if priority has changed */
4507                         if (skb->priority < priority)
4508                                 break;
4509
4510                         skb = skb_dequeue(&chan->data_q);
4511
4512                         hci_conn_enter_active_mode(chan->conn,
4513                                                    bt_cb(skb)->force_active);
4514
4515                         hci_send_frame(hdev, skb);
4516                         hdev->acl_last_tx = jiffies;
4517
4518                         hdev->acl_cnt--;
4519                         chan->sent++;
4520                         chan->conn->sent++;
4521                 }
4522         }
4523
4524         if (cnt != hdev->acl_cnt)
4525                 hci_prio_recalculate(hdev, ACL_LINK);
4526 }
4527
4528 static void hci_sched_acl_blk(struct hci_dev *hdev)
4529 {
4530         unsigned int cnt = hdev->block_cnt;
4531         struct hci_chan *chan;
4532         struct sk_buff *skb;
4533         int quote;
4534         u8 type;
4535
4536         __check_timeout(hdev, cnt);
4537
4538         BT_DBG("%s", hdev->name);
4539
4540         if (hdev->dev_type == HCI_AMP)
4541                 type = AMP_LINK;
4542         else
4543                 type = ACL_LINK;
4544
4545         while (hdev->block_cnt > 0 &&
4546                (chan = hci_chan_sent(hdev, type, &quote))) {
4547                 u32 priority = (skb_peek(&chan->data_q))->priority;
4548                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4549                         int blocks;
4550
4551                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4552                                skb->len, skb->priority);
4553
4554                         /* Stop if priority has changed */
4555                         if (skb->priority < priority)
4556                                 break;
4557
4558                         skb = skb_dequeue(&chan->data_q);
4559
4560                         blocks = __get_blocks(hdev, skb);
4561                         if (blocks > hdev->block_cnt)
4562                                 return;
4563
4564                         hci_conn_enter_active_mode(chan->conn,
4565                                                    bt_cb(skb)->force_active);
4566
4567                         hci_send_frame(hdev, skb);
4568                         hdev->acl_last_tx = jiffies;
4569
4570                         hdev->block_cnt -= blocks;
4571                         quote -= blocks;
4572
4573                         chan->sent += blocks;
4574                         chan->conn->sent += blocks;
4575                 }
4576         }
4577
4578         if (cnt != hdev->block_cnt)
4579                 hci_prio_recalculate(hdev, type);
4580 }
4581
4582 static void hci_sched_acl(struct hci_dev *hdev)
4583 {
4584         BT_DBG("%s", hdev->name);
4585
4586         /* No ACL link over BR/EDR controller */
4587         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4588                 return;
4589
4590         /* No AMP link over AMP controller */
4591         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4592                 return;
4593
4594         switch (hdev->flow_ctl_mode) {
4595         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4596                 hci_sched_acl_pkt(hdev);
4597                 break;
4598
4599         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4600                 hci_sched_acl_blk(hdev);
4601                 break;
4602         }
4603 }
4604
4605 /* Schedule SCO */
4606 static void hci_sched_sco(struct hci_dev *hdev)
4607 {
4608         struct hci_conn *conn;
4609         struct sk_buff *skb;
4610         int quote;
4611
4612         BT_DBG("%s", hdev->name);
4613
4614         if (!hci_conn_num(hdev, SCO_LINK))
4615                 return;
4616
4617         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4618                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4619                         BT_DBG("skb %p len %d", skb, skb->len);
4620                         hci_send_frame(hdev, skb);
4621
4622                         conn->sent++;
4623                         if (conn->sent == ~0)
4624                                 conn->sent = 0;
4625                 }
4626         }
4627 }
4628
4629 static void hci_sched_esco(struct hci_dev *hdev)
4630 {
4631         struct hci_conn *conn;
4632         struct sk_buff *skb;
4633         int quote;
4634
4635         BT_DBG("%s", hdev->name);
4636
4637         if (!hci_conn_num(hdev, ESCO_LINK))
4638                 return;
4639
4640         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4641                                                      &quote))) {
4642                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4643                         BT_DBG("skb %p len %d", skb, skb->len);
4644                         hci_send_frame(hdev, skb);
4645
4646                         conn->sent++;
4647                         if (conn->sent == ~0)
4648                                 conn->sent = 0;
4649                 }
4650         }
4651 }
4652
4653 static void hci_sched_le(struct hci_dev *hdev)
4654 {
4655         struct hci_chan *chan;
4656         struct sk_buff *skb;
4657         int quote, cnt, tmp;
4658
4659         BT_DBG("%s", hdev->name);
4660
4661         if (!hci_conn_num(hdev, LE_LINK))
4662                 return;
4663
4664         if (!test_bit(HCI_RAW, &hdev->flags)) {
4665                 /* LE tx timeout must be longer than the maximum
4666                  * link supervision timeout (40.9 seconds). */
4667                 if (!hdev->le_cnt && hdev->le_pkts &&
4668                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4669                         hci_link_tx_to(hdev, LE_LINK);
4670         }
4671
4672         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4673         tmp = cnt;
4674         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4675                 u32 priority = (skb_peek(&chan->data_q))->priority;
4676                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4677                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4678                                skb->len, skb->priority);
4679
4680                         /* Stop if priority has changed */
4681                         if (skb->priority < priority)
4682                                 break;
4683
4684                         skb = skb_dequeue(&chan->data_q);
4685
4686                         hci_send_frame(hdev, skb);
4687                         hdev->le_last_tx = jiffies;
4688
4689                         cnt--;
4690                         chan->sent++;
4691                         chan->conn->sent++;
4692                 }
4693         }
4694
4695         if (hdev->le_pkts)
4696                 hdev->le_cnt = cnt;
4697         else
4698                 hdev->acl_cnt = cnt;
4699
4700         if (cnt != tmp)
4701                 hci_prio_recalculate(hdev, LE_LINK);
4702 }
4703
4704 static void hci_tx_work(struct work_struct *work)
4705 {
4706         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4707         struct sk_buff *skb;
4708
4709         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4710                hdev->sco_cnt, hdev->le_cnt);
4711
4712         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4713                 /* Schedule queues and send stuff to HCI driver */
4714                 hci_sched_acl(hdev);
4715                 hci_sched_sco(hdev);
4716                 hci_sched_esco(hdev);
4717                 hci_sched_le(hdev);
4718         }
4719
4720         /* Send next queued raw (unknown type) packet */
4721         while ((skb = skb_dequeue(&hdev->raw_q)))
4722                 hci_send_frame(hdev, skb);
4723 }
4724
4725 /* ----- HCI RX task (incoming data processing) ----- */
4726
4727 /* ACL data packet */
4728 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4729 {
4730         struct hci_acl_hdr *hdr = (void *) skb->data;
4731         struct hci_conn *conn;
4732         __u16 handle, flags;
4733
4734         skb_pull(skb, HCI_ACL_HDR_SIZE);
4735
4736         handle = __le16_to_cpu(hdr->handle);
4737         flags  = hci_flags(handle);
4738         handle = hci_handle(handle);
4739
4740         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4741                handle, flags);
4742
4743         hdev->stat.acl_rx++;
4744
4745         hci_dev_lock(hdev);
4746         conn = hci_conn_hash_lookup_handle(hdev, handle);
4747         hci_dev_unlock(hdev);
4748
4749         if (conn) {
4750                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4751
4752                 /* Send to upper protocol */
4753                 l2cap_recv_acldata(conn, skb, flags);
4754                 return;
4755         } else {
4756                 BT_ERR("%s ACL packet for unknown connection handle %d",
4757                        hdev->name, handle);
4758         }
4759
4760         kfree_skb(skb);
4761 }
4762
4763 /* SCO data packet */
4764 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4765 {
4766         struct hci_sco_hdr *hdr = (void *) skb->data;
4767         struct hci_conn *conn;
4768         __u16 handle;
4769
4770         skb_pull(skb, HCI_SCO_HDR_SIZE);
4771
4772         handle = __le16_to_cpu(hdr->handle);
4773
4774         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4775
4776         hdev->stat.sco_rx++;
4777
4778         hci_dev_lock(hdev);
4779         conn = hci_conn_hash_lookup_handle(hdev, handle);
4780         hci_dev_unlock(hdev);
4781
4782         if (conn) {
4783                 /* Send to upper protocol */
4784                 sco_recv_scodata(conn, skb);
4785                 return;
4786         } else {
4787                 BT_ERR("%s SCO packet for unknown connection handle %d",
4788                        hdev->name, handle);
4789         }
4790
4791         kfree_skb(skb);
4792 }
4793
4794 static bool hci_req_is_complete(struct hci_dev *hdev)
4795 {
4796         struct sk_buff *skb;
4797
4798         skb = skb_peek(&hdev->cmd_q);
4799         if (!skb)
4800                 return true;
4801
4802         return bt_cb(skb)->req.start;
4803 }
4804
4805 static void hci_resend_last(struct hci_dev *hdev)
4806 {
4807         struct hci_command_hdr *sent;
4808         struct sk_buff *skb;
4809         u16 opcode;
4810
4811         if (!hdev->sent_cmd)
4812                 return;
4813
4814         sent = (void *) hdev->sent_cmd->data;
4815         opcode = __le16_to_cpu(sent->opcode);
4816         if (opcode == HCI_OP_RESET)
4817                 return;
4818
4819         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4820         if (!skb)
4821                 return;
4822
4823         skb_queue_head(&hdev->cmd_q, skb);
4824         queue_work(hdev->workqueue, &hdev->cmd_work);
4825 }
4826
4827 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4828 {
4829         hci_req_complete_t req_complete = NULL;
4830         struct sk_buff *skb;
4831         unsigned long flags;
4832
4833         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4834
4835         /* If the completed command doesn't match the last one that was
4836          * sent, we need to do special handling of it.
4837          */
4838         if (!hci_sent_cmd_data(hdev, opcode)) {
4839                 /* Some CSR-based controllers generate a spontaneous
4840                  * reset complete event during init and any pending
4841                  * command will never be completed. In such a case we
4842                  * need to resend whatever was the last sent
4843                  * command.
4844                  */
4845                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4846                         hci_resend_last(hdev);
4847
4848                 return;
4849         }
4850
4851         /* If the command succeeded and there are still more commands in
4852          * this request, the request is not yet complete.
4853          */
4854         if (!status && !hci_req_is_complete(hdev))
4855                 return;
4856
4857         /* If this was the last command in a request, the complete
4858          * callback would be found in hdev->sent_cmd instead of the
4859          * command queue (hdev->cmd_q).
4860          */
4861         if (hdev->sent_cmd) {
4862                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4863
4864                 if (req_complete) {
4865                         /* We must set the complete callback to NULL to
4866                          * avoid calling the callback more than once if
4867                          * this function gets called again.
4868                          */
4869                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
4870
4871                         goto call_complete;
4872                 }
4873         }
4874
4875         /* Remove all pending commands belonging to this request */
4876         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4877         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4878                 if (bt_cb(skb)->req.start) {
4879                         __skb_queue_head(&hdev->cmd_q, skb);
4880                         break;
4881                 }
4882
4883                 req_complete = bt_cb(skb)->req.complete;
4884                 kfree_skb(skb);
4885         }
4886         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4887
4888 call_complete:
4889         if (req_complete)
4890                 req_complete(hdev, status);
4891 }
4892
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags) ||
                    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

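/* Illustrative sketch (hypothetical driver code, not part of this file):
 * how a transport driver feeds hci_rx_work() above. hci_recv_frame()
 * queues the skb on hdev->rx_q and schedules rx_work, which then
 * dispatches on bt_cb(skb)->pkt_type as shown.
 */
static int example_driver_receive_event(struct hci_dev *hdev,
                                        const void *data, int len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        memcpy(skb_put(skb, len), data, len);

        return hci_recv_frame(hdev, skb);
}
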
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(hdev, skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                        else
                                mod_timer(&hdev->cmd_timer,
                                          jiffies + HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}

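/* Simplified sketch of the flow-control counterpart (the real code lives
 * in hci_event.c; this helper is hypothetical): when a Command Complete
 * event carries a non-zero command credit, the single outstanding-command
 * slot consumed by hci_cmd_work() above is replenished and cmd_work is
 * kicked to send the next queued command.
 */
static void example_replenish_cmd_credit(struct hci_dev *hdev, u8 ncmd)
{
        if (!ncmd)
                return;

        atomic_set(&hdev->cmd_cnt, 1);

        if (!skb_queue_empty(&hdev->cmd_q))
                queue_work(hdev->workqueue, &hdev->cmd_work);
}
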
void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

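/* Example usage (hypothetical helper, not part of this file): disabling
 * the LE scan as a single-command request, following the same request
 * pattern sketched after hci_req_cmd_complete() above.
 */
static int example_disable_le_scan(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        hci_req_add_le_scan_disable(&req);

        /* no completion callback needed here */
        return hci_req_run(&req, NULL);
}
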
static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
        if (status)
                BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
                       status);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections, we start
 * the background scanning; otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_request req;
        struct hci_conn *conn;
        int err;

        hci_req_init(&req, hdev);

        if (list_empty(&hdev->pend_le_conns)) {
                /* If there are no pending LE connections, we should stop
                 * the background scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                        return;

                hci_req_add_le_scan_disable(&req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                u8 own_addr_type;

                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is already scanning we are done. */
                if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                        return;

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
                if (conn)
                        return;

                /* Set require_privacy to true to avoid identification from
                 * unknown peer devices. Since this is passive scanning, no
                 * SCAN_REQ using the local identity should be sent. Mandating
                 * privacy is just an extra precaution.
                 */
                if (hci_update_random_address(&req, true, &own_addr_type))
                        return;

                memset(&param_cp, 0, sizeof(param_cp));
                param_cp.type = LE_SCAN_PASSIVE;
                param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
                param_cp.window = cpu_to_le16(hdev->le_scan_window);
                param_cp.own_address_type = own_addr_type;
                hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);

                memset(&enable_cp, 0, sizeof(enable_cp));
                enable_cp.enable = LE_SCAN_ENABLE;
                enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
                hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                            &enable_cp);

                BT_DBG("%s starting background scanning", hdev->name);
        }

        err = hci_req_run(&req, update_background_scan_complete);
        if (err)
                BT_ERR("Failed to run HCI request: err %d", err);
}
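
/* Example (hypothetical caller, not part of this file): per the comment
 * above, hdev->lock must be held when calling hci_update_background_scan().
 */
static void example_trigger_background_scan(struct hci_dev *hdev)
{
        hci_dev_lock(hdev);
        hci_update_background_scan(hdev);
        hci_dev_unlock(hdev);
}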