Bluetooth: Fix properly ignoring unexpected SMP PDUs
net/bluetooth/hci_core.c (cascardo/linux.git)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37
38 #include "smp.h"
39
40 static void hci_rx_work(struct work_struct *work);
41 static void hci_cmd_work(struct work_struct *work);
42 static void hci_tx_work(struct work_struct *work);
43
44 /* HCI device list */
45 LIST_HEAD(hci_dev_list);
46 DEFINE_RWLOCK(hci_dev_list_lock);
47
48 /* HCI callback list */
49 LIST_HEAD(hci_cb_list);
50 DEFINE_RWLOCK(hci_cb_list_lock);
51
52 /* HCI ID Numbering */
53 static DEFINE_IDA(hci_index_ida);
54
55 /* ---- HCI notifications ---- */
56
57 static void hci_notify(struct hci_dev *hdev, int event)
58 {
59         hci_sock_dev_event(hdev, event);
60 }
61
62 /* ---- HCI debugfs entries ---- */
63
64 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
65                              size_t count, loff_t *ppos)
66 {
67         struct hci_dev *hdev = file->private_data;
68         char buf[3];
69
70         buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
71         buf[1] = '\n';
72         buf[2] = '\0';
73         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
74 }
75
76 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
77                               size_t count, loff_t *ppos)
78 {
79         struct hci_dev *hdev = file->private_data;
80         struct sk_buff *skb;
81         char buf[32];
82         size_t buf_size = min(count, (sizeof(buf)-1));
83         bool enable;
84         int err;
85
86         if (!test_bit(HCI_UP, &hdev->flags))
87                 return -ENETDOWN;
88
89         if (copy_from_user(buf, user_buf, buf_size))
90                 return -EFAULT;
91
92         buf[buf_size] = '\0';
93         if (strtobool(buf, &enable))
94                 return -EINVAL;
95
96         if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
97                 return -EALREADY;
98
99         hci_req_lock(hdev);
100         if (enable)
101                 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
102                                      HCI_CMD_TIMEOUT);
103         else
104                 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
105                                      HCI_CMD_TIMEOUT);
106         hci_req_unlock(hdev);
107
108         if (IS_ERR(skb))
109                 return PTR_ERR(skb);
110
111         err = -bt_to_errno(skb->data[0]);
112         kfree_skb(skb);
113
114         if (err < 0)
115                 return err;
116
117         change_bit(HCI_DUT_MODE, &hdev->dev_flags);
118
119         return count;
120 }
121
122 static const struct file_operations dut_mode_fops = {
123         .open           = simple_open,
124         .read           = dut_mode_read,
125         .write          = dut_mode_write,
126         .llseek         = default_llseek,
127 };
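/* Usage note (added; a hedged illustration, not from the original
 * source): once __hci_init() below registers this attribute, DUT mode
 * would typically be toggled from userspace through debugfs, e.g.
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * The write handler above then issues HCI_OP_ENABLE_DUT_MODE, or
 * HCI_OP_RESET when leaving the mode, via __hci_cmd_sync().
 */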
128
129 static int features_show(struct seq_file *f, void *ptr)
130 {
131         struct hci_dev *hdev = f->private;
132         u8 p;
133
134         hci_dev_lock(hdev);
135         for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
136                 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
137                            "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
138                            hdev->features[p][0], hdev->features[p][1],
139                            hdev->features[p][2], hdev->features[p][3],
140                            hdev->features[p][4], hdev->features[p][5],
141                            hdev->features[p][6], hdev->features[p][7]);
142         }
143         if (lmp_le_capable(hdev))
144                 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
145                            "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
146                            hdev->le_features[0], hdev->le_features[1],
147                            hdev->le_features[2], hdev->le_features[3],
148                            hdev->le_features[4], hdev->le_features[5],
149                            hdev->le_features[6], hdev->le_features[7]);
150         hci_dev_unlock(hdev);
151
152         return 0;
153 }
154
155 static int features_open(struct inode *inode, struct file *file)
156 {
157         return single_open(file, features_show, inode->i_private);
158 }
159
160 static const struct file_operations features_fops = {
161         .open           = features_open,
162         .read           = seq_read,
163         .llseek         = seq_lseek,
164         .release        = single_release,
165 };
166
167 static int blacklist_show(struct seq_file *f, void *p)
168 {
169         struct hci_dev *hdev = f->private;
170         struct bdaddr_list *b;
171
172         hci_dev_lock(hdev);
173         list_for_each_entry(b, &hdev->blacklist, list)
174                 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
175         hci_dev_unlock(hdev);
176
177         return 0;
178 }
179
180 static int blacklist_open(struct inode *inode, struct file *file)
181 {
182         return single_open(file, blacklist_show, inode->i_private);
183 }
184
185 static const struct file_operations blacklist_fops = {
186         .open           = blacklist_open,
187         .read           = seq_read,
188         .llseek         = seq_lseek,
189         .release        = single_release,
190 };
191
192 static int uuids_show(struct seq_file *f, void *p)
193 {
194         struct hci_dev *hdev = f->private;
195         struct bt_uuid *uuid;
196
197         hci_dev_lock(hdev);
198         list_for_each_entry(uuid, &hdev->uuids, list) {
199                 u8 i, val[16];
200
201                 /* The Bluetooth UUID values are stored with reversed
202                  * (little endian) byte order, so reverse them into the
203                  * big endian order expected by the %pUb modifier.
204                  */
205                 for (i = 0; i < 16; i++)
206                         val[i] = uuid->uuid[15 - i];
207
208                 seq_printf(f, "%pUb\n", val);
209         }
210         hci_dev_unlock(hdev);
211
212         return 0;
213 }
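/* Worked example for the reversal above (hedged illustration): the
 * Bluetooth base UUID 00000000-0000-1000-8000-00805f9b34fb would be
 * stored as the byte sequence
 *
 *   fb 34 9b 5f 80 00 00 80 00 10 00 00 00 00 00 00
 *
 * and the loop restores the printable big endian order for %pUb.
 */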
214
215 static int uuids_open(struct inode *inode, struct file *file)
216 {
217         return single_open(file, uuids_show, inode->i_private);
218 }
219
220 static const struct file_operations uuids_fops = {
221         .open           = uuids_open,
222         .read           = seq_read,
223         .llseek         = seq_lseek,
224         .release        = single_release,
225 };
226
227 static int inquiry_cache_show(struct seq_file *f, void *p)
228 {
229         struct hci_dev *hdev = f->private;
230         struct discovery_state *cache = &hdev->discovery;
231         struct inquiry_entry *e;
232
233         hci_dev_lock(hdev);
234
235         list_for_each_entry(e, &cache->all, all) {
236                 struct inquiry_data *data = &e->data;
237                 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
238                            &data->bdaddr,
239                            data->pscan_rep_mode, data->pscan_period_mode,
240                            data->pscan_mode, data->dev_class[2],
241                            data->dev_class[1], data->dev_class[0],
242                            __le16_to_cpu(data->clock_offset),
243                            data->rssi, data->ssp_mode, e->timestamp);
244         }
245
246         hci_dev_unlock(hdev);
247
248         return 0;
249 }
250
251 static int inquiry_cache_open(struct inode *inode, struct file *file)
252 {
253         return single_open(file, inquiry_cache_show, inode->i_private);
254 }
255
256 static const struct file_operations inquiry_cache_fops = {
257         .open           = inquiry_cache_open,
258         .read           = seq_read,
259         .llseek         = seq_lseek,
260         .release        = single_release,
261 };
262
263 static int link_keys_show(struct seq_file *f, void *ptr)
264 {
265         struct hci_dev *hdev = f->private;
266         struct list_head *p, *n;
267
268         hci_dev_lock(hdev);
269         list_for_each_safe(p, n, &hdev->link_keys) {
270                 struct link_key *key = list_entry(p, struct link_key, list);
271                 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
272                            HCI_LINK_KEY_SIZE, key->val, key->pin_len);
273         }
274         hci_dev_unlock(hdev);
275
276         return 0;
277 }
278
279 static int link_keys_open(struct inode *inode, struct file *file)
280 {
281         return single_open(file, link_keys_show, inode->i_private);
282 }
283
284 static const struct file_operations link_keys_fops = {
285         .open           = link_keys_open,
286         .read           = seq_read,
287         .llseek         = seq_lseek,
288         .release        = single_release,
289 };
290
291 static int dev_class_show(struct seq_file *f, void *ptr)
292 {
293         struct hci_dev *hdev = f->private;
294
295         hci_dev_lock(hdev);
296         seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
297                    hdev->dev_class[1], hdev->dev_class[0]);
298         hci_dev_unlock(hdev);
299
300         return 0;
301 }
302
303 static int dev_class_open(struct inode *inode, struct file *file)
304 {
305         return single_open(file, dev_class_show, inode->i_private);
306 }
307
308 static const struct file_operations dev_class_fops = {
309         .open           = dev_class_open,
310         .read           = seq_read,
311         .llseek         = seq_lseek,
312         .release        = single_release,
313 };
314
315 static int voice_setting_get(void *data, u64 *val)
316 {
317         struct hci_dev *hdev = data;
318
319         hci_dev_lock(hdev);
320         *val = hdev->voice_setting;
321         hci_dev_unlock(hdev);
322
323         return 0;
324 }
325
326 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
327                         NULL, "0x%4.4llx\n");
328
329 static int auto_accept_delay_set(void *data, u64 val)
330 {
331         struct hci_dev *hdev = data;
332
333         hci_dev_lock(hdev);
334         hdev->auto_accept_delay = val;
335         hci_dev_unlock(hdev);
336
337         return 0;
338 }
339
340 static int auto_accept_delay_get(void *data, u64 *val)
341 {
342         struct hci_dev *hdev = data;
343
344         hci_dev_lock(hdev);
345         *val = hdev->auto_accept_delay;
346         hci_dev_unlock(hdev);
347
348         return 0;
349 }
350
351 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
352                         auto_accept_delay_set, "%llu\n");
353
354 static int ssp_debug_mode_set(void *data, u64 val)
355 {
356         struct hci_dev *hdev = data;
357         struct sk_buff *skb;
358         __u8 mode;
359         int err;
360
361         if (val != 0 && val != 1)
362                 return -EINVAL;
363
364         if (!test_bit(HCI_UP, &hdev->flags))
365                 return -ENETDOWN;
366
367         hci_req_lock(hdev);
368         mode = val;
369         skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
370                              &mode, HCI_CMD_TIMEOUT);
371         hci_req_unlock(hdev);
372
373         if (IS_ERR(skb))
374                 return PTR_ERR(skb);
375
376         err = -bt_to_errno(skb->data[0]);
377         kfree_skb(skb);
378
379         if (err < 0)
380                 return err;
381
382         hci_dev_lock(hdev);
383         hdev->ssp_debug_mode = val;
384         hci_dev_unlock(hdev);
385
386         return 0;
387 }
388
389 static int ssp_debug_mode_get(void *data, u64 *val)
390 {
391         struct hci_dev *hdev = data;
392
393         hci_dev_lock(hdev);
394         *val = hdev->ssp_debug_mode;
395         hci_dev_unlock(hdev);
396
397         return 0;
398 }
399
400 DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
401                         ssp_debug_mode_set, "%llu\n");
402
403 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
404                                      size_t count, loff_t *ppos)
405 {
406         struct hci_dev *hdev = file->private_data;
407         char buf[3];
408
409         buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
410         buf[1] = '\n';
411         buf[2] = '\0';
412         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
413 }
414
415 static ssize_t force_sc_support_write(struct file *file,
416                                       const char __user *user_buf,
417                                       size_t count, loff_t *ppos)
418 {
419         struct hci_dev *hdev = file->private_data;
420         char buf[32];
421         size_t buf_size = min(count, (sizeof(buf)-1));
422         bool enable;
423
424         if (test_bit(HCI_UP, &hdev->flags))
425                 return -EBUSY;
426
427         if (copy_from_user(buf, user_buf, buf_size))
428                 return -EFAULT;
429
430         buf[buf_size] = '\0';
431         if (strtobool(buf, &enable))
432                 return -EINVAL;
433
434         if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
435                 return -EALREADY;
436
437         change_bit(HCI_FORCE_SC, &hdev->dev_flags);
438
439         return count;
440 }
441
442 static const struct file_operations force_sc_support_fops = {
443         .open           = simple_open,
444         .read           = force_sc_support_read,
445         .write          = force_sc_support_write,
446         .llseek         = default_llseek,
447 };
448
449 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
450                                  size_t count, loff_t *ppos)
451 {
452         struct hci_dev *hdev = file->private_data;
453         char buf[3];
454
455         buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
456         buf[1] = '\n';
457         buf[2] = '\0';
458         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
459 }
460
461 static const struct file_operations sc_only_mode_fops = {
462         .open           = simple_open,
463         .read           = sc_only_mode_read,
464         .llseek         = default_llseek,
465 };
466
467 static int idle_timeout_set(void *data, u64 val)
468 {
469         struct hci_dev *hdev = data;
470
471         if (val != 0 && (val < 500 || val > 3600000))
472                 return -EINVAL;
473
474         hci_dev_lock(hdev);
475         hdev->idle_timeout = val;
476         hci_dev_unlock(hdev);
477
478         return 0;
479 }
480
481 static int idle_timeout_get(void *data, u64 *val)
482 {
483         struct hci_dev *hdev = data;
484
485         hci_dev_lock(hdev);
486         *val = hdev->idle_timeout;
487         hci_dev_unlock(hdev);
488
489         return 0;
490 }
491
492 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
493                         idle_timeout_set, "%llu\n");
494
495 static int sniff_min_interval_set(void *data, u64 val)
496 {
497         struct hci_dev *hdev = data;
498
499         if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
500                 return -EINVAL;
501
502         hci_dev_lock(hdev);
503         hdev->sniff_min_interval = val;
504         hci_dev_unlock(hdev);
505
506         return 0;
507 }
508
509 static int sniff_min_interval_get(void *data, u64 *val)
510 {
511         struct hci_dev *hdev = data;
512
513         hci_dev_lock(hdev);
514         *val = hdev->sniff_min_interval;
515         hci_dev_unlock(hdev);
516
517         return 0;
518 }
519
520 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
521                         sniff_min_interval_set, "%llu\n");
522
523 static int sniff_max_interval_set(void *data, u64 val)
524 {
525         struct hci_dev *hdev = data;
526
527         if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
528                 return -EINVAL;
529
530         hci_dev_lock(hdev);
531         hdev->sniff_max_interval = val;
532         hci_dev_unlock(hdev);
533
534         return 0;
535 }
536
537 static int sniff_max_interval_get(void *data, u64 *val)
538 {
539         struct hci_dev *hdev = data;
540
541         hci_dev_lock(hdev);
542         *val = hdev->sniff_max_interval;
543         hci_dev_unlock(hdev);
544
545         return 0;
546 }
547
548 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
549                         sniff_max_interval_set, "%llu\n");
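/* Unit note (added; hedged reading of the baseband spec): sniff
 * intervals are expressed in 0.625 ms slots and must be even, which
 * is what the val % 2 checks above enforce. Writing 800, for example,
 * selects 800 * 0.625 ms = 500 ms.
 */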
550
551 static int static_address_show(struct seq_file *f, void *p)
552 {
553         struct hci_dev *hdev = f->private;
554
555         hci_dev_lock(hdev);
556         seq_printf(f, "%pMR\n", &hdev->static_addr);
557         hci_dev_unlock(hdev);
558
559         return 0;
560 }
561
562 static int static_address_open(struct inode *inode, struct file *file)
563 {
564         return single_open(file, static_address_show, inode->i_private);
565 }
566
567 static const struct file_operations static_address_fops = {
568         .open           = static_address_open,
569         .read           = seq_read,
570         .llseek         = seq_lseek,
571         .release        = single_release,
572 };
573
574 static int own_address_type_set(void *data, u64 val)
575 {
576         struct hci_dev *hdev = data;
577
578         if (val != 0 && val != 1)
579                 return -EINVAL;
580
581         hci_dev_lock(hdev);
582         hdev->own_addr_type = val;
583         hci_dev_unlock(hdev);
584
585         return 0;
586 }
587
588 static int own_address_type_get(void *data, u64 *val)
589 {
590         struct hci_dev *hdev = data;
591
592         hci_dev_lock(hdev);
593         *val = hdev->own_addr_type;
594         hci_dev_unlock(hdev);
595
596         return 0;
597 }
598
599 DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
600                         own_address_type_set, "%llu\n");
601
602 static int long_term_keys_show(struct seq_file *f, void *ptr)
603 {
604         struct hci_dev *hdev = f->private;
605         struct list_head *p, *n;
606
607         hci_dev_lock(hdev);
608         list_for_each_safe(p, n, &hdev->long_term_keys) {
609                 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
610                 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
611                            &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
612                            ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
613                            8, ltk->rand, 16, ltk->val);
614         }
615         hci_dev_unlock(hdev);
616
617         return 0;
618 }
619
620 static int long_term_keys_open(struct inode *inode, struct file *file)
621 {
622         return single_open(file, long_term_keys_show, inode->i_private);
623 }
624
625 static const struct file_operations long_term_keys_fops = {
626         .open           = long_term_keys_open,
627         .read           = seq_read,
628         .llseek         = seq_lseek,
629         .release        = single_release,
630 };
631
632 static int conn_min_interval_set(void *data, u64 val)
633 {
634         struct hci_dev *hdev = data;
635
636         if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
637                 return -EINVAL;
638
639         hci_dev_lock(hdev);
640         hdev->le_conn_min_interval = val;
641         hci_dev_unlock(hdev);
642
643         return 0;
644 }
645
646 static int conn_min_interval_get(void *data, u64 *val)
647 {
648         struct hci_dev *hdev = data;
649
650         hci_dev_lock(hdev);
651         *val = hdev->le_conn_min_interval;
652         hci_dev_unlock(hdev);
653
654         return 0;
655 }
656
657 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
658                         conn_min_interval_set, "%llu\n");
659
660 static int conn_max_interval_set(void *data, u64 val)
661 {
662         struct hci_dev *hdev = data;
663
664         if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
665                 return -EINVAL;
666
667         hci_dev_lock(hdev);
668         hdev->le_conn_max_interval = val;
669         hci_dev_unlock(hdev);
670
671         return 0;
672 }
673
674 static int conn_max_interval_get(void *data, u64 *val)
675 {
676         struct hci_dev *hdev = data;
677
678         hci_dev_lock(hdev);
679         *val = hdev->le_conn_max_interval;
680         hci_dev_unlock(hdev);
681
682         return 0;
683 }
684
685 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
686                         conn_max_interval_set, "%llu\n");
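/* Unit note (added; hedged reading of the LE spec): connection
 * interval values are in units of 1.25 ms, so the 0x0006..0x0c80
 * range enforced above corresponds to 7.5 ms .. 4 s
 * (0x0c80 = 3200, and 3200 * 1.25 ms = 4 s).
 */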
687
688 static ssize_t lowpan_read(struct file *file, char __user *user_buf,
689                            size_t count, loff_t *ppos)
690 {
691         struct hci_dev *hdev = file->private_data;
692         char buf[3];
693
694         buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
695         buf[1] = '\n';
696         buf[2] = '\0';
697         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
698 }
699
700 static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
701                             size_t count, loff_t *position)
702 {
703         struct hci_dev *hdev = fp->private_data;
704         bool enable;
705         char buf[32];
706         size_t buf_size = min(count, (sizeof(buf)-1));
707
708         if (copy_from_user(buf, user_buffer, buf_size))
709                 return -EFAULT;
710
711         buf[buf_size] = '\0';
712
713         if (strtobool(buf, &enable) < 0)
714                 return -EINVAL;
715
716         if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
717                 return -EALREADY;
718
719         change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
720
721         return count;
722 }
723
724 static const struct file_operations lowpan_debugfs_fops = {
725         .open           = simple_open,
726         .read           = lowpan_read,
727         .write          = lowpan_write,
728         .llseek         = default_llseek,
729 };
730
731 /* ---- HCI requests ---- */
732
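/* Overview (added for clarity; inferred from the code below):
 * synchronous requests drive hdev->req_status through a small state
 * machine. It is set to HCI_REQ_PEND while a request is outstanding
 * and moves to HCI_REQ_DONE or HCI_REQ_CANCELED on completion, at
 * which point any sleeper on hdev->req_wait_q is woken up with the
 * outcome left in hdev->req_result.
 */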
733 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
734 {
735         BT_DBG("%s result 0x%2.2x", hdev->name, result);
736
737         if (hdev->req_status == HCI_REQ_PEND) {
738                 hdev->req_result = result;
739                 hdev->req_status = HCI_REQ_DONE;
740                 wake_up_interruptible(&hdev->req_wait_q);
741         }
742 }
743
744 static void hci_req_cancel(struct hci_dev *hdev, int err)
745 {
746         BT_DBG("%s err 0x%2.2x", hdev->name, err);
747
748         if (hdev->req_status == HCI_REQ_PEND) {
749                 hdev->req_result = err;
750                 hdev->req_status = HCI_REQ_CANCELED;
751                 wake_up_interruptible(&hdev->req_wait_q);
752         }
753 }
754
755 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
756                                             u8 event)
757 {
758         struct hci_ev_cmd_complete *ev;
759         struct hci_event_hdr *hdr;
760         struct sk_buff *skb;
761
762         hci_dev_lock(hdev);
763
764         skb = hdev->recv_evt;
765         hdev->recv_evt = NULL;
766
767         hci_dev_unlock(hdev);
768
769         if (!skb)
770                 return ERR_PTR(-ENODATA);
771
772         if (skb->len < sizeof(*hdr)) {
773                 BT_ERR("Too short HCI event");
774                 goto failed;
775         }
776
777         hdr = (void *) skb->data;
778         skb_pull(skb, HCI_EVENT_HDR_SIZE);
779
780         if (event) {
781                 if (hdr->evt != event)
782                         goto failed;
783                 return skb;
784         }
785
786         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
787                 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
788                 goto failed;
789         }
790
791         if (skb->len < sizeof(*ev)) {
792                 BT_ERR("Too short cmd_complete event");
793                 goto failed;
794         }
795
796         ev = (void *) skb->data;
797         skb_pull(skb, sizeof(*ev));
798
799         if (opcode == __le16_to_cpu(ev->opcode))
800                 return skb;
801
802         BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
803                __le16_to_cpu(ev->opcode));
804
805 failed:
806         kfree_skb(skb);
807         return ERR_PTR(-ENODATA);
808 }
809
810 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
811                                   const void *param, u8 event, u32 timeout)
812 {
813         DECLARE_WAITQUEUE(wait, current);
814         struct hci_request req;
815         int err = 0;
816
817         BT_DBG("%s", hdev->name);
818
819         hci_req_init(&req, hdev);
820
821         hci_req_add_ev(&req, opcode, plen, param, event);
822
823         hdev->req_status = HCI_REQ_PEND;
824
825         err = hci_req_run(&req, hci_req_sync_complete);
826         if (err < 0)
827                 return ERR_PTR(err);
828
829         add_wait_queue(&hdev->req_wait_q, &wait);
830         set_current_state(TASK_INTERRUPTIBLE);
831
832         schedule_timeout(timeout);
833
834         remove_wait_queue(&hdev->req_wait_q, &wait);
835
836         if (signal_pending(current))
837                 return ERR_PTR(-EINTR);
838
839         switch (hdev->req_status) {
840         case HCI_REQ_DONE:
841                 err = -bt_to_errno(hdev->req_result);
842                 break;
843
844         case HCI_REQ_CANCELED:
845                 err = -hdev->req_result;
846                 break;
847
848         default:
849                 err = -ETIMEDOUT;
850                 break;
851         }
852
853         hdev->req_status = hdev->req_result = 0;
854
855         BT_DBG("%s end: err %d", hdev->name, err);
856
857         if (err < 0)
858                 return ERR_PTR(err);
859
860         return hci_get_cmd_complete(hdev, opcode, event);
861 }
862 EXPORT_SYMBOL(__hci_cmd_sync_ev);
863
864 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
865                                const void *param, u32 timeout)
866 {
867         return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
868 }
869 EXPORT_SYMBOL(__hci_cmd_sync);
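/* Minimal usage sketch (added; mirrors the dut_mode_write() call site
 * above rather than introducing new API):
 *
 *   skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *   if (IS_ERR(skb))
 *           return PTR_ERR(skb);
 *   status = skb->data[0];          (first byte is the HCI status)
 *   kfree_skb(skb);
 *
 * Callers serialize against other requests with hci_req_lock(), as
 * the existing call sites in this file do.
 */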
870
871 /* Execute request and wait for completion. */
872 static int __hci_req_sync(struct hci_dev *hdev,
873                           void (*func)(struct hci_request *req,
874                                       unsigned long opt),
875                           unsigned long opt, __u32 timeout)
876 {
877         struct hci_request req;
878         DECLARE_WAITQUEUE(wait, current);
879         int err = 0;
880
881         BT_DBG("%s start", hdev->name);
882
883         hci_req_init(&req, hdev);
884
885         hdev->req_status = HCI_REQ_PEND;
886
887         func(&req, opt);
888
889         err = hci_req_run(&req, hci_req_sync_complete);
890         if (err < 0) {
891                 hdev->req_status = 0;
892
893                 /* ENODATA means the HCI request command queue is empty.
894                  * This can happen when a request with conditionals doesn't
895                  * trigger any commands to be sent. This is normal behavior
896                  * and should not trigger an error return.
897                  */
898                 if (err == -ENODATA)
899                         return 0;
900
901                 return err;
902         }
903
904         add_wait_queue(&hdev->req_wait_q, &wait);
905         set_current_state(TASK_INTERRUPTIBLE);
906
907         schedule_timeout(timeout);
908
909         remove_wait_queue(&hdev->req_wait_q, &wait);
910
911         if (signal_pending(current))
912                 return -EINTR;
913
914         switch (hdev->req_status) {
915         case HCI_REQ_DONE:
916                 err = -bt_to_errno(hdev->req_result);
917                 break;
918
919         case HCI_REQ_CANCELED:
920                 err = -hdev->req_result;
921                 break;
922
923         default:
924                 err = -ETIMEDOUT;
925                 break;
926         }
927
928         hdev->req_status = hdev->req_result = 0;
929
930         BT_DBG("%s end: err %d", hdev->name, err);
931
932         return err;
933 }
934
935 static int hci_req_sync(struct hci_dev *hdev,
936                         void (*req)(struct hci_request *req,
937                                     unsigned long opt),
938                         unsigned long opt, __u32 timeout)
939 {
940         int ret;
941
942         if (!test_bit(HCI_UP, &hdev->flags))
943                 return -ENETDOWN;
944
945         /* Serialize all requests */
946         hci_req_lock(hdev);
947         ret = __hci_req_sync(hdev, req, opt, timeout);
948         hci_req_unlock(hdev);
949
950         return ret;
951 }
952
953 static void hci_reset_req(struct hci_request *req, unsigned long opt)
954 {
955         BT_DBG("%s %ld", req->hdev->name, opt);
956
957         /* Reset device */
958         set_bit(HCI_RESET, &req->hdev->flags);
959         hci_req_add(req, HCI_OP_RESET, 0, NULL);
960 }
961
962 static void bredr_init(struct hci_request *req)
963 {
964         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
965
966         /* Read Local Supported Features */
967         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
968
969         /* Read Local Version */
970         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
971
972         /* Read BD Address */
973         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
974 }
975
976 static void amp_init(struct hci_request *req)
977 {
978         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
979
980         /* Read Local Version */
981         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
982
983         /* Read Local Supported Commands */
984         hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
985
986         /* Read Local Supported Features */
987         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
988
989         /* Read Local AMP Info */
990         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
991
992         /* Read Data Blk size */
993         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
994
995         /* Read Flow Control Mode */
996         hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
997
998         /* Read Location Data */
999         hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1000 }
1001
1002 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1003 {
1004         struct hci_dev *hdev = req->hdev;
1005
1006         BT_DBG("%s %ld", hdev->name, opt);
1007
1008         /* Reset */
1009         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1010                 hci_reset_req(req, 0);
1011
1012         switch (hdev->dev_type) {
1013         case HCI_BREDR:
1014                 bredr_init(req);
1015                 break;
1016
1017         case HCI_AMP:
1018                 amp_init(req);
1019                 break;
1020
1021         default:
1022                 BT_ERR("Unknown device type %d", hdev->dev_type);
1023                 break;
1024         }
1025 }
1026
1027 static void bredr_setup(struct hci_request *req)
1028 {
1029         struct hci_dev *hdev = req->hdev;
1030
1031         __le16 param;
1032         __u8 flt_type;
1033
1034         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1035         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1036
1037         /* Read Class of Device */
1038         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1039
1040         /* Read Local Name */
1041         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1042
1043         /* Read Voice Setting */
1044         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1045
1046         /* Read Number of Supported IAC */
1047         hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1048
1049         /* Read Current IAC LAP */
1050         hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1051
1052         /* Clear Event Filters */
1053         flt_type = HCI_FLT_CLEAR_ALL;
1054         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1055
1056         /* Connection accept timeout: 0x7d00 slots * 0.625 ms = ~20 secs */
1057         param = __constant_cpu_to_le16(0x7d00);
1058         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1059
1060         /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1061          * but it does not support page scan related HCI commands.
1062          */
1063         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1064                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1065                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1066         }
1067 }
1068
1069 static void le_setup(struct hci_request *req)
1070 {
1071         struct hci_dev *hdev = req->hdev;
1072
1073         /* Read LE Buffer Size */
1074         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1075
1076         /* Read LE Local Supported Features */
1077         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1078
1079         /* Read LE Advertising Channel TX Power */
1080         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1081
1082         /* Read LE White List Size */
1083         hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1084
1085         /* Read LE Supported States */
1086         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1087
1088         /* LE-only controllers have LE implicitly enabled */
1089         if (!lmp_bredr_capable(hdev))
1090                 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1091 }
1092
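/* Summary (added for clarity; inferred from the code): returns the
 * inquiry mode to program into the controller, 0x02 for inquiry with
 * extended results, 0x01 for inquiry with RSSI and 0x00 for standard
 * inquiry. The manufacturer/revision checks below are workarounds for
 * controllers that can report RSSI without advertising the feature;
 * the specific IDs are kept as-is from the original code.
 */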
1093 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1094 {
1095         if (lmp_ext_inq_capable(hdev))
1096                 return 0x02;
1097
1098         if (lmp_inq_rssi_capable(hdev))
1099                 return 0x01;
1100
1101         if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1102             hdev->lmp_subver == 0x0757)
1103                 return 0x01;
1104
1105         if (hdev->manufacturer == 15) {
1106                 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1107                         return 0x01;
1108                 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1109                         return 0x01;
1110                 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1111                         return 0x01;
1112         }
1113
1114         if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1115             hdev->lmp_subver == 0x1805)
1116                 return 0x01;
1117
1118         return 0x00;
1119 }
1120
1121 static void hci_setup_inquiry_mode(struct hci_request *req)
1122 {
1123         u8 mode;
1124
1125         mode = hci_get_inquiry_mode(req->hdev);
1126
1127         hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1128 }
1129
1130 static void hci_setup_event_mask(struct hci_request *req)
1131 {
1132         struct hci_dev *hdev = req->hdev;
1133
1134         /* The second byte is 0xff instead of 0x9f (two reserved bits
1135          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1136          * command otherwise.
1137          */
1138         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1139
1140         /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
1141          * any event mask for pre-1.2 devices.
1142          */
1143         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1144                 return;
1145
1146         if (lmp_bredr_capable(hdev)) {
1147                 events[4] |= 0x01; /* Flow Specification Complete */
1148                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1149                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1150                 events[5] |= 0x08; /* Synchronous Connection Complete */
1151                 events[5] |= 0x10; /* Synchronous Connection Changed */
1152         } else {
1153                 /* Use a different default for LE-only devices */
1154                 memset(events, 0, sizeof(events));
1155                 events[0] |= 0x10; /* Disconnection Complete */
1156                 events[0] |= 0x80; /* Encryption Change */
1157                 events[1] |= 0x08; /* Read Remote Version Information Complete */
1158                 events[1] |= 0x20; /* Command Complete */
1159                 events[1] |= 0x40; /* Command Status */
1160                 events[1] |= 0x80; /* Hardware Error */
1161                 events[2] |= 0x04; /* Number of Completed Packets */
1162                 events[3] |= 0x02; /* Data Buffer Overflow */
1163                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1164         }
1165
1166         if (lmp_inq_rssi_capable(hdev))
1167                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1168
1169         if (lmp_sniffsubr_capable(hdev))
1170                 events[5] |= 0x20; /* Sniff Subrating */
1171
1172         if (lmp_pause_enc_capable(hdev))
1173                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1174
1175         if (lmp_ext_inq_capable(hdev))
1176                 events[5] |= 0x40; /* Extended Inquiry Result */
1177
1178         if (lmp_no_flush_capable(hdev))
1179                 events[7] |= 0x01; /* Enhanced Flush Complete */
1180
1181         if (lmp_lsto_capable(hdev))
1182                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1183
1184         if (lmp_ssp_capable(hdev)) {
1185                 events[6] |= 0x01;      /* IO Capability Request */
1186                 events[6] |= 0x02;      /* IO Capability Response */
1187                 events[6] |= 0x04;      /* User Confirmation Request */
1188                 events[6] |= 0x08;      /* User Passkey Request */
1189                 events[6] |= 0x10;      /* Remote OOB Data Request */
1190                 events[6] |= 0x20;      /* Simple Pairing Complete */
1191                 events[7] |= 0x04;      /* User Passkey Notification */
1192                 events[7] |= 0x08;      /* Keypress Notification */
1193                 events[7] |= 0x10;      /* Remote Host Supported
1194                                          * Features Notification
1195                                          */
1196         }
1197
1198         if (lmp_le_capable(hdev))
1199                 events[7] |= 0x20;      /* LE Meta-Event */
1200
1201         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1202
1203         if (lmp_le_capable(hdev)) {
1204                 memset(events, 0, sizeof(events));
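                /* Added note (hedged reading of the spec): 0x1f enables
                 * the first five LE meta events, i.e. Connection
                 * Complete, Advertising Report, Connection Update
                 * Complete, Read Remote Used Features Complete and
                 * Long Term Key Request.
                 */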
1205                 events[0] = 0x1f;
1206                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1207                             sizeof(events), events);
1208         }
1209 }
1210
1211 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1212 {
1213         struct hci_dev *hdev = req->hdev;
1214
1215         if (lmp_bredr_capable(hdev))
1216                 bredr_setup(req);
1217         else
1218                 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1219
1220         if (lmp_le_capable(hdev))
1221                 le_setup(req);
1222
1223         hci_setup_event_mask(req);
1224
1225         /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1226          * local supported commands HCI command.
1227          */
1228         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1229                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1230
1231         if (lmp_ssp_capable(hdev)) {
1232                 /* When SSP is available, the host features page
1233                  * should be available as well. However, some
1234                  * controllers list max_page as 0 as long as SSP
1235                  * has not been enabled. To achieve proper debugging
1236                  * output, force max_page to at least 1.
1237                  */
1238                 hdev->max_page = 0x01;
1239
1240                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1241                         u8 mode = 0x01;
1242                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1243                                     sizeof(mode), &mode);
1244                 } else {
1245                         struct hci_cp_write_eir cp;
1246
1247                         memset(hdev->eir, 0, sizeof(hdev->eir));
1248                         memset(&cp, 0, sizeof(cp));
1249
1250                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1251                 }
1252         }
1253
1254         if (lmp_inq_rssi_capable(hdev))
1255                 hci_setup_inquiry_mode(req);
1256
1257         if (lmp_inq_tx_pwr_capable(hdev))
1258                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1259
1260         if (lmp_ext_feat_capable(hdev)) {
1261                 struct hci_cp_read_local_ext_features cp;
1262
1263                 cp.page = 0x01;
1264                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1265                             sizeof(cp), &cp);
1266         }
1267
1268         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1269                 u8 enable = 1;
1270                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1271                             &enable);
1272         }
1273 }
1274
1275 static void hci_setup_link_policy(struct hci_request *req)
1276 {
1277         struct hci_dev *hdev = req->hdev;
1278         struct hci_cp_write_def_link_policy cp;
1279         u16 link_policy = 0;
1280
1281         if (lmp_rswitch_capable(hdev))
1282                 link_policy |= HCI_LP_RSWITCH;
1283         if (lmp_hold_capable(hdev))
1284                 link_policy |= HCI_LP_HOLD;
1285         if (lmp_sniff_capable(hdev))
1286                 link_policy |= HCI_LP_SNIFF;
1287         if (lmp_park_capable(hdev))
1288                 link_policy |= HCI_LP_PARK;
1289
1290         cp.policy = cpu_to_le16(link_policy);
1291         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1292 }
1293
1294 static void hci_set_le_support(struct hci_request *req)
1295 {
1296         struct hci_dev *hdev = req->hdev;
1297         struct hci_cp_write_le_host_supported cp;
1298
1299         /* LE-only devices do not support explicit enablement */
1300         if (!lmp_bredr_capable(hdev))
1301                 return;
1302
1303         memset(&cp, 0, sizeof(cp));
1304
1305         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1306                 cp.le = 0x01;
1307                 cp.simul = lmp_le_br_capable(hdev);
1308         }
1309
1310         if (cp.le != lmp_host_le_capable(hdev))
1311                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1312                             &cp);
1313 }
1314
1315 static void hci_set_event_mask_page_2(struct hci_request *req)
1316 {
1317         struct hci_dev *hdev = req->hdev;
1318         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1319
1320         /* If the Connectionless Slave Broadcast master role is supported,
1321          * enable all necessary events for it.
1322          */
1323         if (lmp_csb_master_capable(hdev)) {
1324                 events[1] |= 0x40;      /* Triggered Clock Capture */
1325                 events[1] |= 0x80;      /* Synchronization Train Complete */
1326                 events[2] |= 0x10;      /* Slave Page Response Timeout */
1327                 events[2] |= 0x20;      /* CSB Channel Map Change */
1328         }
1329
1330         /* If the Connectionless Slave Broadcast slave role is supported,
1331          * enable all necessary events for it.
1332          */
1333         if (lmp_csb_slave_capable(hdev)) {
1334                 events[2] |= 0x01;      /* Synchronization Train Received */
1335                 events[2] |= 0x02;      /* CSB Receive */
1336                 events[2] |= 0x04;      /* CSB Timeout */
1337                 events[2] |= 0x08;      /* Truncated Page Complete */
1338         }
1339
1340         /* Enable Authenticated Payload Timeout Expired event if supported */
1341         if (lmp_ping_capable(hdev))
1342                 events[2] |= 0x80;
1343
1344         hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1345 }
1346
1347 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1348 {
1349         struct hci_dev *hdev = req->hdev;
1350         u8 p;
1351
1352         /* Some Broadcom based Bluetooth controllers do not support the
1353          * Delete Stored Link Key command. They clearly indicate its
1354          * absence in the bit mask of supported commands.
1355          *
1356          * Check the supported commands and only send the command if it
1357          * is marked as supported. If not supported, assume that the
1358          * controller has no actual support for stored link keys, which
1359          * makes this command redundant anyway.
1360          *
1361          * Some controllers indicate that they support handling deleting
1362          * stored link keys, but they don't. The quirk lets a driver
1363          * just disable this command.
1364          */
1365         if (hdev->commands[6] & 0x80 &&
1366             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1367                 struct hci_cp_delete_stored_link_key cp;
1368
1369                 bacpy(&cp.bdaddr, BDADDR_ANY);
1370                 cp.delete_all = 0x01;
1371                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1372                             sizeof(cp), &cp);
1373         }
1374
1375         if (hdev->commands[5] & 0x10)
1376                 hci_setup_link_policy(req);
1377
1378         if (lmp_le_capable(hdev)) {
1379                 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1380                         /* If the controller has a public BD_ADDR, then
1381                          * by default use that one. If this is a LE only
1382                          * controller without a public address, default
1383                          * to the random address.
1384                          */
1385                         if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1386                                 hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
1387                         else
1388                                 hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
1389                 }
1390
1391                 hci_set_le_support(req);
1392         }
1393
1394         /* Read features beyond page 1 if available */
1395         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1396                 struct hci_cp_read_local_ext_features cp;
1397
1398                 cp.page = p;
1399                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1400                             sizeof(cp), &cp);
1401         }
1402 }
1403
1404 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1405 {
1406         struct hci_dev *hdev = req->hdev;
1407
1408         /* Set event mask page 2 if the HCI command for it is supported */
1409         if (hdev->commands[22] & 0x04)
1410                 hci_set_event_mask_page_2(req);
1411
1412         /* Check for Synchronization Train support */
1413         if (lmp_sync_train_capable(hdev))
1414                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1415
1416         /* Enable Secure Connections if supported and configured */
1417         if ((lmp_sc_capable(hdev) ||
1418              test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
1419             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1420                 u8 support = 0x01;
1421                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1422                             sizeof(support), &support);
1423         }
1424 }
1425
1426 static int __hci_init(struct hci_dev *hdev)
1427 {
1428         int err;
1429
1430         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1431         if (err < 0)
1432                 return err;
1433
1434         /* The Device Under Test (DUT) mode is special and available for
1435          * all controller types. So just create it early on.
1436          */
1437         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1438                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1439                                     &dut_mode_fops);
1440         }
1441
1442         /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1443          * dual-mode BR/EDR/LE controllers. AMP controllers only need
1444          * the first stage init.
1445          */
1446         if (hdev->dev_type != HCI_BREDR)
1447                 return 0;
1448
1449         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1450         if (err < 0)
1451                 return err;
1452
1453         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1454         if (err < 0)
1455                 return err;
1456
1457         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1458         if (err < 0)
1459                 return err;
1460
1461         /* Only create debugfs entries during the initial setup
1462          * phase and not every time the controller gets powered on.
1463          */
1464         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1465                 return 0;
1466
1467         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1468                             &features_fops);
1469         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1470                            &hdev->manufacturer);
1471         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1472         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1473         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1474                             &blacklist_fops);
1475         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1476
1477         if (lmp_bredr_capable(hdev)) {
1478                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1479                                     hdev, &inquiry_cache_fops);
1480                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1481                                     hdev, &link_keys_fops);
1482                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1483                                     hdev, &dev_class_fops);
1484                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1485                                     hdev, &voice_setting_fops);
1486         }
1487
1488         if (lmp_ssp_capable(hdev)) {
1489                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1490                                     hdev, &auto_accept_delay_fops);
1491                 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1492                                     hdev, &ssp_debug_mode_fops);
1493                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1494                                     hdev, &force_sc_support_fops);
1495                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1496                                     hdev, &sc_only_mode_fops);
1497         }
1498
1499         if (lmp_sniff_capable(hdev)) {
1500                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1501                                     hdev, &idle_timeout_fops);
1502                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1503                                     hdev, &sniff_min_interval_fops);
1504                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1505                                     hdev, &sniff_max_interval_fops);
1506         }
1507
1508         if (lmp_le_capable(hdev)) {
1509                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1510                                   &hdev->le_white_list_size);
1511                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1512                                    hdev, &static_address_fops);
1513                 debugfs_create_file("own_address_type", 0644, hdev->debugfs,
1514                                     hdev, &own_address_type_fops);
1515                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1516                                     hdev, &long_term_keys_fops);
1517                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1518                                     hdev, &conn_min_interval_fops);
1519                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1520                                     hdev, &conn_max_interval_fops);
1521                 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1522                                     &lowpan_debugfs_fops);
1523         }
1524
1525         return 0;
1526 }
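/* Location note (added; assumes the usual debugfs mount point): the
 * entries created above show up under
 * /sys/kernel/debug/bluetooth/<hdev name>/, e.g. .../hci0/features.
 */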
1527
1528 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1529 {
1530         __u8 scan = opt;
1531
1532         BT_DBG("%s %x", req->hdev->name, scan);
1533
1534         /* Inquiry and Page scans */
1535         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1536 }
1537
1538 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1539 {
1540         __u8 auth = opt;
1541
1542         BT_DBG("%s %x", req->hdev->name, auth);
1543
1544         /* Authentication */
1545         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1546 }
1547
1548 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1549 {
1550         __u8 encrypt = opt;
1551
1552         BT_DBG("%s %x", req->hdev->name, encrypt);
1553
1554         /* Encryption */
1555         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1556 }
1557
1558 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1559 {
1560         __le16 policy = cpu_to_le16(opt);
1561
1562         BT_DBG("%s %x", req->hdev->name, policy);
1563
1564         /* Default link policy */
1565         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1566 }
1567
1568 /* Get HCI device by index.
1569  * Device is held on return. */
1570 struct hci_dev *hci_dev_get(int index)
1571 {
1572         struct hci_dev *hdev = NULL, *d;
1573
1574         BT_DBG("%d", index);
1575
1576         if (index < 0)
1577                 return NULL;
1578
1579         read_lock(&hci_dev_list_lock);
1580         list_for_each_entry(d, &hci_dev_list, list) {
1581                 if (d->id == index) {
1582                         hdev = hci_dev_hold(d);
1583                         break;
1584                 }
1585         }
1586         read_unlock(&hci_dev_list_lock);
1587         return hdev;
1588 }
1589
1590 /* ---- Inquiry support ---- */
1591
1592 bool hci_discovery_active(struct hci_dev *hdev)
1593 {
1594         struct discovery_state *discov = &hdev->discovery;
1595
1596         switch (discov->state) {
1597         case DISCOVERY_FINDING:
1598         case DISCOVERY_RESOLVING:
1599                 return true;
1600
1601         default:
1602                 return false;
1603         }
1604 }
1605
1606 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1607 {
1608         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1609
1610         if (hdev->discovery.state == state)
1611                 return;
1612
1613         switch (state) {
1614         case DISCOVERY_STOPPED:
1615                 if (hdev->discovery.state != DISCOVERY_STARTING)
1616                         mgmt_discovering(hdev, 0);
1617                 break;
1618         case DISCOVERY_STARTING:
1619                 break;
1620         case DISCOVERY_FINDING:
1621                 mgmt_discovering(hdev, 1);
1622                 break;
1623         case DISCOVERY_RESOLVING:
1624                 break;
1625         case DISCOVERY_STOPPING:
1626                 break;
1627         }
1628
1629         hdev->discovery.state = state;
1630 }
1631
1632 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1633 {
1634         struct discovery_state *cache = &hdev->discovery;
1635         struct inquiry_entry *p, *n;
1636
1637         list_for_each_entry_safe(p, n, &cache->all, all) {
1638                 list_del(&p->all);
1639                 kfree(p);
1640         }
1641
1642         INIT_LIST_HEAD(&cache->unknown);
1643         INIT_LIST_HEAD(&cache->resolve);
1644 }
1645
1646 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1647                                                bdaddr_t *bdaddr)
1648 {
1649         struct discovery_state *cache = &hdev->discovery;
1650         struct inquiry_entry *e;
1651
1652         BT_DBG("cache %p, %pMR", cache, bdaddr);
1653
1654         list_for_each_entry(e, &cache->all, all) {
1655                 if (!bacmp(&e->data.bdaddr, bdaddr))
1656                         return e;
1657         }
1658
1659         return NULL;
1660 }
1661
1662 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1663                                                        bdaddr_t *bdaddr)
1664 {
1665         struct discovery_state *cache = &hdev->discovery;
1666         struct inquiry_entry *e;
1667
1668         BT_DBG("cache %p, %pMR", cache, bdaddr);
1669
1670         list_for_each_entry(e, &cache->unknown, list) {
1671                 if (!bacmp(&e->data.bdaddr, bdaddr))
1672                         return e;
1673         }
1674
1675         return NULL;
1676 }
1677
1678 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1679                                                        bdaddr_t *bdaddr,
1680                                                        int state)
1681 {
1682         struct discovery_state *cache = &hdev->discovery;
1683         struct inquiry_entry *e;
1684
1685         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1686
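        /* BDADDR_ANY acts as a wildcard: match any entry that is in the
         * requested name_state.
         */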
1687         list_for_each_entry(e, &cache->resolve, list) {
1688                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1689                         return e;
1690                 if (!bacmp(&e->data.bdaddr, bdaddr))
1691                         return e;
1692         }
1693
1694         return NULL;
1695 }
1696
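/* Re-position an entry in the name-resolve list. Entries with a
 * pending name request keep their place; the rest stay sorted by
 * signal strength (smallest RSSI magnitude, i.e. strongest signal,
 * first) so that names are resolved for the closest devices first.
 */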
1697 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1698                                       struct inquiry_entry *ie)
1699 {
1700         struct discovery_state *cache = &hdev->discovery;
1701         struct list_head *pos = &cache->resolve;
1702         struct inquiry_entry *p;
1703
1704         list_del(&ie->list);
1705
1706         list_for_each_entry(p, &cache->resolve, list) {
1707                 if (p->name_state != NAME_PENDING &&
1708                     abs(p->data.rssi) >= abs(ie->data.rssi))
1709                         break;
1710                 pos = &p->list;
1711         }
1712
1713         list_add(&ie->list, pos);
1714 }
1715
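/* Add a fresh inquiry result to the cache or update an existing
 * entry. If ssp is non-NULL it is set when either the new data or the
 * cached entry indicates Secure Simple Pairing support. Returns false
 * when the remote name is still unknown (or the entry could not be
 * allocated), true otherwise.
 */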
1716 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1717                               bool name_known, bool *ssp)
1718 {
1719         struct discovery_state *cache = &hdev->discovery;
1720         struct inquiry_entry *ie;
1721
1722         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1723
1724         hci_remove_remote_oob_data(hdev, &data->bdaddr);
1725
1726         if (ssp)
1727                 *ssp = data->ssp_mode;
1728
1729         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1730         if (ie) {
1731                 if (ie->data.ssp_mode && ssp)
1732                         *ssp = true;
1733
1734                 if (ie->name_state == NAME_NEEDED &&
1735                     data->rssi != ie->data.rssi) {
1736                         ie->data.rssi = data->rssi;
1737                         hci_inquiry_cache_update_resolve(hdev, ie);
1738                 }
1739
1740                 goto update;
1741         }
1742
1743         /* Entry not in the cache. Add a new one. */
1744         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1745         if (!ie)
1746                 return false;
1747
1748         list_add(&ie->all, &cache->all);
1749
1750         if (name_known) {
1751                 ie->name_state = NAME_KNOWN;
1752         } else {
1753                 ie->name_state = NAME_NOT_KNOWN;
1754                 list_add(&ie->list, &cache->unknown);
1755         }
1756
1757 update:
1758         if (name_known && ie->name_state != NAME_KNOWN &&
1759             ie->name_state != NAME_PENDING) {
1760                 ie->name_state = NAME_KNOWN;
1761                 list_del(&ie->list);
1762         }
1763
1764         memcpy(&ie->data, data, sizeof(*data));
1765         ie->timestamp = jiffies;
1766         cache->timestamp = jiffies;
1767
1768         if (ie->name_state == NAME_NOT_KNOWN)
1769                 return false;
1770
1771         return true;
1772 }
1773
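/* Copy up to num cached inquiry results into buf as an array of
 * struct inquiry_info. Runs under hdev->lock and must not sleep;
 * returns the number of entries copied.
 */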
1774 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1775 {
1776         struct discovery_state *cache = &hdev->discovery;
1777         struct inquiry_info *info = (struct inquiry_info *) buf;
1778         struct inquiry_entry *e;
1779         int copied = 0;
1780
1781         list_for_each_entry(e, &cache->all, all) {
1782                 struct inquiry_data *data = &e->data;
1783
1784                 if (copied >= num)
1785                         break;
1786
1787                 bacpy(&info->bdaddr, &data->bdaddr);
1788                 info->pscan_rep_mode    = data->pscan_rep_mode;
1789                 info->pscan_period_mode = data->pscan_period_mode;
1790                 info->pscan_mode        = data->pscan_mode;
1791                 memcpy(info->dev_class, data->dev_class, 3);
1792                 info->clock_offset      = data->clock_offset;
1793
1794                 info++;
1795                 copied++;
1796         }
1797
1798         BT_DBG("cache %p, copied %d", cache, copied);
1799         return copied;
1800 }
1801
1802 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1803 {
1804         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1805         struct hci_dev *hdev = req->hdev;
1806         struct hci_cp_inquiry cp;
1807
1808         BT_DBG("%s", hdev->name);
1809
1810         if (test_bit(HCI_INQUIRY, &hdev->flags))
1811                 return;
1812
1813         /* Start Inquiry */
1814         memcpy(&cp.lap, &ir->lap, 3);
1815         cp.length  = ir->length;
1816         cp.num_rsp = ir->num_rsp;
1817         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1818 }
1819
1820 static int wait_inquiry(void *word)
1821 {
1822         schedule();
1823         return signal_pending(current);
1824 }
1825
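/* Handle an inquiry request from userspace (HCIINQUIRY ioctl): run a
 * new inquiry if the cache is stale, empty, or a flush was requested,
 * wait for it to finish and then copy the cached results back out.
 */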
1826 int hci_inquiry(void __user *arg)
1827 {
1828         __u8 __user *ptr = arg;
1829         struct hci_inquiry_req ir;
1830         struct hci_dev *hdev;
1831         int err = 0, do_inquiry = 0, max_rsp;
1832         long timeo;
1833         __u8 *buf;
1834
1835         if (copy_from_user(&ir, ptr, sizeof(ir)))
1836                 return -EFAULT;
1837
1838         hdev = hci_dev_get(ir.dev_id);
1839         if (!hdev)
1840                 return -ENODEV;
1841
1842         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1843                 err = -EBUSY;
1844                 goto done;
1845         }
1846
1847         if (hdev->dev_type != HCI_BREDR) {
1848                 err = -EOPNOTSUPP;
1849                 goto done;
1850         }
1851
1852         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1853                 err = -EOPNOTSUPP;
1854                 goto done;
1855         }
1856
1857         hci_dev_lock(hdev);
1858         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1859             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1860                 hci_inquiry_cache_flush(hdev);
1861                 do_inquiry = 1;
1862         }
1863         hci_dev_unlock(hdev);
1864
1865         timeo = ir.length * msecs_to_jiffies(2000);
1866
1867         if (do_inquiry) {
1868                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1869                                    timeo);
1870                 if (err < 0)
1871                         goto done;
1872
1873                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1874                  * cleared). If it is interrupted by a signal, return -EINTR.
1875                  */
1876                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1877                                 TASK_INTERRUPTIBLE))
1878                         return -EINTR;
1879         }
1880
1881         /* For an unlimited number of responses we use a buffer with
1882          * 255 entries.
1883          */
1884         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1885
1886         /* cache_dump can't sleep. Therefore we allocate a temporary buffer
1887          * and then copy it to user space.
1888          */
1889         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1890         if (!buf) {
1891                 err = -ENOMEM;
1892                 goto done;
1893         }
1894
1895         hci_dev_lock(hdev);
1896         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1897         hci_dev_unlock(hdev);
1898
1899         BT_DBG("num_rsp %d", ir.num_rsp);
1900
1901         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1902                 ptr += sizeof(ir);
1903                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1904                                  ir.num_rsp))
1905                         err = -EFAULT;
1906         } else
1907                 err = -EFAULT;
1908
1909         kfree(buf);
1910
1911 done:
1912         hci_dev_put(hdev);
1913         return err;
1914 }
1915
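/* Bring the device up: verify rfkill and address preconditions
 * (skipped while still in the setup stage), call the driver open
 * callback, run the vendor setup hook and the HCI init sequence, and
 * tear everything back down again if any step fails.
 */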
1916 static int hci_dev_do_open(struct hci_dev *hdev)
1917 {
1918         int ret = 0;
1919
1920         BT_DBG("%s %p", hdev->name, hdev);
1921
1922         hci_req_lock(hdev);
1923
1924         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1925                 ret = -ENODEV;
1926                 goto done;
1927         }
1928
1929         if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1930                 /* Check for rfkill but allow the HCI setup stage to
1931                  * proceed (which in itself doesn't cause any RF activity).
1932                  */
1933                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1934                         ret = -ERFKILL;
1935                         goto done;
1936                 }
1937
1938                 /* Check for valid public address or a configured static
1939          * random address, but let the HCI setup proceed to
1940                  * be able to determine if there is a public address
1941                  * or not.
1942                  *
1943                  * In case of user channel usage, it is not important
1944                  * if a public address or static random address is
1945                  * available.
1946                  *
1947                  * This check is only valid for BR/EDR controllers
1948                  * since AMP controllers do not have an address.
1949                  */
1950                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1951                     hdev->dev_type == HCI_BREDR &&
1952                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1953                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1954                         ret = -EADDRNOTAVAIL;
1955                         goto done;
1956                 }
1957         }
1958
1959         if (test_bit(HCI_UP, &hdev->flags)) {
1960                 ret = -EALREADY;
1961                 goto done;
1962         }
1963
1964         if (hdev->open(hdev)) {
1965                 ret = -EIO;
1966                 goto done;
1967         }
1968
1969         atomic_set(&hdev->cmd_cnt, 1);
1970         set_bit(HCI_INIT, &hdev->flags);
1971
1972         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1973                 ret = hdev->setup(hdev);
1974
1975         if (!ret) {
1976                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1977                         set_bit(HCI_RAW, &hdev->flags);
1978
1979                 if (!test_bit(HCI_RAW, &hdev->flags) &&
1980                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1981                         ret = __hci_init(hdev);
1982         }
1983
1984         clear_bit(HCI_INIT, &hdev->flags);
1985
1986         if (!ret) {
1987                 hci_dev_hold(hdev);
1988                 set_bit(HCI_UP, &hdev->flags);
1989                 hci_notify(hdev, HCI_DEV_UP);
1990                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1991                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1992                     hdev->dev_type == HCI_BREDR) {
1993                         hci_dev_lock(hdev);
1994                         mgmt_powered(hdev, 1);
1995                         hci_dev_unlock(hdev);
1996                 }
1997         } else {
1998                 /* Init failed, cleanup */
1999                 flush_work(&hdev->tx_work);
2000                 flush_work(&hdev->cmd_work);
2001                 flush_work(&hdev->rx_work);
2002
2003                 skb_queue_purge(&hdev->cmd_q);
2004                 skb_queue_purge(&hdev->rx_q);
2005
2006                 if (hdev->flush)
2007                         hdev->flush(hdev);
2008
2009                 if (hdev->sent_cmd) {
2010                         kfree_skb(hdev->sent_cmd);
2011                         hdev->sent_cmd = NULL;
2012                 }
2013
2014                 hdev->close(hdev);
2015                 hdev->flags = 0;
2016         }
2017
2018 done:
2019         hci_req_unlock(hdev);
2020         return ret;
2021 }
2022
2023 /* ---- HCI ioctl helpers ---- */
2024
2025 int hci_dev_open(__u16 dev)
2026 {
2027         struct hci_dev *hdev;
2028         int err;
2029
2030         hdev = hci_dev_get(dev);
2031         if (!hdev)
2032                 return -ENODEV;
2033
2034         /* We need to ensure that no other power on/off work is pending
2035          * before proceeding to call hci_dev_do_open. This is
2036          * particularly important if the setup procedure has not yet
2037          * completed.
2038          */
2039         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2040                 cancel_delayed_work(&hdev->power_off);
2041
2042         /* After this call it is guaranteed that the setup procedure
2043          * has finished. This means that error conditions like RFKILL
2044          * or no valid public or static random address apply.
2045          */
2046         flush_workqueue(hdev->req_workqueue);
2047
2048         err = hci_dev_do_open(hdev);
2049
2050         hci_dev_put(hdev);
2051
2052         return err;
2053 }
2054
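/* Bring the device down: flush pending work, stop discoverability and
 * LE scan timers, drop all connections and queued frames, optionally
 * issue an HCI reset (when HCI_QUIRK_RESET_ON_CLOSE is set) and
 * finally call the driver close callback and clear all non-persistent
 * flags.
 */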
2055 static int hci_dev_do_close(struct hci_dev *hdev)
2056 {
2057         BT_DBG("%s %p", hdev->name, hdev);
2058
2059         cancel_delayed_work(&hdev->power_off);
2060
2061         hci_req_cancel(hdev, ENODEV);
2062         hci_req_lock(hdev);
2063
2064         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2065                 del_timer_sync(&hdev->cmd_timer);
2066                 hci_req_unlock(hdev);
2067                 return 0;
2068         }
2069
2070         /* Flush RX and TX work items */
2071         flush_work(&hdev->tx_work);
2072         flush_work(&hdev->rx_work);
2073
2074         if (hdev->discov_timeout > 0) {
2075                 cancel_delayed_work(&hdev->discov_off);
2076                 hdev->discov_timeout = 0;
2077                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2078                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2079         }
2080
2081         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2082                 cancel_delayed_work(&hdev->service_cache);
2083
2084         cancel_delayed_work_sync(&hdev->le_scan_disable);
2085
2086         hci_dev_lock(hdev);
2087         hci_inquiry_cache_flush(hdev);
2088         hci_conn_hash_flush(hdev);
2089         hci_dev_unlock(hdev);
2090
2091         hci_notify(hdev, HCI_DEV_DOWN);
2092
2093         if (hdev->flush)
2094                 hdev->flush(hdev);
2095
2096         /* Reset device */
2097         skb_queue_purge(&hdev->cmd_q);
2098         atomic_set(&hdev->cmd_cnt, 1);
2099         if (!test_bit(HCI_RAW, &hdev->flags) &&
2100             !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2101             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2102                 set_bit(HCI_INIT, &hdev->flags);
2103                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2104                 clear_bit(HCI_INIT, &hdev->flags);
2105         }
2106
2107         /* Flush cmd work */
2108         flush_work(&hdev->cmd_work);
2109
2110         /* Drop queues */
2111         skb_queue_purge(&hdev->rx_q);
2112         skb_queue_purge(&hdev->cmd_q);
2113         skb_queue_purge(&hdev->raw_q);
2114
2115         /* Drop last sent command */
2116         if (hdev->sent_cmd) {
2117                 del_timer_sync(&hdev->cmd_timer);
2118                 kfree_skb(hdev->sent_cmd);
2119                 hdev->sent_cmd = NULL;
2120         }
2121
2122         kfree_skb(hdev->recv_evt);
2123         hdev->recv_evt = NULL;
2124
2125         /* After this point our queues are empty
2126          * and no tasks are scheduled. */
2127         hdev->close(hdev);
2128
2129         /* Clear flags */
2130         hdev->flags = 0;
2131         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2132
2133         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2134                 if (hdev->dev_type == HCI_BREDR) {
2135                         hci_dev_lock(hdev);
2136                         mgmt_powered(hdev, 0);
2137                         hci_dev_unlock(hdev);
2138                 }
2139         }
2140
2141         /* Controller radio is available but is currently powered down */
2142         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2143
2144         memset(hdev->eir, 0, sizeof(hdev->eir));
2145         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2146
2147         hci_req_unlock(hdev);
2148
2149         hci_dev_put(hdev);
2150         return 0;
2151 }
2152
2153 int hci_dev_close(__u16 dev)
2154 {
2155         struct hci_dev *hdev;
2156         int err;
2157
2158         hdev = hci_dev_get(dev);
2159         if (!hdev)
2160                 return -ENODEV;
2161
2162         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2163                 err = -EBUSY;
2164                 goto done;
2165         }
2166
2167         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2168                 cancel_delayed_work(&hdev->power_off);
2169
2170         err = hci_dev_do_close(hdev);
2171
2172 done:
2173         hci_dev_put(hdev);
2174         return err;
2175 }
2176
2177 int hci_dev_reset(__u16 dev)
2178 {
2179         struct hci_dev *hdev;
2180         int ret = 0;
2181
2182         hdev = hci_dev_get(dev);
2183         if (!hdev)
2184                 return -ENODEV;
2185
2186         hci_req_lock(hdev);
2187
2188         if (!test_bit(HCI_UP, &hdev->flags)) {
2189                 ret = -ENETDOWN;
2190                 goto done;
2191         }
2192
2193         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2194                 ret = -EBUSY;
2195                 goto done;
2196         }
2197
2198         /* Drop queues */
2199         skb_queue_purge(&hdev->rx_q);
2200         skb_queue_purge(&hdev->cmd_q);
2201
2202         hci_dev_lock(hdev);
2203         hci_inquiry_cache_flush(hdev);
2204         hci_conn_hash_flush(hdev);
2205         hci_dev_unlock(hdev);
2206
2207         if (hdev->flush)
2208                 hdev->flush(hdev);
2209
2210         atomic_set(&hdev->cmd_cnt, 1);
2211         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2212
2213         if (!test_bit(HCI_RAW, &hdev->flags))
2214                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2215
2216 done:
2217         hci_req_unlock(hdev);
2218         hci_dev_put(hdev);
2219         return ret;
2220 }
2221
2222 int hci_dev_reset_stat(__u16 dev)
2223 {
2224         struct hci_dev *hdev;
2225         int ret = 0;
2226
2227         hdev = hci_dev_get(dev);
2228         if (!hdev)
2229                 return -ENODEV;
2230
2231         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2232                 ret = -EBUSY;
2233                 goto done;
2234         }
2235
2236         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2237
2238 done:
2239         hci_dev_put(hdev);
2240         return ret;
2241 }
2242
2243 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2244 {
2245         struct hci_dev *hdev;
2246         struct hci_dev_req dr;
2247         int err = 0;
2248
2249         if (copy_from_user(&dr, arg, sizeof(dr)))
2250                 return -EFAULT;
2251
2252         hdev = hci_dev_get(dr.dev_id);
2253         if (!hdev)
2254                 return -ENODEV;
2255
2256         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2257                 err = -EBUSY;
2258                 goto done;
2259         }
2260
2261         if (hdev->dev_type != HCI_BREDR) {
2262                 err = -EOPNOTSUPP;
2263                 goto done;
2264         }
2265
2266         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2267                 err = -EOPNOTSUPP;
2268                 goto done;
2269         }
2270
2271         switch (cmd) {
2272         case HCISETAUTH:
2273                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2274                                    HCI_INIT_TIMEOUT);
2275                 break;
2276
2277         case HCISETENCRYPT:
2278                 if (!lmp_encrypt_capable(hdev)) {
2279                         err = -EOPNOTSUPP;
2280                         break;
2281                 }
2282
2283                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2284                         /* Auth must be enabled first */
2285                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2286                                            HCI_INIT_TIMEOUT);
2287                         if (err)
2288                                 break;
2289                 }
2290
2291                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2292                                    HCI_INIT_TIMEOUT);
2293                 break;
2294
2295         case HCISETSCAN:
2296                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2297                                    HCI_INIT_TIMEOUT);
2298                 break;
2299
2300         case HCISETLINKPOL:
2301                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2302                                    HCI_INIT_TIMEOUT);
2303                 break;
2304
2305         case HCISETLINKMODE:
2306                 hdev->link_mode = ((__u16) dr.dev_opt) &
2307                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2308                 break;
2309
2310         case HCISETPTYPE:
2311                 hdev->pkt_type = (__u16) dr.dev_opt;
2312                 break;
2313
2314         case HCISETACLMTU:
2315                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2316                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2317                 break;
2318
2319         case HCISETSCOMTU:
2320                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2321                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2322                 break;
2323
2324         default:
2325                 err = -EINVAL;
2326                 break;
2327         }
2328
2329 done:
2330         hci_dev_put(hdev);
2331         return err;
2332 }
2333
2334 int hci_get_dev_list(void __user *arg)
2335 {
2336         struct hci_dev *hdev;
2337         struct hci_dev_list_req *dl;
2338         struct hci_dev_req *dr;
2339         int n = 0, size, err;
2340         __u16 dev_num;
2341
2342         if (get_user(dev_num, (__u16 __user *) arg))
2343                 return -EFAULT;
2344
2345         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2346                 return -EINVAL;
2347
2348         size = sizeof(*dl) + dev_num * sizeof(*dr);
2349
2350         dl = kzalloc(size, GFP_KERNEL);
2351         if (!dl)
2352                 return -ENOMEM;
2353
2354         dr = dl->dev_req;
2355
2356         read_lock(&hci_dev_list_lock);
2357         list_for_each_entry(hdev, &hci_dev_list, list) {
2358                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2359                         cancel_delayed_work(&hdev->power_off);
2360
2361                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2362                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2363
2364                 (dr + n)->dev_id  = hdev->id;
2365                 (dr + n)->dev_opt = hdev->flags;
2366
2367                 if (++n >= dev_num)
2368                         break;
2369         }
2370         read_unlock(&hci_dev_list_lock);
2371
2372         dl->dev_num = n;
2373         size = sizeof(*dl) + n * sizeof(*dr);
2374
2375         err = copy_to_user(arg, dl, size);
2376         kfree(dl);
2377
2378         return err ? -EFAULT : 0;
2379 }
2380
2381 int hci_get_dev_info(void __user *arg)
2382 {
2383         struct hci_dev *hdev;
2384         struct hci_dev_info di;
2385         int err = 0;
2386
2387         if (copy_from_user(&di, arg, sizeof(di)))
2388                 return -EFAULT;
2389
2390         hdev = hci_dev_get(di.dev_id);
2391         if (!hdev)
2392                 return -ENODEV;
2393
2394         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2395                 cancel_delayed_work_sync(&hdev->power_off);
2396
2397         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2398                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2399
2400         strcpy(di.name, hdev->name);
2401         di.bdaddr   = hdev->bdaddr;
2402         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2403         di.flags    = hdev->flags;
2404         di.pkt_type = hdev->pkt_type;
2405         if (lmp_bredr_capable(hdev)) {
2406                 di.acl_mtu  = hdev->acl_mtu;
2407                 di.acl_pkts = hdev->acl_pkts;
2408                 di.sco_mtu  = hdev->sco_mtu;
2409                 di.sco_pkts = hdev->sco_pkts;
2410         } else {
2411                 di.acl_mtu  = hdev->le_mtu;
2412                 di.acl_pkts = hdev->le_pkts;
2413                 di.sco_mtu  = 0;
2414                 di.sco_pkts = 0;
2415         }
2416         di.link_policy = hdev->link_policy;
2417         di.link_mode   = hdev->link_mode;
2418
2419         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2420         memcpy(&di.features, &hdev->features, sizeof(di.features));
2421
2422         if (copy_to_user(arg, &di, sizeof(di)))
2423                 err = -EFAULT;
2424
2425         hci_dev_put(hdev);
2426
2427         return err;
2428 }
2429
2430 /* ---- Interface to HCI drivers ---- */
2431
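/* rfkill state change callback. Blocking the switch powers the device
 * down unless it is still in the setup stage; devices bound to a user
 * channel refuse the change with -EBUSY.
 */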
2432 static int hci_rfkill_set_block(void *data, bool blocked)
2433 {
2434         struct hci_dev *hdev = data;
2435
2436         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2437
2438         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2439                 return -EBUSY;
2440
2441         if (blocked) {
2442                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2443                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2444                         hci_dev_do_close(hdev);
2445         } else {
2446                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2447         }
2448
2449         return 0;
2450 }
2451
2452 static const struct rfkill_ops hci_rfkill_ops = {
2453         .set_block = hci_rfkill_set_block,
2454 };
2455
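/* Power-on work (queued on the request workqueue, see
 * hci_register_dev). Error conditions that were deliberately ignored
 * during the setup stage (rfkill, missing public/static address) are
 * re-checked here and, if still present, the device is turned back
 * off.
 */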
2456 static void hci_power_on(struct work_struct *work)
2457 {
2458         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2459         int err;
2460
2461         BT_DBG("%s", hdev->name);
2462
2463         err = hci_dev_do_open(hdev);
2464         if (err < 0) {
2465                 mgmt_set_powered_failed(hdev, err);
2466                 return;
2467         }
2468
2469         /* During the HCI setup phase, a few error conditions are
2470          * ignored and they need to be checked now. If they are still
2471          * valid, it is important to turn the device back off.
2472          */
2473         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2474             (hdev->dev_type == HCI_BREDR &&
2475              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2476              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2477                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2478                 hci_dev_do_close(hdev);
2479         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2480                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2481                                    HCI_AUTO_OFF_TIMEOUT);
2482         }
2483
2484         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2485                 mgmt_index_added(hdev);
2486 }
2487
2488 static void hci_power_off(struct work_struct *work)
2489 {
2490         struct hci_dev *hdev = container_of(work, struct hci_dev,
2491                                             power_off.work);
2492
2493         BT_DBG("%s", hdev->name);
2494
2495         hci_dev_do_close(hdev);
2496 }
2497
2498 static void hci_discov_off(struct work_struct *work)
2499 {
2500         struct hci_dev *hdev;
2501
2502         hdev = container_of(work, struct hci_dev, discov_off.work);
2503
2504         BT_DBG("%s", hdev->name);
2505
2506         mgmt_discoverable_timeout(hdev);
2507 }
2508
2509 int hci_uuids_clear(struct hci_dev *hdev)
2510 {
2511         struct bt_uuid *uuid, *tmp;
2512
2513         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2514                 list_del(&uuid->list);
2515                 kfree(uuid);
2516         }
2517
2518         return 0;
2519 }
2520
2521 int hci_link_keys_clear(struct hci_dev *hdev)
2522 {
2523         struct list_head *p, *n;
2524
2525         list_for_each_safe(p, n, &hdev->link_keys) {
2526                 struct link_key *key;
2527
2528                 key = list_entry(p, struct link_key, list);
2529
2530                 list_del(p);
2531                 kfree(key);
2532         }
2533
2534         return 0;
2535 }
2536
2537 int hci_smp_ltks_clear(struct hci_dev *hdev)
2538 {
2539         struct smp_ltk *k, *tmp;
2540
2541         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2542                 list_del(&k->list);
2543                 kfree(k);
2544         }
2545
2546         return 0;
2547 }
2548
2549 void hci_smp_irks_clear(struct hci_dev *hdev)
2550 {
2551         struct smp_irk *k, *tmp;
2552
2553         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2554                 list_del(&k->list);
2555                 kfree(k);
2556         }
2557 }
2558
2559 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2560 {
2561         struct link_key *k;
2562
2563         list_for_each_entry(k, &hdev->link_keys, list)
2564                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2565                         return k;
2566
2567         return NULL;
2568 }
2569
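/* Decide whether a BR/EDR link key should be stored persistently,
 * based on the key type and the bonding requirements that both sides
 * declared during pairing.
 */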
2570 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2571                                u8 key_type, u8 old_key_type)
2572 {
2573         /* Legacy key */
2574         if (key_type < 0x03)
2575                 return true;
2576
2577         /* Debug keys are insecure so don't store them persistently */
2578         if (key_type == HCI_LK_DEBUG_COMBINATION)
2579                 return false;
2580
2581         /* Changed combination key and there's no previous one */
2582         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2583                 return false;
2584
2585         /* Security mode 3 case */
2586         if (!conn)
2587                 return true;
2588
2589         /* Neither the local nor the remote side requested no-bonding */
2590         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2591                 return true;
2592
2593         /* Local side had dedicated bonding as requirement */
2594         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2595                 return true;
2596
2597         /* Remote side had dedicated bonding as requirement */
2598         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2599                 return true;
2600
2601         /* If none of the above criteria match, then don't store the key
2602          * persistently */
2603         return false;
2604 }
2605
2606 static bool ltk_type_master(u8 type)
2607 {
2608         if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2609                 return true;
2610
2611         return false;
2612 }
2613
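/* Look up a Long Term Key by EDIV and Rand. The master flag selects
 * between keys used in the master role (STK and master LTK, see
 * ltk_type_master) and keys used in the slave role.
 */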
2614 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2615                              bool master)
2616 {
2617         struct smp_ltk *k;
2618
2619         list_for_each_entry(k, &hdev->long_term_keys, list) {
2620                 if (k->ediv != ediv ||
2621                     memcmp(rand, k->rand, sizeof(k->rand)))
2622                         continue;
2623
2624                 if (ltk_type_master(k->type) != master)
2625                         continue;
2626
2627                 return k;
2628         }
2629
2630         return NULL;
2631 }
2632
2633 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2634                                      u8 addr_type, bool master)
2635 {
2636         struct smp_ltk *k;
2637
2638         list_for_each_entry(k, &hdev->long_term_keys, list)
2639                 if (addr_type == k->bdaddr_type &&
2640                     bacmp(bdaddr, &k->bdaddr) == 0 &&
2641                     ltk_type_master(k->type) == master)
2642                         return k;
2643
2644         return NULL;
2645 }
2646
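/* Find the Identity Resolving Key matching a Resolvable Private
 * Address: first check the RPA each IRK was last seen with, then try
 * to cryptographically resolve the address against every stored IRK
 * and cache the result on a match.
 */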
2647 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2648 {
2649         struct smp_irk *irk;
2650
2651         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2652                 if (!bacmp(&irk->rpa, rpa))
2653                         return irk;
2654         }
2655
2656         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2657                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2658                         bacpy(&irk->rpa, rpa);
2659                         return irk;
2660                 }
2661         }
2662
2663         return NULL;
2664 }
2665
2666 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2667                                      u8 addr_type)
2668 {
2669         struct smp_irk *irk;
2670
2671         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2672                 if (addr_type == irk->addr_type &&
2673                     bacmp(bdaddr, &irk->bdaddr) == 0)
2674                         return irk;
2675         }
2676
2677         return NULL;
2678 }
2679
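/* Store a new link key or update an existing one. Works around buggy
 * controllers that report a changed combination key without any
 * previous key, and, for new keys, notifies the management interface
 * with the persistence decision from hci_persistent_key().
 */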
2680 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2681                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2682 {
2683         struct link_key *key, *old_key;
2684         u8 old_key_type;
2685         bool persistent;
2686
2687         old_key = hci_find_link_key(hdev, bdaddr);
2688         if (old_key) {
2689                 old_key_type = old_key->type;
2690                 key = old_key;
2691         } else {
2692                 old_key_type = conn ? conn->key_type : 0xff;
2693                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2694                 if (!key)
2695                         return -ENOMEM;
2696                 list_add(&key->list, &hdev->link_keys);
2697         }
2698
2699         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2700
2701         /* Some buggy controller combinations generate a changed
2702          * combination key for legacy pairing even when there's no
2703          * previous key */
2704         if (type == HCI_LK_CHANGED_COMBINATION &&
2705             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2706                 type = HCI_LK_COMBINATION;
2707                 if (conn)
2708                         conn->key_type = type;
2709         }
2710
2711         bacpy(&key->bdaddr, bdaddr);
2712         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2713         key->pin_len = pin_len;
2714
2715         if (type == HCI_LK_CHANGED_COMBINATION)
2716                 key->type = old_key_type;
2717         else
2718                 key->type = type;
2719
2720         if (!new_key)
2721                 return 0;
2722
2723         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2724
2725         mgmt_new_link_key(hdev, key, persistent);
2726
2727         if (conn)
2728                 conn->flush_key = !persistent;
2729
2730         return 0;
2731 }
2732
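/* Store a new SMP Long Term Key or update an existing one. New LTKs
 * (but not short term keys) are reported to the management interface;
 * keys for non-static random (i.e. non-identity) addresses are
 * flagged as non-persistent.
 */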
2733 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
2734                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
2735                 ediv, u8 rand[8])
2736 {
2737         struct smp_ltk *key, *old_key;
2738         bool master = ltk_type_master(type);
2739         u8 persistent;
2740
2741         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
2742         if (old_key)
2743                 key = old_key;
2744         else {
2745                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2746                 if (!key)
2747                         return -ENOMEM;
2748                 list_add(&key->list, &hdev->long_term_keys);
2749         }
2750
2751         bacpy(&key->bdaddr, bdaddr);
2752         key->bdaddr_type = addr_type;
2753         memcpy(key->val, tk, sizeof(key->val));
2754         key->authenticated = authenticated;
2755         key->ediv = ediv;
2756         key->enc_size = enc_size;
2757         key->type = type;
2758         memcpy(key->rand, rand, sizeof(key->rand));
2759
2760         if (!new_key)
2761                 return 0;
2762
2763         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2764                 persistent = 0;
2765         else
2766                 persistent = 1;
2767
2768         if (type == HCI_SMP_LTK || type == HCI_SMP_LTK_SLAVE)
2769                 mgmt_new_ltk(hdev, key, persistent);
2770
2771         return 0;
2772 }
2773
2774 int hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type,
2775                 u8 val[16], bdaddr_t *rpa)
2776 {
2777         struct smp_irk *irk;
2778
2779         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2780         if (!irk) {
2781                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2782                 if (!irk)
2783                         return -ENOMEM;
2784
2785                 bacpy(&irk->bdaddr, bdaddr);
2786                 irk->addr_type = addr_type;
2787
2788                 list_add(&irk->list, &hdev->identity_resolving_keys);
2789         }
2790
2791         memcpy(irk->val, val, 16);
2792         bacpy(&irk->rpa, rpa);
2793
2794         return 0;
2795 }
2796
2797 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2798 {
2799         struct link_key *key;
2800
2801         key = hci_find_link_key(hdev, bdaddr);
2802         if (!key)
2803                 return -ENOENT;
2804
2805         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2806
2807         list_del(&key->list);
2808         kfree(key);
2809
2810         return 0;
2811 }
2812
2813 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2814 {
2815         struct smp_ltk *k, *tmp;
2816
2817         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2818                 if (bacmp(bdaddr, &k->bdaddr))
2819                         continue;
2820
2821                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2822
2823                 list_del(&k->list);
2824                 kfree(k);
2825         }
2826
2827         return 0;
2828 }
2829
2830 /* HCI command timer function */
2831 static void hci_cmd_timeout(unsigned long arg)
2832 {
2833         struct hci_dev *hdev = (void *) arg;
2834
2835         if (hdev->sent_cmd) {
2836                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2837                 u16 opcode = __le16_to_cpu(sent->opcode);
2838
2839                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2840         } else {
2841                 BT_ERR("%s command tx timeout", hdev->name);
2842         }
2843
2844         atomic_set(&hdev->cmd_cnt, 1);
2845         queue_work(hdev->workqueue, &hdev->cmd_work);
2846 }
2847
2848 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2849                                           bdaddr_t *bdaddr)
2850 {
2851         struct oob_data *data;
2852
2853         list_for_each_entry(data, &hdev->remote_oob_data, list)
2854                 if (bacmp(bdaddr, &data->bdaddr) == 0)
2855                         return data;
2856
2857         return NULL;
2858 }
2859
2860 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2861 {
2862         struct oob_data *data;
2863
2864         data = hci_find_remote_oob_data(hdev, bdaddr);
2865         if (!data)
2866                 return -ENOENT;
2867
2868         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2869
2870         list_del(&data->list);
2871         kfree(data);
2872
2873         return 0;
2874 }
2875
2876 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2877 {
2878         struct oob_data *data, *n;
2879
2880         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2881                 list_del(&data->list);
2882                 kfree(data);
2883         }
2884
2885         return 0;
2886 }
2887
2888 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2889                             u8 *hash, u8 *randomizer)
2890 {
2891         struct oob_data *data;
2892
2893         data = hci_find_remote_oob_data(hdev, bdaddr);
2894         if (!data) {
2895                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2896                 if (!data)
2897                         return -ENOMEM;
2898
2899                 bacpy(&data->bdaddr, bdaddr);
2900                 list_add(&data->list, &hdev->remote_oob_data);
2901         }
2902
2903         memcpy(data->hash192, hash, sizeof(data->hash192));
2904         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2905
2906         memset(data->hash256, 0, sizeof(data->hash256));
2907         memset(data->randomizer256, 0, sizeof(data->randomizer256));
2908
2909         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2910
2911         return 0;
2912 }
2913
2914 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2915                                 u8 *hash192, u8 *randomizer192,
2916                                 u8 *hash256, u8 *randomizer256)
2917 {
2918         struct oob_data *data;
2919
2920         data = hci_find_remote_oob_data(hdev, bdaddr);
2921         if (!data) {
2922                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2923                 if (!data)
2924                         return -ENOMEM;
2925
2926                 bacpy(&data->bdaddr, bdaddr);
2927                 list_add(&data->list, &hdev->remote_oob_data);
2928         }
2929
2930         memcpy(data->hash192, hash192, sizeof(data->hash192));
2931         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
2932
2933         memcpy(data->hash256, hash256, sizeof(data->hash256));
2934         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
2935
2936         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2937
2938         return 0;
2939 }
2940
2941 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2942                                          bdaddr_t *bdaddr, u8 type)
2943 {
2944         struct bdaddr_list *b;
2945
2946         list_for_each_entry(b, &hdev->blacklist, list) {
2947                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2948                         return b;
2949         }
2950
2951         return NULL;
2952 }
2953
2954 int hci_blacklist_clear(struct hci_dev *hdev)
2955 {
2956         struct list_head *p, *n;
2957
2958         list_for_each_safe(p, n, &hdev->blacklist) {
2959                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2960
2961                 list_del(p);
2962                 kfree(b);
2963         }
2964
2965         return 0;
2966 }
2967
2968 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2969 {
2970         struct bdaddr_list *entry;
2971
2972         if (!bacmp(bdaddr, BDADDR_ANY))
2973                 return -EBADF;
2974
2975         if (hci_blacklist_lookup(hdev, bdaddr, type))
2976                 return -EEXIST;
2977
2978         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2979         if (!entry)
2980                 return -ENOMEM;
2981
2982         bacpy(&entry->bdaddr, bdaddr);
2983         entry->bdaddr_type = type;
2984
2985         list_add(&entry->list, &hdev->blacklist);
2986
2987         return mgmt_device_blocked(hdev, bdaddr, type);
2988 }
2989
2990 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2991 {
2992         struct bdaddr_list *entry;
2993
2994         if (!bacmp(bdaddr, BDADDR_ANY))
2995                 return hci_blacklist_clear(hdev);
2996
2997         entry = hci_blacklist_lookup(hdev, bdaddr, type);
2998         if (!entry)
2999                 return -ENOENT;
3000
3001         list_del(&entry->list);
3002         kfree(entry);
3003
3004         return mgmt_device_unblocked(hdev, bdaddr, type);
3005 }
3006
3007 /* This function requires the caller holds hdev->lock */
3008 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3009                                                bdaddr_t *addr, u8 addr_type)
3010 {
3011         struct hci_conn_params *params;
3012
3013         list_for_each_entry(params, &hdev->le_conn_params, list) {
3014                 if (bacmp(&params->addr, addr) == 0 &&
3015                     params->addr_type == addr_type) {
3016                         return params;
3017                 }
3018         }
3019
3020         return NULL;
3021 }
3022
3023 /* This function requires the caller holds hdev->lock */
3024 void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3025                          u16 conn_min_interval, u16 conn_max_interval)
3026 {
3027         struct hci_conn_params *params;
3028
3029         params = hci_conn_params_lookup(hdev, addr, addr_type);
3030         if (params) {
3031                 params->conn_min_interval = conn_min_interval;
3032                 params->conn_max_interval = conn_max_interval;
3033                 return;
3034         }
3035
3036         params = kzalloc(sizeof(*params), GFP_KERNEL);
3037         if (!params) {
3038                 BT_ERR("Out of memory");
3039                 return;
3040         }
3041
3042         bacpy(&params->addr, addr);
3043         params->addr_type = addr_type;
3044         params->conn_min_interval = conn_min_interval;
3045         params->conn_max_interval = conn_max_interval;
3046
3047         list_add(&params->list, &hdev->le_conn_params);
3048
3049         BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
3050                "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
3051                conn_max_interval);
3052 }
3053
3054 /* This function requires the caller holds hdev->lock */
3055 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3056 {
3057         struct hci_conn_params *params;
3058
3059         params = hci_conn_params_lookup(hdev, addr, addr_type);
3060         if (!params)
3061                 return;
3062
3063         list_del(&params->list);
3064         kfree(params);
3065
3066         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3067 }
3068
3069 /* This function requires the caller holds hdev->lock */
3070 void hci_conn_params_clear(struct hci_dev *hdev)
3071 {
3072         struct hci_conn_params *params, *tmp;
3073
3074         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3075                 list_del(&params->list);
3076                 kfree(params);
3077         }
3078
3079         BT_DBG("All LE connection parameters were removed");
3080 }
3081
3082 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3083 {
3084         if (status) {
3085                 BT_ERR("Failed to start inquiry: status %d", status);
3086
3087                 hci_dev_lock(hdev);
3088                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3089                 hci_dev_unlock(hdev);
3090                 return;
3091         }
3092 }
3093
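/* Completion handler for the LE scan disable request. For LE-only
 * discovery this ends the discovery session; for interleaved
 * discovery it flushes the inquiry cache and starts the BR/EDR
 * inquiry phase.
 */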
3094 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3095 {
3096         /* General inquiry access code (GIAC) */
3097         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3098         struct hci_request req;
3099         struct hci_cp_inquiry cp;
3100         int err;
3101
3102         if (status) {
3103                 BT_ERR("Failed to disable LE scanning: status %d", status);
3104                 return;
3105         }
3106
3107         switch (hdev->discovery.type) {
3108         case DISCOV_TYPE_LE:
3109                 hci_dev_lock(hdev);
3110                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3111                 hci_dev_unlock(hdev);
3112                 break;
3113
3114         case DISCOV_TYPE_INTERLEAVED:
3115                 hci_req_init(&req, hdev);
3116
3117                 memset(&cp, 0, sizeof(cp));
3118                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3119                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3120                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3121
3122                 hci_dev_lock(hdev);
3123
3124                 hci_inquiry_cache_flush(hdev);
3125
3126                 err = hci_req_run(&req, inquiry_complete);
3127                 if (err) {
3128                         BT_ERR("Inquiry request failed: err %d", err);
3129                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3130                 }
3131
3132                 hci_dev_unlock(hdev);
3133                 break;
3134         }
3135 }
3136
3137 static void le_scan_disable_work(struct work_struct *work)
3138 {
3139         struct hci_dev *hdev = container_of(work, struct hci_dev,
3140                                             le_scan_disable.work);
3141         struct hci_cp_le_set_scan_enable cp;
3142         struct hci_request req;
3143         int err;
3144
3145         BT_DBG("%s", hdev->name);
3146
3147         hci_req_init(&req, hdev);
3148
3149         memset(&cp, 0, sizeof(cp));
3150         cp.enable = LE_SCAN_DISABLE;
3151         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3152
3153         err = hci_req_run(&req, le_scan_disable_work_complete);
3154         if (err)
3155                 BT_ERR("Disable LE scanning request failed: err %d", err);
3156 }
3157
3158 /* Alloc HCI device */
3159 struct hci_dev *hci_alloc_dev(void)
3160 {
3161         struct hci_dev *hdev;
3162
3163         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3164         if (!hdev)
3165                 return NULL;
3166
3167         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3168         hdev->esco_type = (ESCO_HV1);
3169         hdev->link_mode = (HCI_LM_ACCEPT);
3170         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3171         hdev->io_capability = 0x03;     /* No Input No Output */
3172         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3173         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3174
3175         hdev->sniff_max_interval = 800;
3176         hdev->sniff_min_interval = 80;
3177
3178         hdev->le_scan_interval = 0x0060;
3179         hdev->le_scan_window = 0x0030;
3180         hdev->le_conn_min_interval = 0x0028;
3181         hdev->le_conn_max_interval = 0x0038;
3182
3183         mutex_init(&hdev->lock);
3184         mutex_init(&hdev->req_lock);
3185
3186         INIT_LIST_HEAD(&hdev->mgmt_pending);
3187         INIT_LIST_HEAD(&hdev->blacklist);
3188         INIT_LIST_HEAD(&hdev->uuids);
3189         INIT_LIST_HEAD(&hdev->link_keys);
3190         INIT_LIST_HEAD(&hdev->long_term_keys);
3191         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3192         INIT_LIST_HEAD(&hdev->remote_oob_data);
3193         INIT_LIST_HEAD(&hdev->le_conn_params);
3194         INIT_LIST_HEAD(&hdev->conn_hash.list);
3195
3196         INIT_WORK(&hdev->rx_work, hci_rx_work);
3197         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3198         INIT_WORK(&hdev->tx_work, hci_tx_work);
3199         INIT_WORK(&hdev->power_on, hci_power_on);
3200
3201         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3202         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3203         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3204
3205         skb_queue_head_init(&hdev->rx_q);
3206         skb_queue_head_init(&hdev->cmd_q);
3207         skb_queue_head_init(&hdev->raw_q);
3208
3209         init_waitqueue_head(&hdev->req_wait_q);
3210
3211         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
3212
3213         hci_init_sysfs(hdev);
3214         discovery_init(hdev);
3215
3216         return hdev;
3217 }
3218 EXPORT_SYMBOL(hci_alloc_dev);
3219
3220 /* Free HCI device */
3221 void hci_free_dev(struct hci_dev *hdev)
3222 {
3223         /* Will be freed via the device release callback */
3224         put_device(&hdev->dev);
3225 }
3226 EXPORT_SYMBOL(hci_free_dev);
3227
3228 /* Register HCI device */
3229 int hci_register_dev(struct hci_dev *hdev)
3230 {
3231         int id, error;
3232
3233         if (!hdev->open || !hdev->close)
3234                 return -EINVAL;
3235
3236         /* Do not allow HCI_AMP devices to register at index 0,
3237          * so the index can be used as the AMP controller ID.
3238          */
3239         switch (hdev->dev_type) {
3240         case HCI_BREDR:
3241                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3242                 break;
3243         case HCI_AMP:
3244                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3245                 break;
3246         default:
3247                 return -EINVAL;
3248         }
3249
3250         if (id < 0)
3251                 return id;
3252
3253         sprintf(hdev->name, "hci%d", id);
3254         hdev->id = id;
3255
3256         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3257
3258         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3259                                           WQ_MEM_RECLAIM, 1, hdev->name);
3260         if (!hdev->workqueue) {
3261                 error = -ENOMEM;
3262                 goto err;
3263         }
3264
3265         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3266                                               WQ_MEM_RECLAIM, 1, hdev->name);
3267         if (!hdev->req_workqueue) {
3268                 destroy_workqueue(hdev->workqueue);
3269                 error = -ENOMEM;
3270                 goto err;
3271         }
3272
3273         if (!IS_ERR_OR_NULL(bt_debugfs))
3274                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3275
3276         dev_set_name(&hdev->dev, "%s", hdev->name);
3277
3278         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3279                                                CRYPTO_ALG_ASYNC);
3280         if (IS_ERR(hdev->tfm_aes)) {
3281                 BT_ERR("Unable to create crypto context");
3282                 error = PTR_ERR(hdev->tfm_aes);
3283                 hdev->tfm_aes = NULL;
3284                 goto err_wqueue;
3285         }
3286
3287         error = device_add(&hdev->dev);
3288         if (error < 0)
3289                 goto err_tfm;
3290
3291         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3292                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3293                                     hdev);
3294         if (hdev->rfkill) {
3295                 if (rfkill_register(hdev->rfkill) < 0) {
3296                         rfkill_destroy(hdev->rfkill);
3297                         hdev->rfkill = NULL;
3298                 }
3299         }
3300
3301         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3302                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3303
3304         set_bit(HCI_SETUP, &hdev->dev_flags);
3305         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3306
3307         if (hdev->dev_type == HCI_BREDR) {
3308                 /* Assume BR/EDR support until proven otherwise (such as
3309          * through reading supported features during init).
3310                  */
3311                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3312         }
3313
3314         write_lock(&hci_dev_list_lock);
3315         list_add(&hdev->list, &hci_dev_list);
3316         write_unlock(&hci_dev_list_lock);
3317
3318         hci_notify(hdev, HCI_DEV_REG);
3319         hci_dev_hold(hdev);
3320
3321         queue_work(hdev->req_workqueue, &hdev->power_on);
3322
3323         return id;
3324
3325 err_tfm:
3326         crypto_free_blkcipher(hdev->tfm_aes);
3327 err_wqueue:
3328         destroy_workqueue(hdev->workqueue);
3329         destroy_workqueue(hdev->req_workqueue);
3330 err:
3331         ida_simple_remove(&hci_index_ida, hdev->id);
3332
3333         return error;
3334 }
3335 EXPORT_SYMBOL(hci_register_dev);
3336
3337 /* Unregister HCI device */
3338 void hci_unregister_dev(struct hci_dev *hdev)
3339 {
3340         int i, id;
3341
3342         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3343
3344         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3345
3346         id = hdev->id;
3347
3348         write_lock(&hci_dev_list_lock);
3349         list_del(&hdev->list);
3350         write_unlock(&hci_dev_list_lock);
3351
3352         hci_dev_do_close(hdev);
3353
3354         for (i = 0; i < NUM_REASSEMBLY; i++)
3355                 kfree_skb(hdev->reassembly[i]);
3356
3357         cancel_work_sync(&hdev->power_on);
3358
3359         if (!test_bit(HCI_INIT, &hdev->flags) &&
3360             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
3361                 hci_dev_lock(hdev);
3362                 mgmt_index_removed(hdev);
3363                 hci_dev_unlock(hdev);
3364         }
3365
3366         /* mgmt_index_removed should take care of emptying the
3367          * pending list */
3368         BUG_ON(!list_empty(&hdev->mgmt_pending));
3369
3370         hci_notify(hdev, HCI_DEV_UNREG);
3371
3372         if (hdev->rfkill) {
3373                 rfkill_unregister(hdev->rfkill);
3374                 rfkill_destroy(hdev->rfkill);
3375         }
3376
3377         if (hdev->tfm_aes)
3378                 crypto_free_blkcipher(hdev->tfm_aes);
3379
3380         device_del(&hdev->dev);
3381
3382         debugfs_remove_recursive(hdev->debugfs);
3383
3384         destroy_workqueue(hdev->workqueue);
3385         destroy_workqueue(hdev->req_workqueue);
3386
3387         hci_dev_lock(hdev);
3388         hci_blacklist_clear(hdev);
3389         hci_uuids_clear(hdev);
3390         hci_link_keys_clear(hdev);
3391         hci_smp_ltks_clear(hdev);
3392         hci_smp_irks_clear(hdev);
3393         hci_remote_oob_data_clear(hdev);
3394         hci_conn_params_clear(hdev);
3395         hci_dev_unlock(hdev);
3396
3397         hci_dev_put(hdev);
3398
3399         ida_simple_remove(&hci_index_ida, id);
3400 }
3401 EXPORT_SYMBOL(hci_unregister_dev);
3402
3403 /* Suspend HCI device */
3404 int hci_suspend_dev(struct hci_dev *hdev)
3405 {
3406         hci_notify(hdev, HCI_DEV_SUSPEND);
3407         return 0;
3408 }
3409 EXPORT_SYMBOL(hci_suspend_dev);
3410
3411 /* Resume HCI device */
3412 int hci_resume_dev(struct hci_dev *hdev)
3413 {
3414         hci_notify(hdev, HCI_DEV_RESUME);
3415         return 0;
3416 }
3417 EXPORT_SYMBOL(hci_resume_dev);
3418
3419 /* Receive frame from HCI drivers */
3420 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3421 {
3422         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3423                       !test_bit(HCI_INIT, &hdev->flags))) {
3424                 kfree_skb(skb);
3425                 return -ENXIO;
3426         }
3427
3428         /* Incoming skb */
3429         bt_cb(skb)->incoming = 1;
3430
3431         /* Time stamp */
3432         __net_timestamp(skb);
3433
3434         skb_queue_tail(&hdev->rx_q, skb);
3435         queue_work(hdev->workqueue, &hdev->rx_work);
3436
3437         return 0;
3438 }
3439 EXPORT_SYMBOL(hci_recv_frame);
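/* A minimal, hypothetical driver-side sketch (names illustrative): once the
 * transport driver has a complete frame, it tags the skb with its packet
 * type and hands it to the core:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 *
 * The core takes ownership of the skb; on error it has already been freed.
 */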
3440
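/* Reassemble a possibly fragmented HCI frame of the given packet type in
 * hdev->reassembly[index]. Once a complete frame has been forwarded via
 * hci_recv_frame(), the number of unconsumed input bytes is returned so
 * the caller can feed them back in; a negative errno (-EILSEQ for a bad
 * type or index, -ENOMEM on allocation failure) indicates an error.
 */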
3441 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3442                           int count, __u8 index)
3443 {
3444         int len = 0;
3445         int hlen = 0;
3446         int remain = count;
3447         struct sk_buff *skb;
3448         struct bt_skb_cb *scb;
3449
3450         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3451             index >= NUM_REASSEMBLY)
3452                 return -EILSEQ;
3453
3454         skb = hdev->reassembly[index];
3455
3456         if (!skb) {
3457                 switch (type) {
3458                 case HCI_ACLDATA_PKT:
3459                         len = HCI_MAX_FRAME_SIZE;
3460                         hlen = HCI_ACL_HDR_SIZE;
3461                         break;
3462                 case HCI_EVENT_PKT:
3463                         len = HCI_MAX_EVENT_SIZE;
3464                         hlen = HCI_EVENT_HDR_SIZE;
3465                         break;
3466                 case HCI_SCODATA_PKT:
3467                         len = HCI_MAX_SCO_SIZE;
3468                         hlen = HCI_SCO_HDR_SIZE;
3469                         break;
3470                 }
3471
3472                 skb = bt_skb_alloc(len, GFP_ATOMIC);
3473                 if (!skb)
3474                         return -ENOMEM;
3475
3476                 scb = (void *) skb->cb;
3477                 scb->expect = hlen;
3478                 scb->pkt_type = type;
3479
3480                 hdev->reassembly[index] = skb;
3481         }
3482
3483         while (count) {
3484                 scb = (void *) skb->cb;
3485                 len = min_t(uint, scb->expect, count);
3486
3487                 memcpy(skb_put(skb, len), data, len);
3488
3489                 count -= len;
3490                 data += len;
3491                 scb->expect -= len;
3492                 remain = count;
3493
3494                 switch (type) {
3495                 case HCI_EVENT_PKT:
3496                         if (skb->len == HCI_EVENT_HDR_SIZE) {
3497                                 struct hci_event_hdr *h = hci_event_hdr(skb);
3498                                 scb->expect = h->plen;
3499
3500                                 if (skb_tailroom(skb) < scb->expect) {
3501                                         kfree_skb(skb);
3502                                         hdev->reassembly[index] = NULL;
3503                                         return -ENOMEM;
3504                                 }
3505                         }
3506                         break;
3507
3508                 case HCI_ACLDATA_PKT:
3509                         if (skb->len == HCI_ACL_HDR_SIZE) {
3510                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3511                                 scb->expect = __le16_to_cpu(h->dlen);
3512
3513                                 if (skb_tailroom(skb) < scb->expect) {
3514                                         kfree_skb(skb);
3515                                         hdev->reassembly[index] = NULL;
3516                                         return -ENOMEM;
3517                                 }
3518                         }
3519                         break;
3520
3521                 case HCI_SCODATA_PKT:
3522                         if (skb->len == HCI_SCO_HDR_SIZE) {
3523                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3524                                 scb->expect = h->dlen;
3525
3526                                 if (skb_tailroom(skb) < scb->expect) {
3527                                         kfree_skb(skb);
3528                                         hdev->reassembly[index] = NULL;
3529                                         return -ENOMEM;
3530                                 }
3531                         }
3532                         break;
3533                 }
3534
3535                 if (scb->expect == 0) {
3536                         /* Complete frame */
3537
3538                         bt_cb(skb)->pkt_type = type;
3539                         hci_recv_frame(hdev, skb);
3540
3541                         hdev->reassembly[index] = NULL;
3542                         return remain;
3543                 }
3544         }
3545
3546         return remain;
3547 }
3548
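/* Feed a buffer of possibly partial frame data of a known packet type into
 * the reassembly machinery; the per-type reassembly slot is derived from
 * the packet type itself (type - 1). Returns a negative errno on failure,
 * zero otherwise.
 */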
3549 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3550 {
3551         int rem = 0;
3552
3553         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3554                 return -EILSEQ;
3555
3556         while (count) {
3557                 rem = hci_reassembly(hdev, type, data, count, type - 1);
3558                 if (rem < 0)
3559                         return rem;
3560
3561                 data += (count - rem);
3562                 count = rem;
3563         }
3564
3565         return rem;
3566 }
3567 EXPORT_SYMBOL(hci_recv_fragment);
3568
3569 #define STREAM_REASSEMBLY 0
3570
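/* Feed a raw byte stream (e.g. from a UART transport) into the reassembly
 * machinery. H:4 style, the packet type is taken from the first byte of
 * each frame, and a single dedicated reassembly slot is used.
 */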
3571 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3572 {
3573         int type;
3574         int rem = 0;
3575
3576         while (count) {
3577                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3578
3579                 if (!skb) {
3580                         struct { char type; } *pkt;
3581
3582                         /* Start of the frame */
3583                         pkt = data;
3584                         type = pkt->type;
3585
3586                         data++;
3587                         count--;
3588                 } else
3589                         type = bt_cb(skb)->pkt_type;
3590
3591                 rem = hci_reassembly(hdev, type, data, count,
3592                                      STREAM_REASSEMBLY);
3593                 if (rem < 0)
3594                         return rem;
3595
3596                 data += (count - rem);
3597                 count = rem;
3598         }
3599
3600         return rem;
3601 }
3602 EXPORT_SYMBOL(hci_recv_stream_fragment);
3603
3604 /* ---- Interface to upper protocols ---- */
3605
3606 int hci_register_cb(struct hci_cb *cb)
3607 {
3608         BT_DBG("%p name %s", cb, cb->name);
3609
3610         write_lock(&hci_cb_list_lock);
3611         list_add(&cb->list, &hci_cb_list);
3612         write_unlock(&hci_cb_list_lock);
3613
3614         return 0;
3615 }
3616 EXPORT_SYMBOL(hci_register_cb);
3617
3618 int hci_unregister_cb(struct hci_cb *cb)
3619 {
3620         BT_DBG("%p name %s", cb, cb->name);
3621
3622         write_lock(&hci_cb_list_lock);
3623         list_del(&cb->list);
3624         write_unlock(&hci_cb_list_lock);
3625
3626         return 0;
3627 }
3628 EXPORT_SYMBOL(hci_unregister_cb);
3629
3630 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3631 {
3632         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3633
3634         /* Time stamp */
3635         __net_timestamp(skb);
3636
3637         /* Send copy to monitor */
3638         hci_send_to_monitor(hdev, skb);
3639
3640         if (atomic_read(&hdev->promisc)) {
3641                 /* Send copy to the sockets */
3642                 hci_send_to_sock(hdev, skb);
3643         }
3644
3645         /* Get rid of the skb owner prior to sending to the driver. */
3646         skb_orphan(skb);
3647
3648         if (hdev->send(hdev, skb) < 0)
3649                 BT_ERR("%s sending frame failed", hdev->name);
3650 }
3651
3652 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3653 {
3654         skb_queue_head_init(&req->cmd_q);
3655         req->hdev = hdev;
3656         req->err = 0;
3657 }
3658
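/* Run a request built with hci_req_init()/hci_req_add(): tag the last
 * queued command with the completion callback, splice the request onto the
 * device command queue and kick the command worker. Fails with req->err if
 * request building failed, or with -ENODATA for an empty request.
 *
 * A minimal, hypothetical usage sketch (my_complete_cb is illustrative):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, my_complete_cb);
 */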
3659 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3660 {
3661         struct hci_dev *hdev = req->hdev;
3662         struct sk_buff *skb;
3663         unsigned long flags;
3664
3665         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3666
3667         /* If an error occurred during request building, remove all HCI
3668          * commands queued on the HCI request queue.
3669          */
3670         if (req->err) {
3671                 skb_queue_purge(&req->cmd_q);
3672                 return req->err;
3673         }
3674
3675         /* Do not allow empty requests */
3676         if (skb_queue_empty(&req->cmd_q))
3677                 return -ENODATA;
3678
3679         skb = skb_peek_tail(&req->cmd_q);
3680         bt_cb(skb)->req.complete = complete;
3681
3682         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3683         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3684         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3685
3686         queue_work(hdev->workqueue, &hdev->cmd_work);
3687
3688         return 0;
3689 }
3690
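/* Allocate and build an skb holding a single HCI command packet (header
 * plus parameters). Returns NULL on allocation failure.
 */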
3691 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
3692                                        u32 plen, const void *param)
3693 {
3694         int len = HCI_COMMAND_HDR_SIZE + plen;
3695         struct hci_command_hdr *hdr;
3696         struct sk_buff *skb;
3697
3698         skb = bt_skb_alloc(len, GFP_ATOMIC);
3699         if (!skb)
3700                 return NULL;
3701
3702         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
3703         hdr->opcode = cpu_to_le16(opcode);
3704         hdr->plen   = plen;
3705
3706         if (plen)
3707                 memcpy(skb_put(skb, plen), param, plen);
3708
3709         BT_DBG("skb len %d", skb->len);
3710
3711         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
3712
3713         return skb;
3714 }
3715
3716 /* Send HCI command */
3717 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3718                  const void *param)
3719 {
3720         struct sk_buff *skb;
3721
3722         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3723
3724         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3725         if (!skb) {
3726                 BT_ERR("%s no memory for command", hdev->name);
3727                 return -ENOMEM;
3728         }
3729
3730         /* Stand-alone HCI commands must be flagged as
3731          * single-command requests.
3732          */
3733         bt_cb(skb)->req.start = true;
3734
3735         skb_queue_tail(&hdev->cmd_q, skb);
3736         queue_work(hdev->workqueue, &hdev->cmd_work);
3737
3738         return 0;
3739 }
3740
3741 /* Queue a command to an asynchronous HCI request */
3742 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3743                     const void *param, u8 event)
3744 {
3745         struct hci_dev *hdev = req->hdev;
3746         struct sk_buff *skb;
3747
3748         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3749
3750         /* If an error occurred during request building, there is no point in
3751          * queueing the HCI command. We can simply return.
3752          */
3753         if (req->err)
3754                 return;
3755
3756         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3757         if (!skb) {
3758                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3759                        hdev->name, opcode);
3760                 req->err = -ENOMEM;
3761                 return;
3762         }
3763
3764         if (skb_queue_empty(&req->cmd_q))
3765                 bt_cb(skb)->req.start = true;
3766
3767         bt_cb(skb)->req.event = event;
3768
3769         skb_queue_tail(&req->cmd_q, skb);
3770 }
3771
3772 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3773                  const void *param)
3774 {
3775         hci_req_add_ev(req, opcode, plen, param, 0);
3776 }
3777
3778 /* Get data from the previously sent command */
3779 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3780 {
3781         struct hci_command_hdr *hdr;
3782
3783         if (!hdev->sent_cmd)
3784                 return NULL;
3785
3786         hdr = (void *) hdev->sent_cmd->data;
3787
3788         if (hdr->opcode != cpu_to_le16(opcode))
3789                 return NULL;
3790
3791         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3792
3793         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3794 }
3795
3796 /* Send ACL data */
3797 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3798 {
3799         struct hci_acl_hdr *hdr;
3800         int len = skb->len;
3801
3802         skb_push(skb, HCI_ACL_HDR_SIZE);
3803         skb_reset_transport_header(skb);
3804         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3805         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3806         hdr->dlen   = cpu_to_le16(len);
3807 }
3808
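/* Add ACL headers to an outgoing skb (using the connection handle on
 * BR/EDR and the channel handle on AMP) and queue it. Fragments hanging
 * off the skb's frag_list are queued atomically behind the head fragment
 * with ACL_CONT set.
 */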
3809 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3810                           struct sk_buff *skb, __u16 flags)
3811 {
3812         struct hci_conn *conn = chan->conn;
3813         struct hci_dev *hdev = conn->hdev;
3814         struct sk_buff *list;
3815
3816         skb->len = skb_headlen(skb);
3817         skb->data_len = 0;
3818
3819         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3820
3821         switch (hdev->dev_type) {
3822         case HCI_BREDR:
3823                 hci_add_acl_hdr(skb, conn->handle, flags);
3824                 break;
3825         case HCI_AMP:
3826                 hci_add_acl_hdr(skb, chan->handle, flags);
3827                 break;
3828         default:
3829                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3830                 return;
3831         }
3832
3833         list = skb_shinfo(skb)->frag_list;
3834         if (!list) {
3835                 /* Non fragmented */
3836                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3837
3838                 skb_queue_tail(queue, skb);
3839         } else {
3840                 /* Fragmented */
3841                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3842
3843                 skb_shinfo(skb)->frag_list = NULL;
3844
3845                 /* Queue all fragments atomically */
3846                 spin_lock(&queue->lock);
3847
3848                 __skb_queue_tail(queue, skb);
3849
3850                 flags &= ~ACL_START;
3851                 flags |= ACL_CONT;
3852                 do {
3853                         skb = list; list = list->next;
3854
3855                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3856                         hci_add_acl_hdr(skb, conn->handle, flags);
3857
3858                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3859
3860                         __skb_queue_tail(queue, skb);
3861                 } while (list);
3862
3863                 spin_unlock(&queue->lock);
3864         }
3865 }
3866
3867 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3868 {
3869         struct hci_dev *hdev = chan->conn->hdev;
3870
3871         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3872
3873         hci_queue_acl(chan, &chan->data_q, skb, flags);
3874
3875         queue_work(hdev->workqueue, &hdev->tx_work);
3876 }
3877
3878 /* Send SCO data */
3879 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3880 {
3881         struct hci_dev *hdev = conn->hdev;
3882         struct hci_sco_hdr hdr;
3883
3884         BT_DBG("%s len %d", hdev->name, skb->len);
3885
3886         hdr.handle = cpu_to_le16(conn->handle);
3887         hdr.dlen   = skb->len;
3888
3889         skb_push(skb, HCI_SCO_HDR_SIZE);
3890         skb_reset_transport_header(skb);
3891         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3892
3893         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3894
3895         skb_queue_tail(&conn->data_q, skb);
3896         queue_work(hdev->workqueue, &hdev->tx_work);
3897 }
3898
3899 /* ---- HCI TX task (outgoing data) ---- */
3900
3901 /* HCI Connection scheduler */
3902 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3903                                      int *quote)
3904 {
3905         struct hci_conn_hash *h = &hdev->conn_hash;
3906         struct hci_conn *conn = NULL, *c;
3907         unsigned int num = 0, min = ~0;
3908
3909         /* We don't have to lock device here. Connections are always
3910          * added and removed with TX task disabled. */
3911
3912         rcu_read_lock();
3913
3914         list_for_each_entry_rcu(c, &h->list, list) {
3915                 if (c->type != type || skb_queue_empty(&c->data_q))
3916                         continue;
3917
3918                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3919                         continue;
3920
3921                 num++;
3922
3923                 if (c->sent < min) {
3924                         min  = c->sent;
3925                         conn = c;
3926                 }
3927
3928                 if (hci_conn_num(hdev, type) == num)
3929                         break;
3930         }
3931
3932         rcu_read_unlock();
3933
3934         if (conn) {
3935                 int cnt, q;
3936
3937                 switch (conn->type) {
3938                 case ACL_LINK:
3939                         cnt = hdev->acl_cnt;
3940                         break;
3941                 case SCO_LINK:
3942                 case ESCO_LINK:
3943                         cnt = hdev->sco_cnt;
3944                         break;
3945                 case LE_LINK:
3946                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3947                         break;
3948                 default:
3949                         cnt = 0;
3950                         BT_ERR("Unknown link type");
3951                 }
3952
3953                 q = cnt / num;
3954                 *quote = q ? q : 1;
3955         } else
3956                 *quote = 0;
3957
3958         BT_DBG("conn %p quote %d", conn, *quote);
3959         return conn;
3960 }
3961
3962 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3963 {
3964         struct hci_conn_hash *h = &hdev->conn_hash;
3965         struct hci_conn *c;
3966
3967         BT_ERR("%s link tx timeout", hdev->name);
3968
3969         rcu_read_lock();
3970
3971         /* Kill stalled connections */
3972         list_for_each_entry_rcu(c, &h->list, list) {
3973                 if (c->type == type && c->sent) {
3974                         BT_ERR("%s killing stalled connection %pMR",
3975                                hdev->name, &c->dst);
3976                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3977                 }
3978         }
3979
3980         rcu_read_unlock();
3981 }
3982
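/* Channel-level scheduler: among channels of the given link type, pick the
 * one with the highest-priority queued data, preferring the connection
 * with the fewest outstanding packets, and derive a fair-share quote from
 * the free controller buffer count.
 */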
3983 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3984                                       int *quote)
3985 {
3986         struct hci_conn_hash *h = &hdev->conn_hash;
3987         struct hci_chan *chan = NULL;
3988         unsigned int num = 0, min = ~0, cur_prio = 0;
3989         struct hci_conn *conn;
3990         int cnt, q, conn_num = 0;
3991
3992         BT_DBG("%s", hdev->name);
3993
3994         rcu_read_lock();
3995
3996         list_for_each_entry_rcu(conn, &h->list, list) {
3997                 struct hci_chan *tmp;
3998
3999                 if (conn->type != type)
4000                         continue;
4001
4002                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4003                         continue;
4004
4005                 conn_num++;
4006
4007                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4008                         struct sk_buff *skb;
4009
4010                         if (skb_queue_empty(&tmp->data_q))
4011                                 continue;
4012
4013                         skb = skb_peek(&tmp->data_q);
4014                         if (skb->priority < cur_prio)
4015                                 continue;
4016
4017                         if (skb->priority > cur_prio) {
4018                                 num = 0;
4019                                 min = ~0;
4020                                 cur_prio = skb->priority;
4021                         }
4022
4023                         num++;
4024
4025                         if (conn->sent < min) {
4026                                 min  = conn->sent;
4027                                 chan = tmp;
4028                         }
4029                 }
4030
4031                 if (hci_conn_num(hdev, type) == conn_num)
4032                         break;
4033         }
4034
4035         rcu_read_unlock();
4036
4037         if (!chan)
4038                 return NULL;
4039
4040         switch (chan->conn->type) {
4041         case ACL_LINK:
4042                 cnt = hdev->acl_cnt;
4043                 break;
4044         case AMP_LINK:
4045                 cnt = hdev->block_cnt;
4046                 break;
4047         case SCO_LINK:
4048         case ESCO_LINK:
4049                 cnt = hdev->sco_cnt;
4050                 break;
4051         case LE_LINK:
4052                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4053                 break;
4054         default:
4055                 cnt = 0;
4056                 BT_ERR("Unknown link type");
4057         }
4058
4059         q = cnt / num;
4060         *quote = q ? q : 1;
4061         BT_DBG("chan %p quote %d", chan, *quote);
4062         return chan;
4063 }
4064
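/* Promote the head skb of channels that got no service in the last
 * scheduling round to priority HCI_PRIO_MAX - 1, so lower-priority traffic
 * cannot be starved indefinitely.
 */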
4065 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4066 {
4067         struct hci_conn_hash *h = &hdev->conn_hash;
4068         struct hci_conn *conn;
4069         int num = 0;
4070
4071         BT_DBG("%s", hdev->name);
4072
4073         rcu_read_lock();
4074
4075         list_for_each_entry_rcu(conn, &h->list, list) {
4076                 struct hci_chan *chan;
4077
4078                 if (conn->type != type)
4079                         continue;
4080
4081                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4082                         continue;
4083
4084                 num++;
4085
4086                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4087                         struct sk_buff *skb;
4088
4089                         if (chan->sent) {
4090                                 chan->sent = 0;
4091                                 continue;
4092                         }
4093
4094                         if (skb_queue_empty(&chan->data_q))
4095                                 continue;
4096
4097                         skb = skb_peek(&chan->data_q);
4098                         if (skb->priority >= HCI_PRIO_MAX - 1)
4099                                 continue;
4100
4101                         skb->priority = HCI_PRIO_MAX - 1;
4102
4103                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4104                                skb->priority);
4105                 }
4106
4107                 if (hci_conn_num(hdev, type) == num)
4108                         break;
4109         }
4110
4111         rcu_read_unlock();
4113 }
4114
4115 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4116 {
4117         /* Calculate count of blocks used by this packet */
4118         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4119 }
4120
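/* Detect a stalled ACL link: if the controller has no free buffers and the
 * last ACL transmission is older than HCI_ACL_TX_TIMEOUT, assume the link
 * stalled and kill the affected connections.
 */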
4121 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4122 {
4123         if (!test_bit(HCI_RAW, &hdev->flags)) {
4124                 /* ACL tx timeout must be longer than maximum
4125                  * link supervision timeout (40.9 seconds) */
4126                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4127                                        HCI_ACL_TX_TIMEOUT))
4128                         hci_link_tx_to(hdev, ACL_LINK);
4129         }
4130 }
4131
4132 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4133 {
4134         unsigned int cnt = hdev->acl_cnt;
4135         struct hci_chan *chan;
4136         struct sk_buff *skb;
4137         int quote;
4138
4139         __check_timeout(hdev, cnt);
4140
4141         while (hdev->acl_cnt &&
4142                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4143                 u32 priority = (skb_peek(&chan->data_q))->priority;
4144                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4145                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4146                                skb->len, skb->priority);
4147
4148                         /* Stop if priority has changed */
4149                         if (skb->priority < priority)
4150                                 break;
4151
4152                         skb = skb_dequeue(&chan->data_q);
4153
4154                         hci_conn_enter_active_mode(chan->conn,
4155                                                    bt_cb(skb)->force_active);
4156
4157                         hci_send_frame(hdev, skb);
4158                         hdev->acl_last_tx = jiffies;
4159
4160                         hdev->acl_cnt--;
4161                         chan->sent++;
4162                         chan->conn->sent++;
4163                 }
4164         }
4165
4166         if (cnt != hdev->acl_cnt)
4167                 hci_prio_recalculate(hdev, ACL_LINK);
4168 }
4169
4170 static void hci_sched_acl_blk(struct hci_dev *hdev)
4171 {
4172         unsigned int cnt = hdev->block_cnt;
4173         struct hci_chan *chan;
4174         struct sk_buff *skb;
4175         int quote;
4176         u8 type;
4177
4178         __check_timeout(hdev, cnt);
4179
4180         BT_DBG("%s", hdev->name);
4181
4182         if (hdev->dev_type == HCI_AMP)
4183                 type = AMP_LINK;
4184         else
4185                 type = ACL_LINK;
4186
4187         while (hdev->block_cnt > 0 &&
4188                (chan = hci_chan_sent(hdev, type, &quote))) {
4189                 u32 priority = (skb_peek(&chan->data_q))->priority;
4190                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4191                         int blocks;
4192
4193                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4194                                skb->len, skb->priority);
4195
4196                         /* Stop if priority has changed */
4197                         if (skb->priority < priority)
4198                                 break;
4199
4200                         skb = skb_dequeue(&chan->data_q);
4201
4202                         blocks = __get_blocks(hdev, skb);
4203                         if (blocks > hdev->block_cnt)
4204                                 return;
4205
4206                         hci_conn_enter_active_mode(chan->conn,
4207                                                    bt_cb(skb)->force_active);
4208
4209                         hci_send_frame(hdev, skb);
4210                         hdev->acl_last_tx = jiffies;
4211
4212                         hdev->block_cnt -= blocks;
4213                         quote -= blocks;
4214
4215                         chan->sent += blocks;
4216                         chan->conn->sent += blocks;
4217                 }
4218         }
4219
4220         if (cnt != hdev->block_cnt)
4221                 hci_prio_recalculate(hdev, type);
4222 }
4223
4224 static void hci_sched_acl(struct hci_dev *hdev)
4225 {
4226         BT_DBG("%s", hdev->name);
4227
4228         /* No ACL link over BR/EDR controller */
4229         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4230                 return;
4231
4232         /* No AMP link over AMP controller */
4233         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4234                 return;
4235
4236         switch (hdev->flow_ctl_mode) {
4237         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4238                 hci_sched_acl_pkt(hdev);
4239                 break;
4240
4241         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4242                 hci_sched_acl_blk(hdev);
4243                 break;
4244         }
4245 }
4246
4247 /* Schedule SCO */
4248 static void hci_sched_sco(struct hci_dev *hdev)
4249 {
4250         struct hci_conn *conn;
4251         struct sk_buff *skb;
4252         int quote;
4253
4254         BT_DBG("%s", hdev->name);
4255
4256         if (!hci_conn_num(hdev, SCO_LINK))
4257                 return;
4258
4259         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4260                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4261                         BT_DBG("skb %p len %d", skb, skb->len);
4262                         hci_send_frame(hdev, skb);
4263
4264                         conn->sent++;
4265                         if (conn->sent == ~0)
4266                                 conn->sent = 0;
4267                 }
4268         }
4269 }
4270
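/* Schedule eSCO; eSCO links share the SCO buffer credits (sco_cnt) */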
4271 static void hci_sched_esco(struct hci_dev *hdev)
4272 {
4273         struct hci_conn *conn;
4274         struct sk_buff *skb;
4275         int quote;
4276
4277         BT_DBG("%s", hdev->name);
4278
4279         if (!hci_conn_num(hdev, ESCO_LINK))
4280                 return;
4281
4282         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4283                                                      &quote))) {
4284                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4285                         BT_DBG("skb %p len %d", skb, skb->len);
4286                         hci_send_frame(hdev, skb);
4287
4288                         conn->sent++;
4289                         if (conn->sent == ~0)
4290                                 conn->sent = 0;
4291                 }
4292         }
4293 }
4294
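/* Schedule LE data; controllers without a dedicated LE buffer pool
 * (le_pkts == 0) fall back to the ACL buffer credits.
 */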
4295 static void hci_sched_le(struct hci_dev *hdev)
4296 {
4297         struct hci_chan *chan;
4298         struct sk_buff *skb;
4299         int quote, cnt, tmp;
4300
4301         BT_DBG("%s", hdev->name);
4302
4303         if (!hci_conn_num(hdev, LE_LINK))
4304                 return;
4305
4306         if (!test_bit(HCI_RAW, &hdev->flags)) {
4307                 /* LE tx timeout must be longer than maximum
4308                  * link supervision timeout (40.9 seconds) */
4309                 if (!hdev->le_cnt && hdev->le_pkts &&
4310                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4311                         hci_link_tx_to(hdev, LE_LINK);
4312         }
4313
4314         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4315         tmp = cnt;
4316         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4317                 u32 priority = (skb_peek(&chan->data_q))->priority;
4318                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4319                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4320                                skb->len, skb->priority);
4321
4322                         /* Stop if priority has changed */
4323                         if (skb->priority < priority)
4324                                 break;
4325
4326                         skb = skb_dequeue(&chan->data_q);
4327
4328                         hci_send_frame(hdev, skb);
4329                         hdev->le_last_tx = jiffies;
4330
4331                         cnt--;
4332                         chan->sent++;
4333                         chan->conn->sent++;
4334                 }
4335         }
4336
4337         if (hdev->le_pkts)
4338                 hdev->le_cnt = cnt;
4339         else
4340                 hdev->acl_cnt = cnt;
4341
4342         if (cnt != tmp)
4343                 hci_prio_recalculate(hdev, LE_LINK);
4344 }
4345
4346 static void hci_tx_work(struct work_struct *work)
4347 {
4348         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4349         struct sk_buff *skb;
4350
4351         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4352                hdev->sco_cnt, hdev->le_cnt);
4353
4354         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4355                 /* Schedule queues and send stuff to HCI driver */
4356                 hci_sched_acl(hdev);
4357                 hci_sched_sco(hdev);
4358                 hci_sched_esco(hdev);
4359                 hci_sched_le(hdev);
4360         }
4361
4362         /* Send next queued raw (unknown type) packet */
4363         while ((skb = skb_dequeue(&hdev->raw_q)))
4364                 hci_send_frame(hdev, skb);
4365 }
4366
4367 /* ----- HCI RX task (incoming data processing) ----- */
4368
4369 /* ACL data packet */
4370 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4371 {
4372         struct hci_acl_hdr *hdr = (void *) skb->data;
4373         struct hci_conn *conn;
4374         __u16 handle, flags;
4375
4376         skb_pull(skb, HCI_ACL_HDR_SIZE);
4377
4378         handle = __le16_to_cpu(hdr->handle);
4379         flags  = hci_flags(handle);
4380         handle = hci_handle(handle);
4381
4382         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4383                handle, flags);
4384
4385         hdev->stat.acl_rx++;
4386
4387         hci_dev_lock(hdev);
4388         conn = hci_conn_hash_lookup_handle(hdev, handle);
4389         hci_dev_unlock(hdev);
4390
4391         if (conn) {
4392                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4393
4394                 /* Send to upper protocol */
4395                 l2cap_recv_acldata(conn, skb, flags);
4396                 return;
4397         } else {
4398                 BT_ERR("%s ACL packet for unknown connection handle %d",
4399                        hdev->name, handle);
4400         }
4401
4402         kfree_skb(skb);
4403 }
4404
4405 /* SCO data packet */
4406 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4407 {
4408         struct hci_sco_hdr *hdr = (void *) skb->data;
4409         struct hci_conn *conn;
4410         __u16 handle;
4411
4412         skb_pull(skb, HCI_SCO_HDR_SIZE);
4413
4414         handle = __le16_to_cpu(hdr->handle);
4415
4416         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4417
4418         hdev->stat.sco_rx++;
4419
4420         hci_dev_lock(hdev);
4421         conn = hci_conn_hash_lookup_handle(hdev, handle);
4422         hci_dev_unlock(hdev);
4423
4424         if (conn) {
4425                 /* Send to upper protocol */
4426                 sco_recv_scodata(conn, skb);
4427                 return;
4428         } else {
4429                 BT_ERR("%s SCO packet for unknown connection handle %d",
4430                        hdev->name, handle);
4431         }
4432
4433         kfree_skb(skb);
4434 }
4435
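/* The current request is complete when the command queue is empty or the
 * command at the head of the queue starts a new request.
 */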
4436 static bool hci_req_is_complete(struct hci_dev *hdev)
4437 {
4438         struct sk_buff *skb;
4439
4440         skb = skb_peek(&hdev->cmd_q);
4441         if (!skb)
4442                 return true;
4443
4444         return bt_cb(skb)->req.start;
4445 }
4446
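/* Requeue a clone of the last sent command. Used to recover when a
 * controller resets spontaneously during init; HCI_OP_RESET itself is
 * never resent.
 */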
4447 static void hci_resend_last(struct hci_dev *hdev)
4448 {
4449         struct hci_command_hdr *sent;
4450         struct sk_buff *skb;
4451         u16 opcode;
4452
4453         if (!hdev->sent_cmd)
4454                 return;
4455
4456         sent = (void *) hdev->sent_cmd->data;
4457         opcode = __le16_to_cpu(sent->opcode);
4458         if (opcode == HCI_OP_RESET)
4459                 return;
4460
4461         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4462         if (!skb)
4463                 return;
4464
4465         skb_queue_head(&hdev->cmd_q, skb);
4466         queue_work(hdev->workqueue, &hdev->cmd_work);
4467 }
4468
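/* Called on command completion: decide whether the request the command
 * belongs to has finished and, if so, run the request's completion
 * callback. On failure, the remaining queued commands of the request are
 * flushed first.
 */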
4469 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4470 {
4471         hci_req_complete_t req_complete = NULL;
4472         struct sk_buff *skb;
4473         unsigned long flags;
4474
4475         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4476
4477         /* If the completed command doesn't match the last one that was
4478          * sent, we need to do special handling of it.
4479          */
4480         if (!hci_sent_cmd_data(hdev, opcode)) {
4481                 /* Some CSR based controllers generate a spontaneous
4482                  * reset complete event during init and any pending
4483                  * command will never be completed. In such a case we
4484                  * need to resend whatever was the last sent
4485                  * command.
4486                  */
4487                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4488                         hci_resend_last(hdev);
4489
4490                 return;
4491         }
4492
4493         /* If the command succeeded and there's still more commands in
4494         /* If the command succeeded and there are still more commands in
4495          * this request, the request is not yet complete.
4496         if (!status && !hci_req_is_complete(hdev))
4497                 return;
4498
4499         /* If this was the last command in a request, the complete
4500          * callback would be found in hdev->sent_cmd instead of the
4501          * command queue (hdev->cmd_q).
4502          */
4503         if (hdev->sent_cmd) {
4504                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4505
4506                 if (req_complete) {
4507                         /* We must set the complete callback to NULL to
4508                          * avoid calling the callback more than once if
4509                          * this function gets called again.
4510                          */
4511                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
4512
4513                         goto call_complete;
4514                 }
4515         }
4516
4517         /* Remove all pending commands belonging to this request */
4518         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4519         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4520                 if (bt_cb(skb)->req.start) {
4521                         __skb_queue_head(&hdev->cmd_q, skb);
4522                         break;
4523                 }
4524
4525                 req_complete = bt_cb(skb)->req.complete;
4526                 kfree_skb(skb);
4527         }
4528         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4529
4530 call_complete:
4531         if (req_complete)
4532                 req_complete(hdev, status);
4533 }
4534
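/* RX work: drain the receive queue, mirroring each frame to the monitor
 * and, in promiscuous mode, to the HCI sockets, then dispatch it by packet
 * type. Everything is dropped in raw/user-channel mode, and data packets
 * are dropped while the device is still initializing.
 */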
4535 static void hci_rx_work(struct work_struct *work)
4536 {
4537         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4538         struct sk_buff *skb;
4539
4540         BT_DBG("%s", hdev->name);
4541
4542         while ((skb = skb_dequeue(&hdev->rx_q))) {
4543                 /* Send copy to monitor */
4544                 hci_send_to_monitor(hdev, skb);
4545
4546                 if (atomic_read(&hdev->promisc)) {
4547                         /* Send copy to the sockets */
4548                         hci_send_to_sock(hdev, skb);
4549                 }
4550
4551                 if (test_bit(HCI_RAW, &hdev->flags) ||
4552                     test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4553                         kfree_skb(skb);
4554                         continue;
4555                 }
4556
4557                 if (test_bit(HCI_INIT, &hdev->flags)) {
4558                         /* Don't process data packets in this state. */
4559                         switch (bt_cb(skb)->pkt_type) {
4560                         case HCI_ACLDATA_PKT:
4561                         case HCI_SCODATA_PKT:
4562                                 kfree_skb(skb);
4563                                 continue;
4564                         }
4565                 }
4566
4567                 /* Process frame */
4568                 switch (bt_cb(skb)->pkt_type) {
4569                 case HCI_EVENT_PKT:
4570                         BT_DBG("%s Event packet", hdev->name);
4571                         hci_event_packet(hdev, skb);
4572                         break;
4573
4574                 case HCI_ACLDATA_PKT:
4575                         BT_DBG("%s ACL data packet", hdev->name);
4576                         hci_acldata_packet(hdev, skb);
4577                         break;
4578
4579                 case HCI_SCODATA_PKT:
4580                         BT_DBG("%s SCO data packet", hdev->name);
4581                         hci_scodata_packet(hdev, skb);
4582                         break;
4583
4584                 default:
4585                         kfree_skb(skb);
4586                         break;
4587                 }
4588         }
4589 }
4590
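/* Command work: if the controller has a free command slot, dequeue the
 * next command, keep a clone in hdev->sent_cmd for completion matching,
 * send it and arm the command timeout (the timer is dropped while a reset
 * is pending). If cloning fails, the command is requeued and retried.
 */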
4591 static void hci_cmd_work(struct work_struct *work)
4592 {
4593         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4594         struct sk_buff *skb;
4595
4596         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4597                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4598
4599         /* Send queued commands */
4600         if (atomic_read(&hdev->cmd_cnt)) {
4601                 skb = skb_dequeue(&hdev->cmd_q);
4602                 if (!skb)
4603                         return;
4604
4605                 kfree_skb(hdev->sent_cmd);
4606
4607                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4608                 if (hdev->sent_cmd) {
4609                         atomic_dec(&hdev->cmd_cnt);
4610                         hci_send_frame(hdev, skb);
4611                         if (test_bit(HCI_RESET, &hdev->flags))
4612                                 del_timer(&hdev->cmd_timer);
4613                         else
4614                                 mod_timer(&hdev->cmd_timer,
4615                                           jiffies + HCI_CMD_TIMEOUT);
4616                 } else {
4617                         skb_queue_head(&hdev->cmd_q, skb);
4618                         queue_work(hdev->workqueue, &hdev->cmd_work);
4619                 }
4620         }
4621 }