Bluetooth: Add support for vendor specific diagnostic channel
net/bluetooth/hci_sock.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI sockets. */
26
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "mgmt_util.h"
36
37 static LIST_HEAD(mgmt_chan_list);
38 static DEFINE_MUTEX(mgmt_chan_list_lock);
39
40 static atomic_t monitor_promisc = ATOMIC_INIT(0);
41
42 /* ----- HCI socket interface ----- */
43
44 /* Socket info */
45 #define hci_pi(sk) ((struct hci_pinfo *) sk)
46
47 struct hci_pinfo {
48         struct bt_sock    bt;
49         struct hci_dev    *hdev;
50         struct hci_filter filter;
51         __u32             cmsg_mask;
52         unsigned short    channel;
53         unsigned long     flags;
54 };
55
56 void hci_sock_set_flag(struct sock *sk, int nr)
57 {
58         set_bit(nr, &hci_pi(sk)->flags);
59 }
60
61 void hci_sock_clear_flag(struct sock *sk, int nr)
62 {
63         clear_bit(nr, &hci_pi(sk)->flags);
64 }
65
66 int hci_sock_test_flag(struct sock *sk, int nr)
67 {
68         return test_bit(nr, &hci_pi(sk)->flags);
69 }
70
71 unsigned short hci_sock_get_channel(struct sock *sk)
72 {
73         return hci_pi(sk)->channel;
74 }
75
76 static inline int hci_test_bit(int nr, const void *addr)
77 {
78         return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
79 }
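
/* Added note (not in the original source): hci_test_bit() treats its second
 * argument as an array of 32-bit words, so bit "nr" is looked up in word
 * (nr >> 5) at position (nr & 31).  For example, HCI_EV_CMD_COMPLETE (0x0e)
 * maps to bit 14 of the first word of a filter's event_mask, while an event
 * code of 0x2e would map to bit 14 of the second word.
 */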
80
81 /* Security filter */
82 #define HCI_SFLT_MAX_OGF  5
83
84 struct hci_sec_filter {
85         __u32 type_mask;
86         __u32 event_mask[2];
87         __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
88 };
89
90 static const struct hci_sec_filter hci_sec_filter = {
91         /* Packet types */
92         0x10,
93         /* Events */
94         { 0x1000d9fe, 0x0000b00c },
95         /* Commands */
96         {
97                 { 0x0 },
98                 /* OGF_LINK_CTL */
99                 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
100                 /* OGF_LINK_POLICY */
101                 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
102                 /* OGF_HOST_CTL */
103                 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
104                 /* OGF_INFO_PARAM */
105                 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
106                 /* OGF_STATUS_PARAM */
107                 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
108         }
109 };
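
/* Illustrative sketch, not part of the original file: the table above is
 * indexed by OGF, with one 128-bit OCF bitmap per row.  A helper along these
 * lines mirrors the check that hci_sock_sendmsg() performs inline further
 * down for unprivileged RAW sockets (the helper name is made up here).
 */
static inline bool hci_sec_filter_allows(u16 opcode)
{
        u16 ogf = hci_opcode_ogf(opcode);
        u16 ocf = hci_opcode_ocf(opcode);

        if (ogf > HCI_SFLT_MAX_OGF)
                return false;

        return hci_test_bit(ocf & HCI_FLT_OCF_BITS,
                            &hci_sec_filter.ocf_mask[ogf]);
}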
110
111 static struct bt_sock_list hci_sk_list = {
112         .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
113 };
114
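/* Added note (not in the original source): is_filtered_packet() returns true
 * when the frame is filtered out (dropped) for this socket, and false when
 * it passes the per-socket filter and will be queued.
 */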
115 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
116 {
117         struct hci_filter *flt;
118         int flt_type, flt_event;
119
120         /* Apply filter */
121         flt = &hci_pi(sk)->filter;
122
123         if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
124                 flt_type = 0;
125         else
126                 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
127
128         if (!test_bit(flt_type, &flt->type_mask))
129                 return true;
130
131         /* Extra filter for event packets only */
132         if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
133                 return false;
134
135         flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
136
137         if (!hci_test_bit(flt_event, &flt->event_mask))
138                 return true;
139
140         /* Check filter only when opcode is set */
141         if (!flt->opcode)
142                 return false;
143
144         if (flt_event == HCI_EV_CMD_COMPLETE &&
145             flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
146                 return true;
147
148         if (flt_event == HCI_EV_CMD_STATUS &&
149             flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
150                 return true;
151
152         return false;
153 }
154
155 /* Send frame to RAW socket */
156 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
157 {
158         struct sock *sk;
159         struct sk_buff *skb_copy = NULL;
160
161         BT_DBG("hdev %p len %d", hdev, skb->len);
162
163         read_lock(&hci_sk_list.lock);
164
165         sk_for_each(sk, &hci_sk_list.head) {
166                 struct sk_buff *nskb;
167
168                 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
169                         continue;
170
171                 /* Don't send frame to the socket it came from */
172                 if (skb->sk == sk)
173                         continue;
174
175                 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
176                         if (is_filtered_packet(sk, skb))
177                                 continue;
178                 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
179                         if (!bt_cb(skb)->incoming)
180                                 continue;
181                         if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
182                             bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
183                             bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
184                                 continue;
185                 } else {
186                         /* Don't send frame to other channel types */
187                         continue;
188                 }
189
190                 if (!skb_copy) {
191                         /* Create a private copy with headroom */
192                         skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
193                         if (!skb_copy)
194                                 continue;
195
196                         /* Put type byte before the data */
197                         memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
198                 }
199
200                 nskb = skb_clone(skb_copy, GFP_ATOMIC);
201                 if (!nskb)
202                         continue;
203
204                 if (sock_queue_rcv_skb(sk, nskb))
205                         kfree_skb(nskb);
206         }
207
208         read_unlock(&hci_sk_list.lock);
209
210         kfree_skb(skb_copy);
211 }
212
213 /* Send frame to sockets with a specific channel */
214 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
215                          int flag, struct sock *skip_sk)
216 {
217         struct sock *sk;
218
219         BT_DBG("channel %u len %d", channel, skb->len);
220
221         read_lock(&hci_sk_list.lock);
222
223         sk_for_each(sk, &hci_sk_list.head) {
224                 struct sk_buff *nskb;
225
226                 /* Ignore sockets without the flag set */
227                 if (!hci_sock_test_flag(sk, flag))
228                         continue;
229
230                 /* Skip the original socket */
231                 if (sk == skip_sk)
232                         continue;
233
234                 if (sk->sk_state != BT_BOUND)
235                         continue;
236
237                 if (hci_pi(sk)->channel != channel)
238                         continue;
239
240                 nskb = skb_clone(skb, GFP_ATOMIC);
241                 if (!nskb)
242                         continue;
243
244                 if (sock_queue_rcv_skb(sk, nskb))
245                         kfree_skb(nskb);
246         }
247
248         read_unlock(&hci_sk_list.lock);
249 }
250
251 /* Send frame to monitor socket */
252 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
253 {
254         struct sk_buff *skb_copy = NULL;
255         struct hci_mon_hdr *hdr;
256         __le16 opcode;
257
258         if (!atomic_read(&monitor_promisc))
259                 return;
260
261         BT_DBG("hdev %p len %d", hdev, skb->len);
262
263         switch (bt_cb(skb)->pkt_type) {
264         case HCI_COMMAND_PKT:
265                 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
266                 break;
267         case HCI_EVENT_PKT:
268                 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
269                 break;
270         case HCI_ACLDATA_PKT:
271                 if (bt_cb(skb)->incoming)
272                         opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
273                 else
274                         opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
275                 break;
276         case HCI_SCODATA_PKT:
277                 if (bt_cb(skb)->incoming)
278                         opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
279                 else
280                         opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
281                 break;
282         case HCI_DIAG_PKT:
283                 opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
284                 break;
285         default:
286                 return;
287         }
288
289         /* Create a private copy with headroom */
290         skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
291         if (!skb_copy)
292                 return;
293
294         /* Put header before the data */
295         hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
296         hdr->opcode = opcode;
297         hdr->index = cpu_to_le16(hdev->id);
298         hdr->len = cpu_to_le16(skb->len);
299
300         hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
301                             HCI_SOCK_TRUSTED, NULL);
302         kfree_skb(skb_copy);
303 }
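
/* Added note (not in the original source): every frame queued on
 * HCI_CHANNEL_MONITOR starts with a struct hci_mon_hdr (opcode, index and
 * len, all little endian) followed by the raw packet.  With this patch,
 * vendor diagnostic packets (HCI_DIAG_PKT) are forwarded to the monitor as
 * well, tagged with the HCI_MON_VENDOR_DIAG opcode.
 */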
304
305 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
306 {
307         struct hci_mon_hdr *hdr;
308         struct hci_mon_new_index *ni;
309         struct hci_mon_index_info *ii;
310         struct sk_buff *skb;
311         __le16 opcode;
312
313         switch (event) {
314         case HCI_DEV_REG:
315                 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
316                 if (!skb)
317                         return NULL;
318
319                 ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
320                 ni->type = hdev->dev_type;
321                 ni->bus = hdev->bus;
322                 bacpy(&ni->bdaddr, &hdev->bdaddr);
323                 memcpy(ni->name, hdev->name, 8);
324
325                 opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
326                 break;
327
328         case HCI_DEV_UNREG:
329                 skb = bt_skb_alloc(0, GFP_ATOMIC);
330                 if (!skb)
331                         return NULL;
332
333                 opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
334                 break;
335
336         case HCI_DEV_UP:
337                 skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
338                 if (!skb)
339                         return NULL;
340
341                 ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
342                 bacpy(&ii->bdaddr, &hdev->bdaddr);
343                 ii->manufacturer = cpu_to_le16(hdev->manufacturer);
344
345                 opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
346                 break;
347
348         case HCI_DEV_OPEN:
349                 skb = bt_skb_alloc(0, GFP_ATOMIC);
350                 if (!skb)
351                         return NULL;
352
353                 opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
354                 break;
355
356         case HCI_DEV_CLOSE:
357                 skb = bt_skb_alloc(0, GFP_ATOMIC);
358                 if (!skb)
359                         return NULL;
360
361                 opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
362                 break;
363
364         default:
365                 return NULL;
366         }
367
368         __net_timestamp(skb);
369
370         hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
371         hdr->opcode = opcode;
372         hdr->index = cpu_to_le16(hdev->id);
373         hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
374
375         return skb;
376 }
377
378 static void send_monitor_replay(struct sock *sk)
379 {
380         struct hci_dev *hdev;
381
382         read_lock(&hci_dev_list_lock);
383
384         list_for_each_entry(hdev, &hci_dev_list, list) {
385                 struct sk_buff *skb;
386
387                 skb = create_monitor_event(hdev, HCI_DEV_REG);
388                 if (!skb)
389                         continue;
390
391                 if (sock_queue_rcv_skb(sk, skb))
392                         kfree_skb(skb);
393
394                 if (!test_bit(HCI_RUNNING, &hdev->flags))
395                         continue;
396
397                 skb = create_monitor_event(hdev, HCI_DEV_OPEN);
398                 if (!skb)
399                         continue;
400
401                 if (sock_queue_rcv_skb(sk, skb))
402                         kfree_skb(skb);
403
404                 if (!test_bit(HCI_UP, &hdev->flags))
405                         continue;
406
407                 skb = create_monitor_event(hdev, HCI_DEV_UP);
408                 if (!skb)
409                         continue;
410
411                 if (sock_queue_rcv_skb(sk, skb))
412                         kfree_skb(skb);
413         }
414
415         read_unlock(&hci_dev_list_lock);
416 }
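
/* Added note (not in the original source): the replay above lets a freshly
 * bound monitor socket catch up with the current state of each controller:
 * HCI_MON_NEW_INDEX is always sent, HCI_MON_OPEN_INDEX follows if the
 * transport is running (HCI_RUNNING), and HCI_MON_INDEX_INFO if the
 * controller is fully up (HCI_UP).
 */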
417
418 /* Generate internal stack event */
419 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
420 {
421         struct hci_event_hdr *hdr;
422         struct hci_ev_stack_internal *ev;
423         struct sk_buff *skb;
424
425         skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
426         if (!skb)
427                 return;
428
429         hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
430         hdr->evt  = HCI_EV_STACK_INTERNAL;
431         hdr->plen = sizeof(*ev) + dlen;
432
433         ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
434         ev->type = type;
435         memcpy(ev->data, data, dlen);
436
437         bt_cb(skb)->incoming = 1;
438         __net_timestamp(skb);
439
440         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
441         hci_send_to_sock(hdev, skb);
442         kfree_skb(skb);
443 }
444
445 void hci_sock_dev_event(struct hci_dev *hdev, int event)
446 {
447         BT_DBG("hdev %s event %d", hdev->name, event);
448
449         if (atomic_read(&monitor_promisc)) {
450                 struct sk_buff *skb;
451
452                 /* Send event to monitor */
453                 skb = create_monitor_event(hdev, event);
454                 if (skb) {
455                         hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
456                                             HCI_SOCK_TRUSTED, NULL);
457                         kfree_skb(skb);
458                 }
459         }
460
461         if (event <= HCI_DEV_DOWN) {
462                 struct hci_ev_si_device ev;
463
464                 /* Send event to sockets */
465                 ev.event  = event;
466                 ev.dev_id = hdev->id;
467                 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
468         }
469
470         if (event == HCI_DEV_UNREG) {
471                 struct sock *sk;
472
473                 /* Detach sockets from device */
474                 read_lock(&hci_sk_list.lock);
475                 sk_for_each(sk, &hci_sk_list.head) {
476                         bh_lock_sock_nested(sk);
477                         if (hci_pi(sk)->hdev == hdev) {
478                                 hci_pi(sk)->hdev = NULL;
479                                 sk->sk_err = EPIPE;
480                                 sk->sk_state = BT_OPEN;
481                                 sk->sk_state_change(sk);
482
483                                 hci_dev_put(hdev);
484                         }
485                         bh_unlock_sock(sk);
486                 }
487                 read_unlock(&hci_sk_list.lock);
488         }
489 }
490
491 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
492 {
493         struct hci_mgmt_chan *c;
494
495         list_for_each_entry(c, &mgmt_chan_list, list) {
496                 if (c->channel == channel)
497                         return c;
498         }
499
500         return NULL;
501 }
502
503 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
504 {
505         struct hci_mgmt_chan *c;
506
507         mutex_lock(&mgmt_chan_list_lock);
508         c = __hci_mgmt_chan_find(channel);
509         mutex_unlock(&mgmt_chan_list_lock);
510
511         return c;
512 }
513
514 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
515 {
516         if (c->channel < HCI_CHANNEL_CONTROL)
517                 return -EINVAL;
518
519         mutex_lock(&mgmt_chan_list_lock);
520         if (__hci_mgmt_chan_find(c->channel)) {
521                 mutex_unlock(&mgmt_chan_list_lock);
522                 return -EALREADY;
523         }
524
525         list_add_tail(&c->list, &mgmt_chan_list);
526
527         mutex_unlock(&mgmt_chan_list_lock);
528
529         return 0;
530 }
531 EXPORT_SYMBOL(hci_mgmt_chan_register);
532
533 void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
534 {
535         mutex_lock(&mgmt_chan_list_lock);
536         list_del(&c->list);
537         mutex_unlock(&mgmt_chan_list_lock);
538 }
539 EXPORT_SYMBOL(hci_mgmt_chan_unregister);
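
/* Illustrative sketch, not part of the original file: a management channel
 * is registered by filling in a struct hci_mgmt_chan and calling
 * hci_mgmt_chan_register(), roughly along these lines (the handler table
 * and init callback names are placeholders here).
 *
 *	static const struct hci_mgmt_handler example_handlers[] = {
 *		...
 *	};
 *
 *	static struct hci_mgmt_chan example_chan = {
 *		.channel	= HCI_CHANNEL_CONTROL,
 *		.handler_count	= ARRAY_SIZE(example_handlers),
 *		.handlers	= example_handlers,
 *		.hdev_init	= example_hdev_init,
 *	};
 *
 *	err = hci_mgmt_chan_register(&example_chan);
 */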
540
541 static int hci_sock_release(struct socket *sock)
542 {
543         struct sock *sk = sock->sk;
544         struct hci_dev *hdev;
545
546         BT_DBG("sock %p sk %p", sock, sk);
547
548         if (!sk)
549                 return 0;
550
551         hdev = hci_pi(sk)->hdev;
552
553         if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
554                 atomic_dec(&monitor_promisc);
555
556         bt_sock_unlink(&hci_sk_list, sk);
557
558         if (hdev) {
559                 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
560                         /* When releasing a user channel's exclusive access,
561                          * call hci_dev_do_close directly instead of calling
562                          * hci_dev_close to ensure the exclusive access will
563                          * be released and the controller brought back down.
564                          *
565                          * The checking of HCI_AUTO_OFF is not needed in this
566                          * case since it will have been cleared already when
567                          * opening the user channel.
568                          */
569                         hci_dev_do_close(hdev);
570                         hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
571                         mgmt_index_added(hdev);
572                 }
573
574                 atomic_dec(&hdev->promisc);
575                 hci_dev_put(hdev);
576         }
577
578         sock_orphan(sk);
579
580         skb_queue_purge(&sk->sk_receive_queue);
581         skb_queue_purge(&sk->sk_write_queue);
582
583         sock_put(sk);
584         return 0;
585 }
586
587 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
588 {
589         bdaddr_t bdaddr;
590         int err;
591
592         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
593                 return -EFAULT;
594
595         hci_dev_lock(hdev);
596
597         err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
598
599         hci_dev_unlock(hdev);
600
601         return err;
602 }
603
604 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
605 {
606         bdaddr_t bdaddr;
607         int err;
608
609         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
610                 return -EFAULT;
611
612         hci_dev_lock(hdev);
613
614         err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
615
616         hci_dev_unlock(hdev);
617
618         return err;
619 }
620
621 /* Ioctls that require a bound socket */
622 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
623                                 unsigned long arg)
624 {
625         struct hci_dev *hdev = hci_pi(sk)->hdev;
626
627         if (!hdev)
628                 return -EBADFD;
629
630         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
631                 return -EBUSY;
632
633         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
634                 return -EOPNOTSUPP;
635
636         if (hdev->dev_type != HCI_BREDR)
637                 return -EOPNOTSUPP;
638
639         switch (cmd) {
640         case HCISETRAW:
641                 if (!capable(CAP_NET_ADMIN))
642                         return -EPERM;
643                 return -EOPNOTSUPP;
644
645         case HCIGETCONNINFO:
646                 return hci_get_conn_info(hdev, (void __user *) arg);
647
648         case HCIGETAUTHINFO:
649                 return hci_get_auth_info(hdev, (void __user *) arg);
650
651         case HCIBLOCKADDR:
652                 if (!capable(CAP_NET_ADMIN))
653                         return -EPERM;
654                 return hci_sock_blacklist_add(hdev, (void __user *) arg);
655
656         case HCIUNBLOCKADDR:
657                 if (!capable(CAP_NET_ADMIN))
658                         return -EPERM;
659                 return hci_sock_blacklist_del(hdev, (void __user *) arg);
660         }
661
662         return -ENOIOCTLCMD;
663 }
664
665 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
666                           unsigned long arg)
667 {
668         void __user *argp = (void __user *) arg;
669         struct sock *sk = sock->sk;
670         int err;
671
672         BT_DBG("cmd %x arg %lx", cmd, arg);
673
674         lock_sock(sk);
675
676         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
677                 err = -EBADFD;
678                 goto done;
679         }
680
681         release_sock(sk);
682
683         switch (cmd) {
684         case HCIGETDEVLIST:
685                 return hci_get_dev_list(argp);
686
687         case HCIGETDEVINFO:
688                 return hci_get_dev_info(argp);
689
690         case HCIGETCONNLIST:
691                 return hci_get_conn_list(argp);
692
693         case HCIDEVUP:
694                 if (!capable(CAP_NET_ADMIN))
695                         return -EPERM;
696                 return hci_dev_open(arg);
697
698         case HCIDEVDOWN:
699                 if (!capable(CAP_NET_ADMIN))
700                         return -EPERM;
701                 return hci_dev_close(arg);
702
703         case HCIDEVRESET:
704                 if (!capable(CAP_NET_ADMIN))
705                         return -EPERM;
706                 return hci_dev_reset(arg);
707
708         case HCIDEVRESTAT:
709                 if (!capable(CAP_NET_ADMIN))
710                         return -EPERM;
711                 return hci_dev_reset_stat(arg);
712
713         case HCISETSCAN:
714         case HCISETAUTH:
715         case HCISETENCRYPT:
716         case HCISETPTYPE:
717         case HCISETLINKPOL:
718         case HCISETLINKMODE:
719         case HCISETACLMTU:
720         case HCISETSCOMTU:
721                 if (!capable(CAP_NET_ADMIN))
722                         return -EPERM;
723                 return hci_dev_cmd(cmd, argp);
724
725         case HCIINQUIRY:
726                 return hci_inquiry(argp);
727         }
728
729         lock_sock(sk);
730
731         err = hci_sock_bound_ioctl(sk, cmd, arg);
732
733 done:
734         release_sock(sk);
735         return err;
736 }
737
738 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
739                          int addr_len)
740 {
741         struct sockaddr_hci haddr;
742         struct sock *sk = sock->sk;
743         struct hci_dev *hdev = NULL;
744         int len, err = 0;
745
746         BT_DBG("sock %p sk %p", sock, sk);
747
748         if (!addr)
749                 return -EINVAL;
750
751         memset(&haddr, 0, sizeof(haddr));
752         len = min_t(unsigned int, sizeof(haddr), addr_len);
753         memcpy(&haddr, addr, len);
754
755         if (haddr.hci_family != AF_BLUETOOTH)
756                 return -EINVAL;
757
758         lock_sock(sk);
759
760         if (sk->sk_state == BT_BOUND) {
761                 err = -EALREADY;
762                 goto done;
763         }
764
765         switch (haddr.hci_channel) {
766         case HCI_CHANNEL_RAW:
767                 if (hci_pi(sk)->hdev) {
768                         err = -EALREADY;
769                         goto done;
770                 }
771
772                 if (haddr.hci_dev != HCI_DEV_NONE) {
773                         hdev = hci_dev_get(haddr.hci_dev);
774                         if (!hdev) {
775                                 err = -ENODEV;
776                                 goto done;
777                         }
778
779                         atomic_inc(&hdev->promisc);
780                 }
781
782                 hci_pi(sk)->hdev = hdev;
783                 break;
784
785         case HCI_CHANNEL_USER:
786                 if (hci_pi(sk)->hdev) {
787                         err = -EALREADY;
788                         goto done;
789                 }
790
791                 if (haddr.hci_dev == HCI_DEV_NONE) {
792                         err = -EINVAL;
793                         goto done;
794                 }
795
796                 if (!capable(CAP_NET_ADMIN)) {
797                         err = -EPERM;
798                         goto done;
799                 }
800
801                 hdev = hci_dev_get(haddr.hci_dev);
802                 if (!hdev) {
803                         err = -ENODEV;
804                         goto done;
805                 }
806
807                 if (test_bit(HCI_INIT, &hdev->flags) ||
808                     hci_dev_test_flag(hdev, HCI_SETUP) ||
809                     hci_dev_test_flag(hdev, HCI_CONFIG) ||
810                     (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
811                      test_bit(HCI_UP, &hdev->flags))) {
812                         err = -EBUSY;
813                         hci_dev_put(hdev);
814                         goto done;
815                 }
816
817                 if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
818                         err = -EUSERS;
819                         hci_dev_put(hdev);
820                         goto done;
821                 }
822
823                 mgmt_index_removed(hdev);
824
825                 err = hci_dev_open(hdev->id);
826                 if (err) {
827                         if (err == -EALREADY) {
828                                 /* In case the transport is already up and
829                                  * running, clear the error here.
830                                  *
831                                  * This can happen when opening a user
832                                  * channel while the HCI_AUTO_OFF grace
833                                  * period is still active.
834                                  */
835                                 err = 0;
836                         } else {
837                                 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
838                                 mgmt_index_added(hdev);
839                                 hci_dev_put(hdev);
840                                 goto done;
841                         }
842                 }
843
844                 atomic_inc(&hdev->promisc);
845
846                 hci_pi(sk)->hdev = hdev;
847                 break;
848
849         case HCI_CHANNEL_MONITOR:
850                 if (haddr.hci_dev != HCI_DEV_NONE) {
851                         err = -EINVAL;
852                         goto done;
853                 }
854
855                 if (!capable(CAP_NET_RAW)) {
856                         err = -EPERM;
857                         goto done;
858                 }
859
860                 /* The monitor interface is restricted to CAP_NET_RAW
861                  * capabilities and is therefore implicitly trusted.
862                  */
863                 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
864
865                 send_monitor_replay(sk);
866
867                 atomic_inc(&monitor_promisc);
868                 break;
869
870         default:
871                 if (!hci_mgmt_chan_find(haddr.hci_channel)) {
872                         err = -EINVAL;
873                         goto done;
874                 }
875
876                 if (haddr.hci_dev != HCI_DEV_NONE) {
877                         err = -EINVAL;
878                         goto done;
879                 }
880
881                 /* Users with CAP_NET_ADMIN capabilities are allowed
882                  * access to all management commands and events. For
883                  * untrusted users the interface is restricted, and only
884                  * events that are safe for them are sent.
885                  */
886                 if (capable(CAP_NET_ADMIN))
887                         hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
888
889                 /* At the moment the index and unconfigured index events
890                  * are enabled unconditionally. Setting them on each
891                  * socket when binding keeps this functionality. They
892                  * might however be cleared later, in which case sending
893                  * of these events is disabled - and that is intentional.
894                  *
895                  * This also enables generic events that are safe to be
896                  * received by untrusted users. Examples of such events
897                  * are changes to settings, class of device, name, etc.
898                  */
899                 if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
900                         hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
901                         hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
902                         hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
903                 }
904                 break;
905         }
906
907
908         hci_pi(sk)->channel = haddr.hci_channel;
909         sk->sk_state = BT_BOUND;
910
911 done:
912         release_sock(sk);
913         return err;
914 }
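
/* Illustrative userspace sketch, not part of the original file: binding a
 * raw HCI socket to the monitor channel.  Assumes userspace copies of the
 * sockaddr_hci layout and the HCI_DEV_NONE/HCI_CHANNEL_MONITOR constants.
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_MONITOR,
 *	};
 *
 *	if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0)
 *		return -errno;
 *
 * The bind fails with -EPERM unless the caller has CAP_NET_RAW, matching
 * the capable() check in the MONITOR case above.
 */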
915
916 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
917                             int *addr_len, int peer)
918 {
919         struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
920         struct sock *sk = sock->sk;
921         struct hci_dev *hdev;
922         int err = 0;
923
924         BT_DBG("sock %p sk %p", sock, sk);
925
926         if (peer)
927                 return -EOPNOTSUPP;
928
929         lock_sock(sk);
930
931         hdev = hci_pi(sk)->hdev;
932         if (!hdev) {
933                 err = -EBADFD;
934                 goto done;
935         }
936
937         *addr_len = sizeof(*haddr);
938         haddr->hci_family = AF_BLUETOOTH;
939         haddr->hci_dev    = hdev->id;
940         haddr->hci_channel = hci_pi(sk)->channel;
941
942 done:
943         release_sock(sk);
944         return err;
945 }
946
947 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
948                           struct sk_buff *skb)
949 {
950         __u32 mask = hci_pi(sk)->cmsg_mask;
951
952         if (mask & HCI_CMSG_DIR) {
953                 int incoming = bt_cb(skb)->incoming;
954                 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
955                          &incoming);
956         }
957
958         if (mask & HCI_CMSG_TSTAMP) {
959 #ifdef CONFIG_COMPAT
960                 struct compat_timeval ctv;
961 #endif
962                 struct timeval tv;
963                 void *data;
964                 int len;
965
966                 skb_get_timestamp(skb, &tv);
967
968                 data = &tv;
969                 len = sizeof(tv);
970 #ifdef CONFIG_COMPAT
971                 if (!COMPAT_USE_64BIT_TIME &&
972                     (msg->msg_flags & MSG_CMSG_COMPAT)) {
973                         ctv.tv_sec = tv.tv_sec;
974                         ctv.tv_usec = tv.tv_usec;
975                         data = &ctv;
976                         len = sizeof(ctv);
977                 }
978 #endif
979
980                 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
981         }
982 }
983
984 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
985                             int flags)
986 {
987         int noblock = flags & MSG_DONTWAIT;
988         struct sock *sk = sock->sk;
989         struct sk_buff *skb;
990         int copied, err;
991
992         BT_DBG("sock %p, sk %p", sock, sk);
993
994         if (flags & (MSG_OOB))
995                 return -EOPNOTSUPP;
996
997         if (sk->sk_state == BT_CLOSED)
998                 return 0;
999
1000         skb = skb_recv_datagram(sk, flags, noblock, &err);
1001         if (!skb)
1002                 return err;
1003
1004         copied = skb->len;
1005         if (len < copied) {
1006                 msg->msg_flags |= MSG_TRUNC;
1007                 copied = len;
1008         }
1009
1010         skb_reset_transport_header(skb);
1011         err = skb_copy_datagram_msg(skb, 0, msg, copied);
1012
1013         switch (hci_pi(sk)->channel) {
1014         case HCI_CHANNEL_RAW:
1015                 hci_sock_cmsg(sk, msg, skb);
1016                 break;
1017         case HCI_CHANNEL_USER:
1018         case HCI_CHANNEL_MONITOR:
1019                 sock_recv_timestamp(msg, sk, skb);
1020                 break;
1021         default:
1022                 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1023                         sock_recv_timestamp(msg, sk, skb);
1024                 break;
1025         }
1026
1027         skb_free_datagram(sk, skb);
1028
1029         return err ? : copied;
1030 }
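
/* Illustrative userspace sketch, not part of the original file: when
 * HCI_DATA_DIR and/or HCI_TIME_STAMP are enabled (see setsockopt below),
 * the direction and timestamp arrive as SOL_HCI control messages and can
 * be read with recvmsg():
 *
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg;
 *
 *	recvmsg(fd, &msg, 0);
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level != SOL_HCI)
 *			continue;
 *		if (cmsg->cmsg_type == HCI_CMSG_DIR)
 *			... CMSG_DATA(cmsg) holds an int, 1 for incoming ...
 *		else if (cmsg->cmsg_type == HCI_CMSG_TSTAMP)
 *			... CMSG_DATA(cmsg) holds a struct timeval ...
 *	}
 */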
1031
1032 static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1033                         struct msghdr *msg, size_t msglen)
1034 {
1035         void *buf;
1036         u8 *cp;
1037         struct mgmt_hdr *hdr;
1038         u16 opcode, index, len;
1039         struct hci_dev *hdev = NULL;
1040         const struct hci_mgmt_handler *handler;
1041         bool var_len, no_hdev;
1042         int err;
1043
1044         BT_DBG("got %zu bytes", msglen);
1045
1046         if (msglen < sizeof(*hdr))
1047                 return -EINVAL;
1048
1049         buf = kmalloc(msglen, GFP_KERNEL);
1050         if (!buf)
1051                 return -ENOMEM;
1052
1053         if (memcpy_from_msg(buf, msg, msglen)) {
1054                 err = -EFAULT;
1055                 goto done;
1056         }
1057
1058         hdr = buf;
1059         opcode = __le16_to_cpu(hdr->opcode);
1060         index = __le16_to_cpu(hdr->index);
1061         len = __le16_to_cpu(hdr->len);
1062
1063         if (len != msglen - sizeof(*hdr)) {
1064                 err = -EINVAL;
1065                 goto done;
1066         }
1067
1068         if (opcode >= chan->handler_count ||
1069             chan->handlers[opcode].func == NULL) {
1070                 BT_DBG("Unknown op %u", opcode);
1071                 err = mgmt_cmd_status(sk, index, opcode,
1072                                       MGMT_STATUS_UNKNOWN_COMMAND);
1073                 goto done;
1074         }
1075
1076         handler = &chan->handlers[opcode];
1077
1078         if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1079             !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1080                 err = mgmt_cmd_status(sk, index, opcode,
1081                                       MGMT_STATUS_PERMISSION_DENIED);
1082                 goto done;
1083         }
1084
1085         if (index != MGMT_INDEX_NONE) {
1086                 hdev = hci_dev_get(index);
1087                 if (!hdev) {
1088                         err = mgmt_cmd_status(sk, index, opcode,
1089                                               MGMT_STATUS_INVALID_INDEX);
1090                         goto done;
1091                 }
1092
1093                 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1094                     hci_dev_test_flag(hdev, HCI_CONFIG) ||
1095                     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1096                         err = mgmt_cmd_status(sk, index, opcode,
1097                                               MGMT_STATUS_INVALID_INDEX);
1098                         goto done;
1099                 }
1100
1101                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1102                     !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1103                         err = mgmt_cmd_status(sk, index, opcode,
1104                                               MGMT_STATUS_INVALID_INDEX);
1105                         goto done;
1106                 }
1107         }
1108
1109         no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1110         if (no_hdev != !hdev) {
1111                 err = mgmt_cmd_status(sk, index, opcode,
1112                                       MGMT_STATUS_INVALID_INDEX);
1113                 goto done;
1114         }
1115
1116         var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1117         if ((var_len && len < handler->data_len) ||
1118             (!var_len && len != handler->data_len)) {
1119                 err = mgmt_cmd_status(sk, index, opcode,
1120                                       MGMT_STATUS_INVALID_PARAMS);
1121                 goto done;
1122         }
1123
1124         if (hdev && chan->hdev_init)
1125                 chan->hdev_init(sk, hdev);
1126
1127         cp = buf + sizeof(*hdr);
1128
1129         err = handler->func(sk, hdev, cp, len);
1130         if (err < 0)
1131                 goto done;
1132
1133         err = msglen;
1134
1135 done:
1136         if (hdev)
1137                 hci_dev_put(hdev);
1138
1139         kfree(buf);
1140         return err;
1141 }
1142
1143 static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1144                             size_t len)
1145 {
1146         struct sock *sk = sock->sk;
1147         struct hci_mgmt_chan *chan;
1148         struct hci_dev *hdev;
1149         struct sk_buff *skb;
1150         int err;
1151
1152         BT_DBG("sock %p sk %p", sock, sk);
1153
1154         if (msg->msg_flags & MSG_OOB)
1155                 return -EOPNOTSUPP;
1156
1157         if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
1158                 return -EINVAL;
1159
1160         if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1161                 return -EINVAL;
1162
1163         lock_sock(sk);
1164
1165         switch (hci_pi(sk)->channel) {
1166         case HCI_CHANNEL_RAW:
1167         case HCI_CHANNEL_USER:
1168                 break;
1169         case HCI_CHANNEL_MONITOR:
1170                 err = -EOPNOTSUPP;
1171                 goto done;
1172         default:
1173                 mutex_lock(&mgmt_chan_list_lock);
1174                 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1175                 if (chan)
1176                         err = hci_mgmt_cmd(chan, sk, msg, len);
1177                 else
1178                         err = -EINVAL;
1179
1180                 mutex_unlock(&mgmt_chan_list_lock);
1181                 goto done;
1182         }
1183
1184         hdev = hci_pi(sk)->hdev;
1185         if (!hdev) {
1186                 err = -EBADFD;
1187                 goto done;
1188         }
1189
1190         if (!test_bit(HCI_UP, &hdev->flags)) {
1191                 err = -ENETDOWN;
1192                 goto done;
1193         }
1194
1195         skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1196         if (!skb)
1197                 goto done;
1198
1199         if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1200                 err = -EFAULT;
1201                 goto drop;
1202         }
1203
1204         bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
1205         skb_pull(skb, 1);
1206
1207         if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1208                 /* No permission check is needed for the user channel
1209                  * since that gets enforced when binding the socket.
1210                  *
1211                  * However, check that the packet type is valid.
1212                  */
1213                 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
1214                     bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
1215                     bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
1216                         err = -EINVAL;
1217                         goto drop;
1218                 }
1219
1220                 skb_queue_tail(&hdev->raw_q, skb);
1221                 queue_work(hdev->workqueue, &hdev->tx_work);
1222         } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
1223                 u16 opcode = get_unaligned_le16(skb->data);
1224                 u16 ogf = hci_opcode_ogf(opcode);
1225                 u16 ocf = hci_opcode_ocf(opcode);
1226
1227                 if (((ogf > HCI_SFLT_MAX_OGF) ||
1228                      !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1229                                    &hci_sec_filter.ocf_mask[ogf])) &&
1230                     !capable(CAP_NET_RAW)) {
1231                         err = -EPERM;
1232                         goto drop;
1233                 }
1234
1235                 if (ogf == 0x3f) {
1236                         skb_queue_tail(&hdev->raw_q, skb);
1237                         queue_work(hdev->workqueue, &hdev->tx_work);
1238                 } else {
1239                         /* Stand-alone HCI commands must be flagged as
1240                          * single-command requests.
1241                          */
1242                         bt_cb(skb)->req.start = true;
1243
1244                         skb_queue_tail(&hdev->cmd_q, skb);
1245                         queue_work(hdev->workqueue, &hdev->cmd_work);
1246                 }
1247         } else {
1248                 if (!capable(CAP_NET_RAW)) {
1249                         err = -EPERM;
1250                         goto drop;
1251                 }
1252
1253                 skb_queue_tail(&hdev->raw_q, skb);
1254                 queue_work(hdev->workqueue, &hdev->tx_work);
1255         }
1256
1257         err = len;
1258
1259 done:
1260         release_sock(sk);
1261         return err;
1262
1263 drop:
1264         kfree_skb(skb);
1265         goto done;
1266 }
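
/* Illustrative userspace sketch, not part of the original file: on a RAW or
 * USER channel socket every write starts with the packet type byte, which
 * the sendmsg() handler above strips into bt_cb(skb)->pkt_type.  For
 * example, sending HCI_Reset (opcode 0x0c03, no parameters):
 *
 *	uint8_t cmd[] = { HCI_COMMAND_PKT, 0x03, 0x0c, 0x00 };
 *
 *	if (write(fd, cmd, sizeof(cmd)) < 0)
 *		return -errno;
 *
 * On the RAW channel, commands outside the hci_sec_filter whitelist (such
 * as this one) additionally require CAP_NET_RAW.
 */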
1267
1268 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1269                                char __user *optval, unsigned int len)
1270 {
1271         struct hci_ufilter uf = { .opcode = 0 };
1272         struct sock *sk = sock->sk;
1273         int err = 0, opt = 0;
1274
1275         BT_DBG("sk %p, opt %d", sk, optname);
1276
1277         lock_sock(sk);
1278
1279         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1280                 err = -EBADFD;
1281                 goto done;
1282         }
1283
1284         switch (optname) {
1285         case HCI_DATA_DIR:
1286                 if (get_user(opt, (int __user *)optval)) {
1287                         err = -EFAULT;
1288                         break;
1289                 }
1290
1291                 if (opt)
1292                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1293                 else
1294                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1295                 break;
1296
1297         case HCI_TIME_STAMP:
1298                 if (get_user(opt, (int __user *)optval)) {
1299                         err = -EFAULT;
1300                         break;
1301                 }
1302
1303                 if (opt)
1304                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1305                 else
1306                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1307                 break;
1308
1309         case HCI_FILTER:
1310                 {
1311                         struct hci_filter *f = &hci_pi(sk)->filter;
1312
1313                         uf.type_mask = f->type_mask;
1314                         uf.opcode    = f->opcode;
1315                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1316                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1317                 }
1318
1319                 len = min_t(unsigned int, len, sizeof(uf));
1320                 if (copy_from_user(&uf, optval, len)) {
1321                         err = -EFAULT;
1322                         break;
1323                 }
1324
1325                 if (!capable(CAP_NET_RAW)) {
1326                         uf.type_mask &= hci_sec_filter.type_mask;
1327                         uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1328                         uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1329                 }
1330
1331                 {
1332                         struct hci_filter *f = &hci_pi(sk)->filter;
1333
1334                         f->type_mask = uf.type_mask;
1335                         f->opcode    = uf.opcode;
1336                         *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1337                         *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1338                 }
1339                 break;
1340
1341         default:
1342                 err = -ENOPROTOOPT;
1343                 break;
1344         }
1345
1346 done:
1347         release_sock(sk);
1348         return err;
1349 }
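
/* Illustrative userspace sketch, not part of the original file: programming
 * the per-socket filter handled above, assuming a userspace definition that
 * matches the struct hci_ufilter layout (type_mask, event_mask[2], opcode).
 * Here only HCI events, and of those only Command Complete (0x0e) and
 * Command Status (0x0f), are let through:
 *
 *	struct hci_ufilter flt = {
 *		.type_mask  = 1 << HCI_EVENT_PKT,
 *		.event_mask = { (1 << 0x0e) | (1 << 0x0f), 0 },
 *	};
 *
 *	if (setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt)) < 0)
 *		return -errno;
 *
 * Without CAP_NET_RAW the requested masks are additionally clamped to
 * hci_sec_filter, as done in the HCI_FILTER case above.
 */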
1350
1351 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1352                                char __user *optval, int __user *optlen)
1353 {
1354         struct hci_ufilter uf;
1355         struct sock *sk = sock->sk;
1356         int len, opt, err = 0;
1357
1358         BT_DBG("sk %p, opt %d", sk, optname);
1359
1360         if (get_user(len, optlen))
1361                 return -EFAULT;
1362
1363         lock_sock(sk);
1364
1365         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1366                 err = -EBADFD;
1367                 goto done;
1368         }
1369
1370         switch (optname) {
1371         case HCI_DATA_DIR:
1372                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1373                         opt = 1;
1374                 else
1375                         opt = 0;
1376
1377                 if (put_user(opt, optval))
1378                         err = -EFAULT;
1379                 break;
1380
1381         case HCI_TIME_STAMP:
1382                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1383                         opt = 1;
1384                 else
1385                         opt = 0;
1386
1387                 if (put_user(opt, optval))
1388                         err = -EFAULT;
1389                 break;
1390
1391         case HCI_FILTER:
1392                 {
1393                         struct hci_filter *f = &hci_pi(sk)->filter;
1394
1395                         memset(&uf, 0, sizeof(uf));
1396                         uf.type_mask = f->type_mask;
1397                         uf.opcode    = f->opcode;
1398                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1399                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1400                 }
1401
1402                 len = min_t(unsigned int, len, sizeof(uf));
1403                 if (copy_to_user(optval, &uf, len))
1404                         err = -EFAULT;
1405                 break;
1406
1407         default:
1408                 err = -ENOPROTOOPT;
1409                 break;
1410         }
1411
1412 done:
1413         release_sock(sk);
1414         return err;
1415 }
1416
1417 static const struct proto_ops hci_sock_ops = {
1418         .family         = PF_BLUETOOTH,
1419         .owner          = THIS_MODULE,
1420         .release        = hci_sock_release,
1421         .bind           = hci_sock_bind,
1422         .getname        = hci_sock_getname,
1423         .sendmsg        = hci_sock_sendmsg,
1424         .recvmsg        = hci_sock_recvmsg,
1425         .ioctl          = hci_sock_ioctl,
1426         .poll           = datagram_poll,
1427         .listen         = sock_no_listen,
1428         .shutdown       = sock_no_shutdown,
1429         .setsockopt     = hci_sock_setsockopt,
1430         .getsockopt     = hci_sock_getsockopt,
1431         .connect        = sock_no_connect,
1432         .socketpair     = sock_no_socketpair,
1433         .accept         = sock_no_accept,
1434         .mmap           = sock_no_mmap
1435 };
1436
1437 static struct proto hci_sk_proto = {
1438         .name           = "HCI",
1439         .owner          = THIS_MODULE,
1440         .obj_size       = sizeof(struct hci_pinfo)
1441 };
1442
1443 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1444                            int kern)
1445 {
1446         struct sock *sk;
1447
1448         BT_DBG("sock %p", sock);
1449
1450         if (sock->type != SOCK_RAW)
1451                 return -ESOCKTNOSUPPORT;
1452
1453         sock->ops = &hci_sock_ops;
1454
1455         sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
1456         if (!sk)
1457                 return -ENOMEM;
1458
1459         sock_init_data(sock, sk);
1460
1461         sock_reset_flag(sk, SOCK_ZAPPED);
1462
1463         sk->sk_protocol = protocol;
1464
1465         sock->state = SS_UNCONNECTED;
1466         sk->sk_state = BT_OPEN;
1467
1468         bt_sock_link(&hci_sk_list, sk);
1469         return 0;
1470 }
1471
1472 static const struct net_proto_family hci_sock_family_ops = {
1473         .family = PF_BLUETOOTH,
1474         .owner  = THIS_MODULE,
1475         .create = hci_sock_create,
1476 };
1477
1478 int __init hci_sock_init(void)
1479 {
1480         int err;
1481
1482         BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1483
1484         err = proto_register(&hci_sk_proto, 0);
1485         if (err < 0)
1486                 return err;
1487
1488         err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1489         if (err < 0) {
1490                 BT_ERR("HCI socket registration failed");
1491                 goto error;
1492         }
1493
1494         err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1495         if (err < 0) {
1496                 BT_ERR("Failed to create HCI proc file");
1497                 bt_sock_unregister(BTPROTO_HCI);
1498                 goto error;
1499         }
1500
1501         BT_INFO("HCI socket layer initialized");
1502
1503         return 0;
1504
1505 error:
1506         proto_unregister(&hci_sk_proto);
1507         return err;
1508 }
1509
1510 void hci_sock_cleanup(void)
1511 {
1512         bt_procfs_cleanup(&init_net, "hci");
1513         bt_sock_unregister(BTPROTO_HCI);
1514         proto_unregister(&hci_sk_proto);
1515 }