blob: a4227c777d168cb60105d67517703600208e8b03 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI sockets. */
26
Gustavo Padovan8c520a52012-05-23 04:04:22 -030027#include <linux/export.h>
Johannes Berg787b3062016-01-06 14:38:40 +010028#include <linux/utsname.h>
Marcel Holtmann70ecce92016-08-27 20:23:38 +020029#include <linux/sched.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030#include <asm/unaligned.h>
31
32#include <net/bluetooth/bluetooth.h>
33#include <net/bluetooth/hci_core.h>
Marcel Holtmanncd82e612012-02-20 20:34:38 +010034#include <net/bluetooth/hci_mon.h>
Johan Hedbergfa4335d2015-03-17 13:48:50 +020035#include <net/bluetooth/mgmt.h>
36
37#include "mgmt_util.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070038
Johan Hedberg801c1e82015-03-06 21:08:50 +020039static LIST_HEAD(mgmt_chan_list);
40static DEFINE_MUTEX(mgmt_chan_list_lock);
41
Marcel Holtmann70ecce92016-08-27 20:23:38 +020042static DEFINE_IDA(sock_cookie_ida);
43
Marcel Holtmanncd82e612012-02-20 20:34:38 +010044static atomic_t monitor_promisc = ATOMIC_INIT(0);
45
Linus Torvalds1da177e2005-04-16 15:20:36 -070046/* ----- HCI socket interface ----- */
47
/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* Per-socket protocol state for HCI sockets. The embedded struct bt_sock
 * must stay the first member so the hci_pi() cast above is valid.
 */
struct hci_pinfo {
	struct bt_sock    bt;		/* generic Bluetooth socket base; MUST be first */
	struct hci_dev    *hdev;	/* bound controller, NULL until bind */
	struct hci_filter filter;	/* raw-channel packet/event filter */
	__u32             cmsg_mask;	/* which ancillary data to deliver */
	unsigned short    channel;	/* HCI_CHANNEL_* this socket is bound to */
	unsigned long     flags;	/* HCI_SOCK_* flag bits (see hci_sock_*_flag) */
	__u32             cookie;	/* monitor-tracing ID from sock_cookie_ida; 0 = unset */
	char              comm[TASK_COMM_LEN];	/* task name captured when cookie assigned */
};
61
Marcel Holtmann6befc642015-03-14 19:27:53 -070062void hci_sock_set_flag(struct sock *sk, int nr)
63{
64 set_bit(nr, &hci_pi(sk)->flags);
65}
66
67void hci_sock_clear_flag(struct sock *sk, int nr)
68{
69 clear_bit(nr, &hci_pi(sk)->flags);
70}
71
Marcel Holtmannc85be542015-03-14 19:28:00 -070072int hci_sock_test_flag(struct sock *sk, int nr)
73{
74 return test_bit(nr, &hci_pi(sk)->flags);
75}
76
Johan Hedbergd0f172b2015-03-17 13:48:46 +020077unsigned short hci_sock_get_channel(struct sock *sk)
78{
79 return hci_pi(sk)->channel;
80}
81
Marcel Holtmann70ecce92016-08-27 20:23:38 +020082u32 hci_sock_get_cookie(struct sock *sk)
83{
84 return hci_pi(sk)->cookie;
85}
86
/* Lazily assign a unique tracing cookie to the socket.
 *
 * On first call the cookie is allocated from sock_cookie_ida (starting at 1,
 * so 0 always means "no cookie") and the current task's comm name is captured
 * for monitor reporting.
 *
 * Returns true when a new cookie was generated, false when the socket
 * already had one.
 */
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			/* NOTE(review): allocation failure stores the 0xffffffff
			 * sentinel; hci_sock_free_cookie() would then pass -1 to
			 * ida_simple_remove() — presumably never hit in practice,
			 * but worth confirming.
			 */
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}
103
/* Release the socket's tracing cookie back to sock_cookie_ida.
 *
 * The cookie field is overwritten with the 0xffffffff sentinel (rather than
 * 0) so a released socket is distinguishable from one that never had a
 * cookie.  No-op when no cookie was ever assigned (cookie == 0).
 */
static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}
113
Jiri Slaby93919762015-02-19 15:20:43 +0100114static inline int hci_test_bit(int nr, const void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115{
Jiri Slaby93919762015-02-19 15:20:43 +0100116 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117}
118
/* Security filter */
#define HCI_SFLT_MAX_OGF 5

/* Bitmask sets describing which packet types, events and commands an
 * unprivileged raw socket may see/send.  ocf_mask is indexed by OGF, each
 * entry a 128-bit mask over OCF values.
 */
struct hci_sec_filter {
	__u32 type_mask;	/* allowed HCI packet types */
	__u32 event_mask[2];	/* allowed HCI events (64 bits) */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];	/* allowed commands per OGF */
};
127
/* Default security policy applied to raw sockets opened without
 * CAP_NET_RAW.  The hex values are hand-maintained bitmaps over HCI
 * packet types, event codes and per-OGF command OCFs.
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
148
/* Global list of all open HCI sockets, protected by its rwlock.  Writers
 * (socket create/release) take the lock exclusively; broadcast paths only
 * need read access.
 */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
152
/* Decide whether a packet must be withheld from a raw socket according to
 * that socket's hci_filter.
 *
 * Returns true when the packet is filtered OUT (i.e. must NOT be
 * delivered), false when it passes the filter.
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	/* Packet type not enabled in the filter -> drop */
	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	/* First byte of an event packet is the event code */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* For Command Complete the opcode lives at offset 3 of the event */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	/* For Command Status the opcode lives at offset 4 of the event */
	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
189
Linus Torvalds1da177e2005-04-16 15:20:36 -0700190/* Send frame to RAW socket */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100191void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192{
193 struct sock *sk;
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100194 struct sk_buff *skb_copy = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195
196 BT_DBG("hdev %p len %d", hdev, skb->len);
197
198 read_lock(&hci_sk_list.lock);
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100199
Sasha Levinb67bfe02013-02-27 17:06:00 -0800200 sk_for_each(sk, &hci_sk_list.head) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201 struct sk_buff *nskb;
202
203 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
204 continue;
205
206 /* Don't send frame to the socket it came from */
207 if (skb->sk == sk)
208 continue;
209
Marcel Holtmann23500182013-08-26 21:40:52 -0700210 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100211 if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
212 hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
213 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
214 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
Marcel Holtmannbb775432015-10-09 16:13:50 +0200215 continue;
Marcel Holtmann23500182013-08-26 21:40:52 -0700216 if (is_filtered_packet(sk, skb))
217 continue;
218 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
219 if (!bt_cb(skb)->incoming)
220 continue;
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100221 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
222 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
223 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
Marcel Holtmann23500182013-08-26 21:40:52 -0700224 continue;
225 } else {
226 /* Don't send frame to other channel types */
Johan Hedberga40c4062010-12-08 00:21:07 +0200227 continue;
Marcel Holtmann23500182013-08-26 21:40:52 -0700228 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700229
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100230 if (!skb_copy) {
231 /* Create a private copy with headroom */
Octavian Purdilabad93e92014-06-12 01:36:26 +0300232 skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100233 if (!skb_copy)
234 continue;
235
236 /* Put type byte before the data */
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100237 memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100238 }
239
240 nskb = skb_clone(skb_copy, GFP_ATOMIC);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200241 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700242 continue;
243
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244 if (sock_queue_rcv_skb(sk, nskb))
245 kfree_skb(nskb);
246 }
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100247
248 read_unlock(&hci_sk_list.lock);
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100249
250 kfree_skb(skb_copy);
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100251}
252
/* Send frame to sockets with specific channel.
 *
 * Clones @skb to every bound socket on @channel that has socket-flag @flag
 * set (e.g. HCI_SOCK_TRUSTED), skipping @skip_sk (may be NULL).  The
 * caller retains ownership of @skb.
 */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}
290
/* Send frame to monitor socket.
 *
 * Mirrors a controller packet to all monitor-channel listeners, prefixed
 * with a hci_mon_hdr carrying a direction-aware monitor opcode.  Cheap
 * early exit when no monitor socket is in promiscuous mode.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map the HCI packet type (and direction for data packets) to the
	 * corresponding monitor opcode; unknown types are not mirrored.
	 */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
344
/* Mirror a management (control-channel) event to monitor listeners.
 *
 * For each control-channel socket with @flag set (except @skip_sk), builds
 * a per-socket monitor frame: [cookie(4)][event(2)][data] wrapped in a
 * hci_mon_hdr with opcode HCI_MON_CTRL_EVENT, stamped with @tstamp, and
 * broadcasts it on the monitor channel.  @hdev may be NULL, in which case
 * the index is MGMT_INDEX_NONE.
 */
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		/* 6 = 4-byte cookie + 2-byte event code */
		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			memcpy(skb_put(skb, data_len), data, data_len);

		skb->tstamp = tstamp;

		hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				    HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
398
/* Build a monitor frame describing a device lifecycle event.
 *
 * Translates HCI_DEV_* events into the matching HCI_MON_* index frames
 * (new/del/open/close/info).  Returns a freshly allocated, timestamped skb
 * the caller must free, or NULL for unhandled events or allocation
 * failure.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		/* No index info until the manufacturer is known */
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
477
/* Build a HCI_MON_CTRL_OPEN monitor frame announcing a control-channel
 * socket: cookie, format, mgmt version, trust flags and the opening task's
 * comm name.  Returns a timestamped skb the caller must free, or NULL on
 * allocation failure.
 */
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format = 0x0002;	/* control-channel open format identifier */
	u8 ver[3];
	u32 flags;

	/* 14 = cookie(4) + format(2) + version(3) + flags(4) + comm len(1) */
	skb = bt_skb_alloc(14 + TASK_COMM_LEN , GFP_ATOMIC);
	if (!skb)
		return NULL;

	mgmt_fill_version_info(ver);
	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	memcpy(skb_put(skb, sizeof(ver)), ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	*skb_put(skb, 1) = TASK_COMM_LEN;
	memcpy(skb_put(skb, TASK_COMM_LEN), hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
509
/* Build a HCI_MON_CTRL_CLOSE monitor frame carrying only the closing
 * socket's cookie.  Returns a timestamped skb the caller must free, or
 * NULL on allocation failure.
 */
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
530
/* Build a HCI_MON_CTRL_COMMAND monitor frame mirroring a management
 * command sent on a control-channel socket: cookie, opcode and optional
 * payload.  Returns a timestamped skb the caller must free, or NULL on
 * allocation failure.
 */
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* 6 = cookie(4) + opcode(2) */
	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		memcpy(skb_put(skb, len), buf, len);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
557
/* Queue a printf-formatted HCI_MON_SYSTEM_NOTE frame (NUL-terminated text)
 * on a single monitor socket.  Uses a measuring vsnprintf pass to size the
 * buffer, then formats into the skb.  Failures (allocation or queueing)
 * silently drop the note.
 */
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	/* First pass: compute formatted length without writing */
	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	/* Second pass: format into the exactly-sized buffer, then
	 * explicitly NUL-terminate.
	 */
	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
589
/* Replay the current controller state to a newly attached monitor socket.
 *
 * For every registered hci_dev, queues a synthetic HCI_DEV_REG frame,
 * then (if the device is running) HCI_DEV_OPEN, then either HCI_DEV_UP or
 * HCI_DEV_SETUP depending on device state — reproducing the event stream
 * the socket would have seen had it been listening from the start.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Devices that were never opened have nothing further */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
631
/* Replay a CTRL_OPEN frame for every existing control-channel socket to a
 * newly attached monitor socket, so the monitor learns about management
 * clients that connected before it did.
 */
static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
654
Marcel Holtmann040030e2012-02-20 14:50:37 +0100655/* Generate internal stack event */
656static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
657{
658 struct hci_event_hdr *hdr;
659 struct hci_ev_stack_internal *ev;
660 struct sk_buff *skb;
661
662 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
663 if (!skb)
664 return;
665
Marcel Holtmann8528d3f2015-11-08 07:47:11 +0100666 hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
Marcel Holtmann040030e2012-02-20 14:50:37 +0100667 hdr->evt = HCI_EV_STACK_INTERNAL;
668 hdr->plen = sizeof(*ev) + dlen;
669
Marcel Holtmann8528d3f2015-11-08 07:47:11 +0100670 ev = (void *)skb_put(skb, sizeof(*ev) + dlen);
Marcel Holtmann040030e2012-02-20 14:50:37 +0100671 ev->type = type;
672 memcpy(ev->data, data, dlen);
673
674 bt_cb(skb)->incoming = 1;
675 __net_timestamp(skb);
676
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100677 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
Marcel Holtmann040030e2012-02-20 14:50:37 +0100678 hci_send_to_sock(hdev, skb);
679 kfree_skb(skb);
680}
681
/* Notify HCI sockets of a controller lifecycle event.
 *
 * Three independent actions, each conditional on the event:
 *  1. mirror the event to the monitor channel (if any monitor is active);
 *  2. for events up to HCI_DEV_DOWN, synthesize an internal stack event
 *     delivered to raw sockets;
 *  3. on HCI_DEV_UNREG, detach every socket bound to the device, waking
 *     them with EPIPE and dropping each socket's device reference.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				/* Drop the reference bind took on hdev */
				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
727
Johan Hedberg801c1e82015-03-06 21:08:50 +0200728static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
729{
730 struct hci_mgmt_chan *c;
731
732 list_for_each_entry(c, &mgmt_chan_list, list) {
733 if (c->channel == channel)
734 return c;
735 }
736
737 return NULL;
738}
739
/* Locked lookup of a management channel by number.  Takes
 * mgmt_chan_list_lock around the unlocked helper; returns NULL when the
 * channel is not registered.
 */
static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}
750
/* Register a management channel handler.
 *
 * Channel numbers below HCI_CHANNEL_CONTROL are reserved and rejected
 * with -EINVAL; duplicate registrations return -EALREADY.  Returns 0 on
 * success.
 */
int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);
769
/* Unregister a previously registered management channel handler. */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
777
/* Release an HCI socket.
 *
 * Per-channel teardown: monitor sockets drop the promiscuous count;
 * control sockets announce their close to the monitor and free their
 * cookie.  Then the socket is unlinked, any bound device is released
 * (with full controller shutdown for exclusive USER-channel access), and
 * pending queues are purged.  Always returns 0.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing an user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
838
/* HCIBLOCKADDR ioctl helper: copy a bdaddr from userspace and add it to
 * the device blacklist (as a BR/EDR address).  Returns 0 on success,
 * -EFAULT on bad user pointer, or the list-add error.
 */
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
855
/* HCIUNBLOCKADDR ioctl helper: copy a bdaddr from userspace and remove it
 * from the device blacklist.  Returns 0 on success, -EFAULT on bad user
 * pointer, or the list-del error.
 */
static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
872
/* Ioctls that require bound socket.
 *
 * Validates the bound device (present, not in exclusive user-channel use,
 * configured, primary controller) before dispatching.  HCISETRAW is
 * deliberately retired: it still requires CAP_NET_ADMIN but always
 * returns -EOPNOTSUPP.  Unknown commands fall through to -ENOIOCTLCMD so
 * the caller can try further handlers.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
916
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Take the socket lock only long enough to validate the channel;
	 * ioctls are available exclusively on raw HCI sockets.
	 */
	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* Drop the lock for the device-level commands below: they do not
	 * touch per-socket state and may sleep or take other locks.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Everything else needs the socket bound to a device; retake the
	 * lock so hci_pi(sk)->hdev cannot change underneath us.
	 */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
989
/* Bind an HCI socket to a channel (and, for some channels, a device).
 *
 * The channel chosen in sockaddr_hci.hci_channel selects very different
 * semantics: raw sniffing (RAW), exclusive device ownership (USER),
 * global tracing (MONITOR), log injection (LOGGING) or one of the
 * registered management channels. Returns 0 on success or a negative
 * errno; on success the socket enters BT_BOUND state.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr); shorter addresses are accepted and
	 * the remainder stays zeroed from the memset above.
	 */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* A socket can be bound only once. */
	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* HCI_DEV_NONE leaves the raw socket unbound to any
		 * particular device; otherwise take a reference and
		 * mark the device promiscuous so it forwards traffic.
		 */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel requires a concrete device ... */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* ... and privilege, since it takes exclusive control. */
		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Refuse devices that are initializing, still in setup or
		 * config, or already up for normal use (an up device is
		 * only acceptable while the HCI_AUTO_OFF grace period is
		 * active, i.e. nobody really owns it yet).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* test-and-set guarantees a single user-channel owner. */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		/* Hide the controller from the management interface while
		 * it is owned by the user channel.
		 */
		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening an user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				/* Undo the ownership claim on failure. */
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		/* Monitor is global; binding to a specific device makes
		 * no sense.
		 */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Replay current state so a new monitor sees version
		 * notes, existing devices and open control sockets.
		 */
		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}
		break;

	default:
		/* Remaining channel numbers must match a registered
		 * management channel.
		 */
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			struct sk_buff *skb;

			hci_sock_gen_cookie(sk);

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
1200
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001201static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1202 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203{
Marcel Holtmann8528d3f2015-11-08 07:47:11 +01001204 struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205 struct sock *sk = sock->sk;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -07001206 struct hci_dev *hdev;
1207 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208
1209 BT_DBG("sock %p sk %p", sock, sk);
1210
Marcel Holtmann06f43cb2013-08-26 00:06:30 -07001211 if (peer)
1212 return -EOPNOTSUPP;
1213
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214 lock_sock(sk);
1215
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -07001216 hdev = hci_pi(sk)->hdev;
1217 if (!hdev) {
1218 err = -EBADFD;
1219 goto done;
1220 }
1221
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222 *addr_len = sizeof(*haddr);
1223 haddr->hci_family = AF_BLUETOOTH;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +01001224 haddr->hci_dev = hdev->id;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -07001225 haddr->hci_channel= hci_pi(sk)->channel;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -07001227done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228 release_sock(sk);
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -07001229 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230}
1231
/* Attach ancillary data (direction and/or timestamp) to a received
 * message, according to the per-socket cmsg_mask configured via
 * HCI_DATA_DIR and HCI_TIME_STAMP socket options.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		/* Report whether the frame was incoming or outgoing. */
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit compat callers expect a compat_timeval layout
		 * unless the ABI already uses 64-bit time values.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001268
/* Receive one queued frame from an HCI socket.
 *
 * Returns the number of bytes copied (the full frame length when
 * MSG_TRUNC is set), 0 when the socket is closed, or a negative errno.
 * Out-of-band data and the logging channel (write-only) are rejected.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* The logging channel only accepts writes. */
	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	/* Remember the full frame length so MSG_TRUNC can report it. */
	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	/* Ancillary data depends on the channel type: raw sockets get
	 * the configured direction/timestamp cmsgs, everything else the
	 * plain socket timestamp.
	 */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
1324
/* Parse and dispatch one management command sent on a management
 * channel socket.
 *
 * The message starts with a struct mgmt_hdr (opcode, device index,
 * payload length). After validation the registered handler for the
 * opcode is invoked; validation failures are answered with a mgmt
 * command-status event rather than (only) an errno. Returns msglen on
 * success, the mgmt_cmd_status() result for protocol-level rejections,
 * or a negative errno for transport-level failures.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Header length field must match the actual payload size. */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only use handlers explicitly marked as
	 * safe for untrusted access.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices in setup/config or owned by a user channel are
		 * not addressable via mgmt.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured devices only accept handlers that opt in. */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Handlers flagged NO_HDEV must be called without a device and
	 * vice versa; a mismatch is an invalid index.
	 */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Variable-length commands need at least data_len bytes,
	 * fixed-length commands exactly data_len bytes.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	/* Give the channel a chance to do per-device initialization. */
	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1448
/* Forward a user-supplied logging frame to the monitor channel.
 *
 * The frame is a struct hci_mon_hdr followed by a priority byte, an
 * ident-length byte, an optional NUL-terminated ident string and a
 * NUL-terminated message. After validation the opcode is rewritten to
 * HCI_MON_USER_LOGGING and the frame is broadcast to trusted monitor
 * sockets. Returns the frame length on success or a negative errno.
 */
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter are invalid packets.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	/* The header length field must match the actual payload size. */
	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	/* Opcode 0x0000 is the only user-settable logging opcode; it is
	 * rewritten to HCI_MON_USER_LOGGING below.
	 */
	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	/* Validate the device index if one was given; MGMT_INDEX_NONE
	 * marks a frame not tied to any controller.
	 */
	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	/* hci_send_to_channel() clones the skb, so the original is
	 * always freed at the drop label below.
	 */
	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}
1532
/* Send a frame on an HCI socket.
 *
 * Routing depends on the channel: monitor sockets are read-only,
 * logging frames go to hci_logging_frame(), management channels to
 * hci_mgmt_cmd(), and raw/user channels inject the frame into the
 * bound device's queues. The first byte of a raw/user frame is the
 * HCI packet type indicator. Returns the number of bytes consumed or
 * a negative errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum of packet-type byte plus a 3-byte HCI header. */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		/* Management channels dispatch through their registered
		 * command handlers under the channel-list mutex.
		 */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte selects the packet type and is stripped off. */
	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the unprivileged security filter
		 * require CAP_NET_RAW.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		/* Vendor-specific commands (OGF 0x3f) bypass the command
		 * queue and go out through the raw queue.
		 */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw data packets always require CAP_NET_RAW. */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1671
/* Set SOL_HCI socket options: direction/timestamp cmsg flags and the
 * raw-socket event filter.
 *
 * Only raw channel sockets accept these options; others get -EBADFD.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Seed uf with the current filter so a short write from
		 * user space only updates the leading fields.
		 */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged sockets cannot widen the filter beyond the
		 * security filter's packet types and events.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1757
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001758static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1759 char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760{
1761 struct hci_ufilter uf;
1762 struct sock *sk = sock->sk;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001763 int len, opt, err = 0;
1764
1765 BT_DBG("sk %p, opt %d", sk, optname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766
Marcel Holtmann47b0f5732016-08-27 20:23:37 +02001767 if (level != SOL_HCI)
1768 return -ENOPROTOOPT;
1769
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770 if (get_user(len, optlen))
1771 return -EFAULT;
1772
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001773 lock_sock(sk);
1774
1775 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001776 err = -EBADFD;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001777 goto done;
1778 }
1779
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 switch (optname) {
1781 case HCI_DATA_DIR:
1782 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1783 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001784 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 opt = 0;
1786
1787 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001788 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 break;
1790
1791 case HCI_TIME_STAMP:
1792 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1793 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001794 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 opt = 0;
1796
1797 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001798 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 break;
1800
1801 case HCI_FILTER:
1802 {
1803 struct hci_filter *f = &hci_pi(sk)->filter;
1804
Mathias Krausee15ca9a2012-08-15 11:31:46 +00001805 memset(&uf, 0, sizeof(uf));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 uf.type_mask = f->type_mask;
1807 uf.opcode = f->opcode;
1808 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1809 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1810 }
1811
1812 len = min_t(unsigned int, len, sizeof(uf));
1813 if (copy_to_user(optval, &uf, len))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001814 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 break;
1816
1817 default:
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001818 err = -ENOPROTOOPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 break;
1820 }
1821
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001822done:
1823 release_sock(sk);
1824 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825}
1826
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08001827static const struct proto_ops hci_sock_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828 .family = PF_BLUETOOTH,
1829 .owner = THIS_MODULE,
1830 .release = hci_sock_release,
1831 .bind = hci_sock_bind,
1832 .getname = hci_sock_getname,
1833 .sendmsg = hci_sock_sendmsg,
1834 .recvmsg = hci_sock_recvmsg,
1835 .ioctl = hci_sock_ioctl,
1836 .poll = datagram_poll,
1837 .listen = sock_no_listen,
1838 .shutdown = sock_no_shutdown,
1839 .setsockopt = hci_sock_setsockopt,
1840 .getsockopt = hci_sock_getsockopt,
1841 .connect = sock_no_connect,
1842 .socketpair = sock_no_socketpair,
1843 .accept = sock_no_accept,
1844 .mmap = sock_no_mmap
1845};
1846
/* Protocol descriptor for HCI sockets. obj_size makes sk_alloc()
 * reserve room for struct hci_pinfo, the per-socket HCI state
 * accessed via hci_pi(sk).
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1852
/* Create a new PF_BLUETOOTH/BTPROTO_HCI socket.
 *
 * Only SOCK_RAW is supported; anything else fails with
 * -ESOCKTNOSUPPORT. On success the socket is initialized in the
 * BT_OPEN state and linked into the global hci_sk_list so it can
 * receive HCI traffic once bound. Returns 0 on success or -ENOMEM
 * if sk allocation fails.
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	/* Wire sk and sock together and set generic defaults */
	sock_init_data(sock, sk);

	/* Mark the socket as live (not zapped/orphaned) */
	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	/* Make the socket visible to the HCI layer */
	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
1881
/* PF_BLUETOOTH family hook; registered for BTPROTO_HCI so that
 * socket(AF_BLUETOOTH, ..., BTPROTO_HCI) lands in hci_sock_create().
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1887
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888int __init hci_sock_init(void)
1889{
1890 int err;
1891
Marcel Holtmannb0a8e282015-01-11 15:18:17 -08001892 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1893
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 err = proto_register(&hci_sk_proto, 0);
1895 if (err < 0)
1896 return err;
1897
1898 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001899 if (err < 0) {
1900 BT_ERR("HCI socket registration failed");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 goto error;
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001902 }
1903
Al Virob0316612013-04-04 19:14:33 -04001904 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001905 if (err < 0) {
1906 BT_ERR("Failed to create HCI proc file");
1907 bt_sock_unregister(BTPROTO_HCI);
1908 goto error;
1909 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 BT_INFO("HCI socket layer initialized");
1912
1913 return 0;
1914
1915error:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 proto_unregister(&hci_sk_proto);
1917 return err;
1918}
1919
/* Tear down the HCI socket layer in the exact reverse order of
 * hci_sock_init(): proc entry first, then the BTPROTO_HCI family,
 * then the "HCI" proto.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}