Bluetooth: Add support for set_powered management command
net/bluetooth/hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI core. */
26
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <net/sock.h>
45
46 #include <asm/system.h>
47 #include <linux/uaccess.h>
48 #include <asm/unaligned.h>
49
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52
53 #define AUTO_OFF_TIMEOUT 2000	/* ms */
54
55 static void hci_cmd_task(unsigned long arg);
56 static void hci_rx_task(unsigned long arg);
57 static void hci_tx_task(unsigned long arg);
58 static void hci_notify(struct hci_dev *hdev, int event);
59
60 static DEFINE_RWLOCK(hci_task_lock);
61
62 /* HCI device list */
63 LIST_HEAD(hci_dev_list);
64 DEFINE_RWLOCK(hci_dev_list_lock);
65
66 /* HCI callback list */
67 LIST_HEAD(hci_cb_list);
68 DEFINE_RWLOCK(hci_cb_list_lock);
69
70 /* HCI protocols */
71 #define HCI_MAX_PROTO   2
72 struct hci_proto *hci_proto[HCI_MAX_PROTO];
73
74 /* HCI notifiers list */
75 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
76
77 /* ---- HCI notifications ---- */
78
79 int hci_register_notifier(struct notifier_block *nb)
80 {
81         return atomic_notifier_chain_register(&hci_notifier, nb);
82 }
83
84 int hci_unregister_notifier(struct notifier_block *nb)
85 {
86         return atomic_notifier_chain_unregister(&hci_notifier, nb);
87 }
88
89 static void hci_notify(struct hci_dev *hdev, int event)
90 {
91         atomic_notifier_call_chain(&hci_notifier, event, hdev);
92 }
93
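/*
 * A minimal usage sketch (hypothetical, not part of this file): another
 * kernel subsystem can watch device state transitions through the chain
 * above. The callback and block names here are illustrative only.
 */
#if 0
static int example_hci_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
{
        struct hci_dev *hdev = ptr;

        if (event == HCI_DEV_UP)
                BT_DBG("%s came up", hdev->name);

        return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
        .notifier_call = example_hci_event,
};

/* module init:  hci_register_notifier(&example_nb);   */
/* module exit:  hci_unregister_notifier(&example_nb); */
#endif
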
94 /* ---- HCI requests ---- */
95
96 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
97 {
98         BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
99
100         /* If the request has set req_last_cmd (typical for multi-HCI
101          * command requests), check whether the completed command
102          * matches it, and if not just return. Single HCI command
103          * requests typically leave req_last_cmd as 0. */
104         if (hdev->req_last_cmd && cmd != hdev->req_last_cmd)
105                 return;
106
107         if (hdev->req_status == HCI_REQ_PEND) {
108                 hdev->req_result = result;
109                 hdev->req_status = HCI_REQ_DONE;
110                 wake_up_interruptible(&hdev->req_wait_q);
111         }
112 }
113
114 static void hci_req_cancel(struct hci_dev *hdev, int err)
115 {
116         BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118         if (hdev->req_status == HCI_REQ_PEND) {
119                 hdev->req_result = err;
120                 hdev->req_status = HCI_REQ_CANCELED;
121                 wake_up_interruptible(&hdev->req_wait_q);
122         }
123 }
124
125 /* Execute request and wait for completion. */
126 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
127                                 unsigned long opt, __u32 timeout)
128 {
129         DECLARE_WAITQUEUE(wait, current);
130         int err = 0;
131
132         BT_DBG("%s start", hdev->name);
133
134         hdev->req_status = HCI_REQ_PEND;
135
136         add_wait_queue(&hdev->req_wait_q, &wait);
137         set_current_state(TASK_INTERRUPTIBLE);
138
139         req(hdev, opt);
140         schedule_timeout(timeout);
141
142         remove_wait_queue(&hdev->req_wait_q, &wait);
143
144         if (signal_pending(current))
145                 return -EINTR;
146
147         switch (hdev->req_status) {
148         case HCI_REQ_DONE:
149                 err = -bt_err(hdev->req_result);
150                 break;
151
152         case HCI_REQ_CANCELED:
153                 err = -hdev->req_result;
154                 break;
155
156         default:
157                 err = -ETIMEDOUT;
158                 break;
159         }
160
161         hdev->req_last_cmd = hdev->req_status = hdev->req_result = 0;
162
163         BT_DBG("%s end: err %d", hdev->name, err);
164
165         return err;
166 }
167
168 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169                                 unsigned long opt, __u32 timeout)
170 {
171         int ret;
172
173         if (!test_bit(HCI_UP, &hdev->flags))
174                 return -ENETDOWN;
175
176         /* Serialize all requests */
177         hci_req_lock(hdev);
178         ret = __hci_request(hdev, req, opt, timeout);
179         hci_req_unlock(hdev);
180
181         return ret;
182 }
183
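/*
 * Sketch of the request pattern (illustrative, mirroring hci_scan_req
 * below): the request callback only queues HCI commands and returns;
 * completion is signalled asynchronously when the event handler calls
 * hci_req_complete(), which wakes __hci_request() back up.
 */
#if 0
static void example_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        /* Queues the command; does not wait for the reply. */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

/* err = hci_request(hdev, example_req, SCAN_PAGE,
 *			msecs_to_jiffies(HCI_INIT_TIMEOUT)); */
#endif
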
184 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
185 {
186         BT_DBG("%s %ld", hdev->name, opt);
187
188         /* Reset device */
189         hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
190 }
191
192 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
193 {
194         struct sk_buff *skb;
195         __le16 param;
196         __u8 flt_type;
197
198         BT_DBG("%s %ld", hdev->name, opt);
199
200         /* Driver initialization */
201
202         /* Special commands */
203         while ((skb = skb_dequeue(&hdev->driver_init))) {
204                 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
205                 skb->dev = (void *) hdev;
206
207                 skb_queue_tail(&hdev->cmd_q, skb);
208                 tasklet_schedule(&hdev->cmd_task);
209         }
210         skb_queue_purge(&hdev->driver_init);
211
212         /* Mandatory initialization */
213
214         /* Reset */
215         if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
216                 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
217
218         /* Read Local Supported Features */
219         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
220
221         /* Read Local Version */
222         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
223
224         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
225         hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
226
227 #if 0
228         /* Host buffer size */
229         {
230                 struct hci_cp_host_buffer_size cp;
231                 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
232                 cp.sco_mtu = HCI_MAX_SCO_SIZE;
233                 cp.acl_max_pkt = cpu_to_le16(0xffff);
234                 cp.sco_max_pkt = cpu_to_le16(0xffff);
235                 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
236         }
237 #endif
238
239         /* Read BD Address */
240         hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
241
242         /* Read Class of Device */
243         hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
244
245         /* Read Local Name */
246         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
247
248         /* Read Voice Setting */
249         hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
250
251         /* Optional initialization */
252
253         /* Clear Event Filters */
254         flt_type = HCI_FLT_CLEAR_ALL;
255         hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
256
257         /* Page timeout ~20 secs */
258         param = cpu_to_le16(0x8000);
259         hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
260
261         /* Connection accept timeout ~20 secs */
262         param = cpu_to_le16(0x7d00);
263         hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
264
265         hdev->req_last_cmd = HCI_OP_WRITE_CA_TIMEOUT;
266 }
267
268 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
269 {
270         __u8 scan = opt;
271
272         BT_DBG("%s %x", hdev->name, scan);
273
274         /* Inquiry and Page scans */
275         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
276 }
277
278 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
279 {
280         __u8 auth = opt;
281
282         BT_DBG("%s %x", hdev->name, auth);
283
284         /* Authentication */
285         hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
286 }
287
288 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
289 {
290         __u8 encrypt = opt;
291
292         BT_DBG("%s %x", hdev->name, encrypt);
293
294         /* Encryption */
295         hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
296 }
297
298 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
299 {
300         __le16 policy = cpu_to_le16(opt);
301
302         BT_DBG("%s %x", hdev->name, policy);
303
304         /* Default link policy */
305         hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
306 }
307
308 /* Get HCI device by index.
309  * Device is held on return. */
310 struct hci_dev *hci_dev_get(int index)
311 {
312         struct hci_dev *hdev = NULL;
313         struct list_head *p;
314
315         BT_DBG("%d", index);
316
317         if (index < 0)
318                 return NULL;
319
320         read_lock(&hci_dev_list_lock);
321         list_for_each(p, &hci_dev_list) {
322                 struct hci_dev *d = list_entry(p, struct hci_dev, list);
323                 if (d->id == index) {
324                         hdev = hci_dev_hold(d);
325                         break;
326                 }
327         }
328         read_unlock(&hci_dev_list_lock);
329         return hdev;
330 }
331
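/*
 * Reference-counting sketch (illustrative): every successful
 * hci_dev_get() must be balanced by hci_dev_put() once the caller is
 * done with the device, as hci_inquiry() and hci_dev_cmd() below do.
 */
#if 0
        struct hci_dev *hdev = hci_dev_get(0);	/* hold hci0 */

        if (hdev) {
                /* ... use hdev ... */
                hci_dev_put(hdev);		/* drop the reference */
        }
#endif
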
332 /* ---- Inquiry support ---- */
333 static void inquiry_cache_flush(struct hci_dev *hdev)
334 {
335         struct inquiry_cache *cache = &hdev->inq_cache;
336         struct inquiry_entry *next  = cache->list, *e;
337
338         BT_DBG("cache %p", cache);
339
340         cache->list = NULL;
341         while ((e = next)) {
342                 next = e->next;
343                 kfree(e);
344         }
345 }
346
347 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
348 {
349         struct inquiry_cache *cache = &hdev->inq_cache;
350         struct inquiry_entry *e;
351
352         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
353
354         for (e = cache->list; e; e = e->next)
355                 if (!bacmp(&e->data.bdaddr, bdaddr))
356                         break;
357         return e;
358 }
359
360 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
361 {
362         struct inquiry_cache *cache = &hdev->inq_cache;
363         struct inquiry_entry *ie;
364
365         BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
366
367         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
368         if (!ie) {
369                 /* Entry not in the cache. Add new one. */
370                 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
371                 if (!ie)
372                         return;
373
374                 ie->next = cache->list;
375                 cache->list = ie;
376         }
377
378         memcpy(&ie->data, data, sizeof(*data));
379         ie->timestamp = jiffies;
380         cache->timestamp = jiffies;
381 }
382
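/*
 * Worked sketch (hypothetical values; 'ev' stands in for an assumed
 * inquiry result event): the event handler feeds results into the cache,
 * so repeated responses from one remote device collapse into a single,
 * freshly timestamped entry.
 */
#if 0
        struct inquiry_data data;

        memset(&data, 0, sizeof(data));
        bacpy(&data.bdaddr, &ev->bdaddr);
        data.pscan_rep_mode = ev->pscan_rep_mode;

        hci_dev_lock(hdev);
        hci_inquiry_cache_update(hdev, &data);
        hci_dev_unlock(hdev);
#endif
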
383 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
384 {
385         struct inquiry_cache *cache = &hdev->inq_cache;
386         struct inquiry_info *info = (struct inquiry_info *) buf;
387         struct inquiry_entry *e;
388         int copied = 0;
389
390         for (e = cache->list; e && copied < num; e = e->next, copied++) {
391                 struct inquiry_data *data = &e->data;
392                 bacpy(&info->bdaddr, &data->bdaddr);
393                 info->pscan_rep_mode    = data->pscan_rep_mode;
394                 info->pscan_period_mode = data->pscan_period_mode;
395                 info->pscan_mode        = data->pscan_mode;
396                 memcpy(info->dev_class, data->dev_class, 3);
397                 info->clock_offset      = data->clock_offset;
398                 info++;
399         }
400
401         BT_DBG("cache %p, copied %d", cache, copied);
402         return copied;
403 }
404
405 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
406 {
407         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
408         struct hci_cp_inquiry cp;
409
410         BT_DBG("%s", hdev->name);
411
412         if (test_bit(HCI_INQUIRY, &hdev->flags))
413                 return;
414
415         /* Start Inquiry */
416         memcpy(&cp.lap, &ir->lap, 3);
417         cp.length  = ir->length;
418         cp.num_rsp = ir->num_rsp;
419         hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
420 }
421
422 int hci_inquiry(void __user *arg)
423 {
424         __u8 __user *ptr = arg;
425         struct hci_inquiry_req ir;
426         struct hci_dev *hdev;
427         int err = 0, do_inquiry = 0, max_rsp;
428         long timeo;
429         __u8 *buf;
430
431         if (copy_from_user(&ir, ptr, sizeof(ir)))
432                 return -EFAULT;
433
434         if (!(hdev = hci_dev_get(ir.dev_id)))
435                 return -ENODEV;
436
437         hci_dev_lock_bh(hdev);
438         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
439                                 inquiry_cache_empty(hdev) ||
440                                 ir.flags & IREQ_CACHE_FLUSH) {
441                 inquiry_cache_flush(hdev);
442                 do_inquiry = 1;
443         }
444         hci_dev_unlock_bh(hdev);
445
446         timeo = ir.length * msecs_to_jiffies(2000);
447
448         if (do_inquiry) {
449                 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
450                 if (err < 0)
451                         goto done;
452         }
453
454         /* For an unlimited number of responses, use a buffer with 255 entries */
455         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
456
457         /* inquiry_cache_dump() can't sleep, so dump into a temporary
458          * buffer first and then copy it to user space.
459          */
460         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
461         if (!buf) {
462                 err = -ENOMEM;
463                 goto done;
464         }
465
466         hci_dev_lock_bh(hdev);
467         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
468         hci_dev_unlock_bh(hdev);
469
470         BT_DBG("num_rsp %d", ir.num_rsp);
471
472         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
473                 ptr += sizeof(ir);
474                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
475                                         ir.num_rsp))
476                         err = -EFAULT;
477         } else
478                 err = -EFAULT;
479
480         kfree(buf);
481
482 done:
483         hci_dev_put(hdev);
484         return err;
485 }
486
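/*
 * Userspace view (illustrative sketch, not kernel code): hci_inquiry()
 * above backs the HCIINQUIRY ioctl on a raw HCI socket. A caller
 * typically reserves room for the request header plus num_rsp
 * inquiry_info entries in one buffer.
 */
#if 0
        struct {
                struct hci_inquiry_req ir;
                struct inquiry_info info[8];
        } buf = {
                .ir = {
                        .dev_id  = 0,			 /* hci0 */
                        .flags   = IREQ_CACHE_FLUSH,
                        .lap     = { 0x33, 0x8b, 0x9e }, /* GIAC */
                        .length  = 8,			 /* * 1.28 s */
                        .num_rsp = 8,
                },
        };

        ioctl(dd, HCIINQUIRY, &buf);	/* dd: raw HCI socket fd */
#endif
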
487 /* ---- HCI ioctl helpers ---- */
488
489 int hci_dev_open(__u16 dev)
490 {
491         struct hci_dev *hdev;
492         int ret = 0;
493
494         if (!(hdev = hci_dev_get(dev)))
495                 return -ENODEV;
496
497         BT_DBG("%s %p", hdev->name, hdev);
498
499         hci_req_lock(hdev);
500
501         if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
502                 ret = -ERFKILL;
503                 goto done;
504         }
505
506         if (test_bit(HCI_UP, &hdev->flags)) {
507                 ret = -EALREADY;
508                 goto done;
509         }
510
511         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
512                 set_bit(HCI_RAW, &hdev->flags);
513
514         /* Treat all non BR/EDR controllers as raw devices for now */
515         if (hdev->dev_type != HCI_BREDR)
516                 set_bit(HCI_RAW, &hdev->flags);
517
518         if (hdev->open(hdev)) {
519                 ret = -EIO;
520                 goto done;
521         }
522
523         if (!test_bit(HCI_RAW, &hdev->flags)) {
524                 atomic_set(&hdev->cmd_cnt, 1);
525                 set_bit(HCI_INIT, &hdev->flags);
526
527                 /* __hci_request(hdev, hci_reset_req, 0, HZ); */
528                 ret = __hci_request(hdev, hci_init_req, 0,
529                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
530
531                 clear_bit(HCI_INIT, &hdev->flags);
532         }
533
534         if (!ret) {
535                 hci_dev_hold(hdev);
536                 set_bit(HCI_UP, &hdev->flags);
537                 hci_notify(hdev, HCI_DEV_UP);
538                 if (!test_bit(HCI_SETUP, &hdev->flags))
539                         mgmt_powered(hdev->id, 1);
540         } else {
541                 /* Init failed, cleanup */
542                 tasklet_kill(&hdev->rx_task);
543                 tasklet_kill(&hdev->tx_task);
544                 tasklet_kill(&hdev->cmd_task);
545
546                 skb_queue_purge(&hdev->cmd_q);
547                 skb_queue_purge(&hdev->rx_q);
548
549                 if (hdev->flush)
550                         hdev->flush(hdev);
551
552                 if (hdev->sent_cmd) {
553                         kfree_skb(hdev->sent_cmd);
554                         hdev->sent_cmd = NULL;
555                 }
556
557                 hdev->close(hdev);
558                 hdev->flags = 0;
559         }
560
561 done:
562         hci_req_unlock(hdev);
563         hci_dev_put(hdev);
564         return ret;
565 }
566
567 static int hci_dev_do_close(struct hci_dev *hdev)
568 {
569         BT_DBG("%s %p", hdev->name, hdev);
570
571         hci_req_cancel(hdev, ENODEV);
572         hci_req_lock(hdev);
573
574         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
575                 hci_req_unlock(hdev);
576                 return 0;
577         }
578
579         /* Kill RX and TX tasks */
580         tasklet_kill(&hdev->rx_task);
581         tasklet_kill(&hdev->tx_task);
582
583         hci_dev_lock_bh(hdev);
584         inquiry_cache_flush(hdev);
585         hci_conn_hash_flush(hdev);
586         hci_dev_unlock_bh(hdev);
587
588         hci_notify(hdev, HCI_DEV_DOWN);
589
590         if (hdev->flush)
591                 hdev->flush(hdev);
592
593         /* Reset device */
594         skb_queue_purge(&hdev->cmd_q);
595         atomic_set(&hdev->cmd_cnt, 1);
596         if (!test_bit(HCI_RAW, &hdev->flags)) {
597                 set_bit(HCI_INIT, &hdev->flags);
598                 __hci_request(hdev, hci_reset_req, 0,
599                                         msecs_to_jiffies(250));
600                 clear_bit(HCI_INIT, &hdev->flags);
601         }
602
603         /* Kill cmd task */
604         tasklet_kill(&hdev->cmd_task);
605
606         /* Drop queues */
607         skb_queue_purge(&hdev->rx_q);
608         skb_queue_purge(&hdev->cmd_q);
609         skb_queue_purge(&hdev->raw_q);
610
611         /* Drop last sent command */
612         if (hdev->sent_cmd) {
613                 kfree_skb(hdev->sent_cmd);
614                 hdev->sent_cmd = NULL;
615         }
616
617         /* After this point our queues are empty
618          * and no tasks are scheduled. */
619         hdev->close(hdev);
620
621         mgmt_powered(hdev->id, 0);
622
623         /* Clear flags */
624         hdev->flags = 0;
625
626         hci_req_unlock(hdev);
627
628         hci_dev_put(hdev);
629         return 0;
630 }
631
632 int hci_dev_close(__u16 dev)
633 {
634         struct hci_dev *hdev;
635         int err;
636
637         hdev = hci_dev_get(dev);
638         if (!hdev)
639                 return -ENODEV;
640         err = hci_dev_do_close(hdev);
641         hci_dev_put(hdev);
642         return err;
643 }
644
645 int hci_dev_reset(__u16 dev)
646 {
647         struct hci_dev *hdev;
648         int ret = 0;
649
650         hdev = hci_dev_get(dev);
651         if (!hdev)
652                 return -ENODEV;
653
654         hci_req_lock(hdev);
655         tasklet_disable(&hdev->tx_task);
656
657         if (!test_bit(HCI_UP, &hdev->flags))
658                 goto done;
659
660         /* Drop queues */
661         skb_queue_purge(&hdev->rx_q);
662         skb_queue_purge(&hdev->cmd_q);
663
664         hci_dev_lock_bh(hdev);
665         inquiry_cache_flush(hdev);
666         hci_conn_hash_flush(hdev);
667         hci_dev_unlock_bh(hdev);
668
669         if (hdev->flush)
670                 hdev->flush(hdev);
671
672         atomic_set(&hdev->cmd_cnt, 1);
673         hdev->acl_cnt = 0; hdev->sco_cnt = 0;
674
675         if (!test_bit(HCI_RAW, &hdev->flags))
676                 ret = __hci_request(hdev, hci_reset_req, 0,
677                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
678
679 done:
680         tasklet_enable(&hdev->tx_task);
681         hci_req_unlock(hdev);
682         hci_dev_put(hdev);
683         return ret;
684 }
685
686 int hci_dev_reset_stat(__u16 dev)
687 {
688         struct hci_dev *hdev;
689         int ret = 0;
690
691         hdev = hci_dev_get(dev);
692         if (!hdev)
693                 return -ENODEV;
694
695         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
696
697         hci_dev_put(hdev);
698
699         return ret;
700 }
701
702 int hci_dev_cmd(unsigned int cmd, void __user *arg)
703 {
704         struct hci_dev *hdev;
705         struct hci_dev_req dr;
706         int err = 0;
707
708         if (copy_from_user(&dr, arg, sizeof(dr)))
709                 return -EFAULT;
710
711         hdev = hci_dev_get(dr.dev_id);
712         if (!hdev)
713                 return -ENODEV;
714
715         switch (cmd) {
716         case HCISETAUTH:
717                 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
718                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
719                 break;
720
721         case HCISETENCRYPT:
722                 if (!lmp_encrypt_capable(hdev)) {
723                         err = -EOPNOTSUPP;
724                         break;
725                 }
726
727                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
728                         /* Auth must be enabled first */
729                         err = hci_request(hdev, hci_auth_req, dr.dev_opt,
730                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
731                         if (err)
732                                 break;
733                 }
734
735                 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
736                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
737                 break;
738
739         case HCISETSCAN:
740                 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
741                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
742                 break;
743
744         case HCISETLINKPOL:
745                 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
746                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
747                 break;
748
749         case HCISETLINKMODE:
750                 hdev->link_mode = ((__u16) dr.dev_opt) &
751                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
752                 break;
753
754         case HCISETPTYPE:
755                 hdev->pkt_type = (__u16) dr.dev_opt;
756                 break;
757
758         case HCISETACLMTU:
759                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
760                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
761                 break;
762
763         case HCISETSCOMTU:
764                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
765                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
766                 break;
767
768         default:
769                 err = -EINVAL;
770                 break;
771         }
772
773         hci_dev_put(hdev);
774         return err;
775 }
776
777 int hci_get_dev_list(void __user *arg)
778 {
779         struct hci_dev_list_req *dl;
780         struct hci_dev_req *dr;
781         struct list_head *p;
782         int n = 0, size, err;
783         __u16 dev_num;
784
785         if (get_user(dev_num, (__u16 __user *) arg))
786                 return -EFAULT;
787
788         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
789                 return -EINVAL;
790
791         size = sizeof(*dl) + dev_num * sizeof(*dr);
792
793         dl = kzalloc(size, GFP_KERNEL);
794         if (!dl)
795                 return -ENOMEM;
796
797         dr = dl->dev_req;
798
799         read_lock_bh(&hci_dev_list_lock);
800         list_for_each(p, &hci_dev_list) {
801                 struct hci_dev *hdev;
802                 hdev = list_entry(p, struct hci_dev, list);
803                 hci_del_off_timer(hdev);
804                 (dr + n)->dev_id  = hdev->id;
805                 (dr + n)->dev_opt = hdev->flags;
806                 if (++n >= dev_num)
807                         break;
808         }
809         read_unlock_bh(&hci_dev_list_lock);
810
811         dl->dev_num = n;
812         size = sizeof(*dl) + n * sizeof(*dr);
813
814         err = copy_to_user(arg, dl, size);
815         kfree(dl);
816
817         return err ? -EFAULT : 0;
818 }
819
820 int hci_get_dev_info(void __user *arg)
821 {
822         struct hci_dev *hdev;
823         struct hci_dev_info di;
824         int err = 0;
825
826         if (copy_from_user(&di, arg, sizeof(di)))
827                 return -EFAULT;
828
829         hdev = hci_dev_get(di.dev_id);
830         if (!hdev)
831                 return -ENODEV;
832
833         hci_del_off_timer(hdev);
834
835         strcpy(di.name, hdev->name);
836         di.bdaddr   = hdev->bdaddr;
837         di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
838         di.flags    = hdev->flags;
839         di.pkt_type = hdev->pkt_type;
840         di.acl_mtu  = hdev->acl_mtu;
841         di.acl_pkts = hdev->acl_pkts;
842         di.sco_mtu  = hdev->sco_mtu;
843         di.sco_pkts = hdev->sco_pkts;
844         di.link_policy = hdev->link_policy;
845         di.link_mode   = hdev->link_mode;
846
847         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
848         memcpy(&di.features, &hdev->features, sizeof(di.features));
849
850         if (copy_to_user(arg, &di, sizeof(di)))
851                 err = -EFAULT;
852
853         hci_dev_put(hdev);
854
855         return err;
856 }
857
858 /* ---- Interface to HCI drivers ---- */
859
860 static int hci_rfkill_set_block(void *data, bool blocked)
861 {
862         struct hci_dev *hdev = data;
863
864         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
865
866         if (!blocked)
867                 return 0;
868
869         hci_dev_do_close(hdev);
870
871         return 0;
872 }
873
874 static const struct rfkill_ops hci_rfkill_ops = {
875         .set_block = hci_rfkill_set_block,
876 };
877
878 /* Alloc HCI device */
879 struct hci_dev *hci_alloc_dev(void)
880 {
881         struct hci_dev *hdev;
882
883         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
884         if (!hdev)
885                 return NULL;
886
887         skb_queue_head_init(&hdev->driver_init);
888
889         return hdev;
890 }
891 EXPORT_SYMBOL(hci_alloc_dev);
892
893 /* Free HCI device */
894 void hci_free_dev(struct hci_dev *hdev)
895 {
896         skb_queue_purge(&hdev->driver_init);
897
898         /* will be freed via the device release callback */
899         put_device(&hdev->dev);
900 }
901 EXPORT_SYMBOL(hci_free_dev);
902
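/*
 * Driver-side sketch (hypothetical transport driver; the example_*
 * callback names are illustrative): hci_register_dev() below requires
 * open, close and destruct to be set before registration.
 */
#if 0
        struct hci_dev *hdev = hci_alloc_dev();

        if (!hdev)
                return -ENOMEM;

        hdev->bus      = HCI_USB;
        hdev->open     = example_open;
        hdev->close    = example_close;
        hdev->send     = example_send;
        hdev->destruct = example_destruct;

        if (hci_register_dev(hdev) < 0) {
                hci_free_dev(hdev);
                return -ENODEV;
        }
#endif
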
903 static void hci_power_on(struct work_struct *work)
904 {
905         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
906
907         BT_DBG("%s", hdev->name);
908
909         if (hci_dev_open(hdev->id) < 0)
910                 return;
911
912         if (test_bit(HCI_AUTO_OFF, &hdev->flags))
913                 mod_timer(&hdev->off_timer,
914                                 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
915
916         if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
917                 mgmt_index_added(hdev->id);
918 }
919
920 static void hci_power_off(struct work_struct *work)
921 {
922         struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
923
924         BT_DBG("%s", hdev->name);
925
926         hci_dev_close(hdev->id);
927 }
928
929 static void hci_auto_off(unsigned long data)
930 {
931         struct hci_dev *hdev = (struct hci_dev *) data;
932
933         BT_DBG("%s", hdev->name);
934
935         clear_bit(HCI_AUTO_OFF, &hdev->flags);
936
937         queue_work(hdev->workqueue, &hdev->power_off);
938 }
939
940 void hci_del_off_timer(struct hci_dev *hdev)
941 {
942         BT_DBG("%s", hdev->name);
943
944         clear_bit(HCI_AUTO_OFF, &hdev->flags);
945         del_timer(&hdev->off_timer);
946 }
947
948 /* Register HCI device */
949 int hci_register_dev(struct hci_dev *hdev)
950 {
951         struct list_head *head = &hci_dev_list, *p;
952         int i, id = 0;
953
954         BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
955                                                 hdev->bus, hdev->owner);
956
957         if (!hdev->open || !hdev->close || !hdev->destruct)
958                 return -EINVAL;
959
960         write_lock_bh(&hci_dev_list_lock);
961
962         /* Find first available device id */
963         list_for_each(p, &hci_dev_list) {
964                 if (list_entry(p, struct hci_dev, list)->id != id)
965                         break;
966                 head = p; id++;
967         }
968
969         sprintf(hdev->name, "hci%d", id);
970         hdev->id = id;
971         list_add(&hdev->list, head);
972
973         atomic_set(&hdev->refcnt, 1);
974         spin_lock_init(&hdev->lock);
975
976         hdev->flags = 0;
977         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
978         hdev->esco_type = (ESCO_HV1);
979         hdev->link_mode = (HCI_LM_ACCEPT);
980
981         hdev->idle_timeout = 0;
982         hdev->sniff_max_interval = 800;
983         hdev->sniff_min_interval = 80;
984
985         tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
986         tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
987         tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
988
989         skb_queue_head_init(&hdev->rx_q);
990         skb_queue_head_init(&hdev->cmd_q);
991         skb_queue_head_init(&hdev->raw_q);
992
993         for (i = 0; i < NUM_REASSEMBLY; i++)
994                 hdev->reassembly[i] = NULL;
995
996         init_waitqueue_head(&hdev->req_wait_q);
997         mutex_init(&hdev->req_lock);
998
999         inquiry_cache_init(hdev);
1000
1001         hci_conn_hash_init(hdev);
1002
1003         INIT_LIST_HEAD(&hdev->blacklist);
1004
1005         INIT_WORK(&hdev->power_on, hci_power_on);
1006         INIT_WORK(&hdev->power_off, hci_power_off);
1007         setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
1008
1009         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1010
1011         atomic_set(&hdev->promisc, 0);
1012
1013         write_unlock_bh(&hci_dev_list_lock);
1014
1015         hdev->workqueue = create_singlethread_workqueue(hdev->name);
1016         if (!hdev->workqueue)
1017                 goto nomem;
1018
1019         hci_register_sysfs(hdev);
1020
1021         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1022                                 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1023         if (hdev->rfkill) {
1024                 if (rfkill_register(hdev->rfkill) < 0) {
1025                         rfkill_destroy(hdev->rfkill);
1026                         hdev->rfkill = NULL;
1027                 }
1028         }
1029
1030         set_bit(HCI_AUTO_OFF, &hdev->flags);
1031         set_bit(HCI_SETUP, &hdev->flags);
1032         queue_work(hdev->workqueue, &hdev->power_on);
1033
1034         hci_notify(hdev, HCI_DEV_REG);
1035
1036         return id;
1037
1038 nomem:
1039         write_lock_bh(&hci_dev_list_lock);
1040         list_del(&hdev->list);
1041         write_unlock_bh(&hci_dev_list_lock);
1042
1043         return -ENOMEM;
1044 }
1045 EXPORT_SYMBOL(hci_register_dev);
1046
1047 /* Unregister HCI device */
1048 int hci_unregister_dev(struct hci_dev *hdev)
1049 {
1050         int i;
1051
1052         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1053
1054         write_lock_bh(&hci_dev_list_lock);
1055         list_del(&hdev->list);
1056         write_unlock_bh(&hci_dev_list_lock);
1057
1058         hci_dev_do_close(hdev);
1059
1060         for (i = 0; i < NUM_REASSEMBLY; i++)
1061                 kfree_skb(hdev->reassembly[i]);
1062
1063         if (!test_bit(HCI_INIT, &hdev->flags) &&
1064                                         !test_bit(HCI_SETUP, &hdev->flags))
1065                 mgmt_index_removed(hdev->id);
1066
1067         hci_notify(hdev, HCI_DEV_UNREG);
1068
1069         if (hdev->rfkill) {
1070                 rfkill_unregister(hdev->rfkill);
1071                 rfkill_destroy(hdev->rfkill);
1072         }
1073
1074         hci_unregister_sysfs(hdev);
1075
1076         destroy_workqueue(hdev->workqueue);
1077
1078         hci_dev_lock_bh(hdev);
1079         hci_blacklist_clear(hdev);
1080         hci_dev_unlock_bh(hdev);
1081
1082         __hci_dev_put(hdev);
1083
1084         return 0;
1085 }
1086 EXPORT_SYMBOL(hci_unregister_dev);
1087
1088 /* Suspend HCI device */
1089 int hci_suspend_dev(struct hci_dev *hdev)
1090 {
1091         hci_notify(hdev, HCI_DEV_SUSPEND);
1092         return 0;
1093 }
1094 EXPORT_SYMBOL(hci_suspend_dev);
1095
1096 /* Resume HCI device */
1097 int hci_resume_dev(struct hci_dev *hdev)
1098 {
1099         hci_notify(hdev, HCI_DEV_RESUME);
1100         return 0;
1101 }
1102 EXPORT_SYMBOL(hci_resume_dev);
1103
1104 /* Receive frame from HCI drivers */
1105 int hci_recv_frame(struct sk_buff *skb)
1106 {
1107         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1108         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1109                                 && !test_bit(HCI_INIT, &hdev->flags))) {
1110                 kfree_skb(skb);
1111                 return -ENXIO;
1112         }
1113
1114         /* Incoming skb */
1115         bt_cb(skb)->incoming = 1;
1116
1117         /* Time stamp */
1118         __net_timestamp(skb);
1119
1120         /* Queue frame for rx task */
1121         skb_queue_tail(&hdev->rx_q, skb);
1122         tasklet_schedule(&hdev->rx_task);
1123
1124         return 0;
1125 }
1126 EXPORT_SYMBOL(hci_recv_frame);
1127
1128 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1129                           int count, __u8 index, gfp_t gfp_mask)
1130 {
1131         int len = 0;
1132         int hlen = 0;
1133         int remain = count;
1134         struct sk_buff *skb;
1135         struct bt_skb_cb *scb;
1136
1137         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1138                                 index >= NUM_REASSEMBLY)
1139                 return -EILSEQ;
1140
1141         skb = hdev->reassembly[index];
1142
1143         if (!skb) {
1144                 switch (type) {
1145                 case HCI_ACLDATA_PKT:
1146                         len = HCI_MAX_FRAME_SIZE;
1147                         hlen = HCI_ACL_HDR_SIZE;
1148                         break;
1149                 case HCI_EVENT_PKT:
1150                         len = HCI_MAX_EVENT_SIZE;
1151                         hlen = HCI_EVENT_HDR_SIZE;
1152                         break;
1153                 case HCI_SCODATA_PKT:
1154                         len = HCI_MAX_SCO_SIZE;
1155                         hlen = HCI_SCO_HDR_SIZE;
1156                         break;
1157                 }
1158
1159                 skb = bt_skb_alloc(len, gfp_mask);
1160                 if (!skb)
1161                         return -ENOMEM;
1162
1163                 scb = (void *) skb->cb;
1164                 scb->expect = hlen;
1165                 scb->pkt_type = type;
1166
1167                 skb->dev = (void *) hdev;
1168                 hdev->reassembly[index] = skb;
1169         }
1170
1171         while (count) {
1172                 scb = (void *) skb->cb;
1173                 len = min(scb->expect, (__u16)count);
1174
1175                 memcpy(skb_put(skb, len), data, len);
1176
1177                 count -= len;
1178                 data += len;
1179                 scb->expect -= len;
1180                 remain = count;
1181
1182                 switch (type) {
1183                 case HCI_EVENT_PKT:
1184                         if (skb->len == HCI_EVENT_HDR_SIZE) {
1185                                 struct hci_event_hdr *h = hci_event_hdr(skb);
1186                                 scb->expect = h->plen;
1187
1188                                 if (skb_tailroom(skb) < scb->expect) {
1189                                         kfree_skb(skb);
1190                                         hdev->reassembly[index] = NULL;
1191                                         return -ENOMEM;
1192                                 }
1193                         }
1194                         break;
1195
1196                 case HCI_ACLDATA_PKT:
1197                         if (skb->len  == HCI_ACL_HDR_SIZE) {
1198                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1199                                 scb->expect = __le16_to_cpu(h->dlen);
1200
1201                                 if (skb_tailroom(skb) < scb->expect) {
1202                                         kfree_skb(skb);
1203                                         hdev->reassembly[index] = NULL;
1204                                         return -ENOMEM;
1205                                 }
1206                         }
1207                         break;
1208
1209                 case HCI_SCODATA_PKT:
1210                         if (skb->len == HCI_SCO_HDR_SIZE) {
1211                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1212                                 scb->expect = h->dlen;
1213
1214                                 if (skb_tailroom(skb) < scb->expect) {
1215                                         kfree_skb(skb);
1216                                         hdev->reassembly[index] = NULL;
1217                                         return -ENOMEM;
1218                                 }
1219                         }
1220                         break;
1221                 }
1222
1223                 if (scb->expect == 0) {
1224                         /* Complete frame */
1225
1226                         bt_cb(skb)->pkt_type = type;
1227                         hci_recv_frame(skb);
1228
1229                         hdev->reassembly[index] = NULL;
1230                         return remain;
1231                 }
1232         }
1233
1234         return remain;
1235 }
1236
1237 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1238 {
1239         int rem = 0;
1240
1241         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1242                 return -EILSEQ;
1243
1244         while (count) {
1245                 rem = hci_reassembly(hdev, type, data, count,
1246                                                 type - 1, GFP_ATOMIC);
1247                 if (rem < 0)
1248                         return rem;
1249
1250                 data += (count - rem);
1251                 count = rem;
1252         }
1253
1254         return rem;
1255 }
1256 EXPORT_SYMBOL(hci_recv_fragment);
1257
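/*
 * Transport sketch (illustrative): a driver that already knows the packet
 * type (e.g. from a bus-specific header) hands payload chunks to
 * hci_recv_fragment() as they arrive; fully reassembled frames are passed
 * on to hci_recv_frame() internally.
 */
#if 0
        /* 'type' is one of HCI_EVENT_PKT/HCI_ACLDATA_PKT/HCI_SCODATA_PKT,
         * 'buf'/'len' describe the chunk just read from the hardware. */
        err = hci_recv_fragment(hdev, type, buf, len);
        if (err < 0)
                BT_ERR("%s frame reassembly failed", hdev->name);
#endif
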
1258 #define STREAM_REASSEMBLY 0
1259
1260 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1261 {
1262         int type;
1263         int rem = 0;
1264
1265         while (count) {
1266                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1267
1268                 if (!skb) {
1269                         struct { char type; } *pkt;
1270
1271                         /* Start of the frame */
1272                         pkt = data;
1273                         type = pkt->type;
1274
1275                         data++;
1276                         count--;
1277                 } else
1278                         type = bt_cb(skb)->pkt_type;
1279
1280                 rem = hci_reassembly(hdev, type, data,
1281                                         count, STREAM_REASSEMBLY, GFP_ATOMIC);
1282                 if (rem < 0)
1283                         return rem;
1284
1285                 data += (count - rem);
1286                 count = rem;
1287         }
1288
1289         return rem;
1290 }
1291 EXPORT_SYMBOL(hci_recv_stream_fragment);
1292
1293 /* ---- Interface to upper protocols ---- */
1294
1295 /* Register/Unregister protocols.
1296  * hci_task_lock is used to ensure that no tasks are running. */
1297 int hci_register_proto(struct hci_proto *hp)
1298 {
1299         int err = 0;
1300
1301         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1302
1303         if (hp->id >= HCI_MAX_PROTO)
1304                 return -EINVAL;
1305
1306         write_lock_bh(&hci_task_lock);
1307
1308         if (!hci_proto[hp->id])
1309                 hci_proto[hp->id] = hp;
1310         else
1311                 err = -EEXIST;
1312
1313         write_unlock_bh(&hci_task_lock);
1314
1315         return err;
1316 }
1317 EXPORT_SYMBOL(hci_register_proto);
1318
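/*
 * Upper-protocol sketch (illustrative; only fields this file itself uses
 * are shown): L2CAP and SCO each occupy one of the two hci_proto slots.
 */
#if 0
static struct hci_proto example_proto = {
        .name         = "EXAMPLE",
        .id           = HCI_PROTO_L2CAP,
        .recv_acldata = example_recv_acldata,	/* called from rx task */
};

/* hci_register_proto(&example_proto); */
#endif
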
1319 int hci_unregister_proto(struct hci_proto *hp)
1320 {
1321         int err = 0;
1322
1323         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1324
1325         if (hp->id >= HCI_MAX_PROTO)
1326                 return -EINVAL;
1327
1328         write_lock_bh(&hci_task_lock);
1329
1330         if (hci_proto[hp->id])
1331                 hci_proto[hp->id] = NULL;
1332         else
1333                 err = -ENOENT;
1334
1335         write_unlock_bh(&hci_task_lock);
1336
1337         return err;
1338 }
1339 EXPORT_SYMBOL(hci_unregister_proto);
1340
1341 int hci_register_cb(struct hci_cb *cb)
1342 {
1343         BT_DBG("%p name %s", cb, cb->name);
1344
1345         write_lock_bh(&hci_cb_list_lock);
1346         list_add(&cb->list, &hci_cb_list);
1347         write_unlock_bh(&hci_cb_list_lock);
1348
1349         return 0;
1350 }
1351 EXPORT_SYMBOL(hci_register_cb);
1352
1353 int hci_unregister_cb(struct hci_cb *cb)
1354 {
1355         BT_DBG("%p name %s", cb, cb->name);
1356
1357         write_lock_bh(&hci_cb_list_lock);
1358         list_del(&cb->list);
1359         write_unlock_bh(&hci_cb_list_lock);
1360
1361         return 0;
1362 }
1363 EXPORT_SYMBOL(hci_unregister_cb);
1364
1365 static int hci_send_frame(struct sk_buff *skb)
1366 {
1367         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1368
1369         if (!hdev) {
1370                 kfree_skb(skb);
1371                 return -ENODEV;
1372         }
1373
1374         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1375
1376         if (atomic_read(&hdev->promisc)) {
1377                 /* Time stamp */
1378                 __net_timestamp(skb);
1379
1380                 hci_send_to_sock(hdev, skb, NULL);
1381         }
1382
1383         /* Get rid of skb owner prior to sending to the driver. */
1384         skb_orphan(skb);
1385
1386         return hdev->send(skb);
1387 }
1388
1389 /* Send HCI command */
1390 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1391 {
1392         int len = HCI_COMMAND_HDR_SIZE + plen;
1393         struct hci_command_hdr *hdr;
1394         struct sk_buff *skb;
1395
1396         BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1397
1398         skb = bt_skb_alloc(len, GFP_ATOMIC);
1399         if (!skb) {
1400                 BT_ERR("%s no memory for command", hdev->name);
1401                 return -ENOMEM;
1402         }
1403
1404         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1405         hdr->opcode = cpu_to_le16(opcode);
1406         hdr->plen   = plen;
1407
1408         if (plen)
1409                 memcpy(skb_put(skb, plen), param, plen);
1410
1411         BT_DBG("skb len %d", skb->len);
1412
1413         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1414         skb->dev = (void *) hdev;
1415
1416         skb_queue_tail(&hdev->cmd_q, skb);
1417         tasklet_schedule(&hdev->cmd_task);
1418
1419         return 0;
1420 }
1421
1422 /* Get data from the previously sent command */
1423 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1424 {
1425         struct hci_command_hdr *hdr;
1426
1427         if (!hdev->sent_cmd)
1428                 return NULL;
1429
1430         hdr = (void *) hdev->sent_cmd->data;
1431
1432         if (hdr->opcode != cpu_to_le16(opcode))
1433                 return NULL;
1434
1435         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1436
1437         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1438 }
1439
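/*
 * Pairing sketch (illustrative): an event handler can recover the
 * parameters of the command it is completing, as the hci_event code does
 * for command status/complete events.
 */
#if 0
        struct hci_cp_inquiry *cp;

        cp = hci_sent_cmd_data(hdev, HCI_OP_INQUIRY);
        if (cp)
                BT_DBG("inquiry length was %d", cp->length);
#endif
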
1440 /* Send ACL data */
1441 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1442 {
1443         struct hci_acl_hdr *hdr;
1444         int len = skb->len;
1445
1446         skb_push(skb, HCI_ACL_HDR_SIZE);
1447         skb_reset_transport_header(skb);
1448         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1449         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1450         hdr->dlen   = cpu_to_le16(len);
1451 }
1452
1453 void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1454 {
1455         struct hci_dev *hdev = conn->hdev;
1456         struct sk_buff *list;
1457
1458         BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1459
1460         skb->dev = (void *) hdev;
1461         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1462         hci_add_acl_hdr(skb, conn->handle, flags);
1463
1464         list = skb_shinfo(skb)->frag_list;
1465         if (!list) {
1466                 /* Non fragmented */
1467                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1468
1469                 skb_queue_tail(&conn->data_q, skb);
1470         } else {
1471                 /* Fragmented */
1472                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1473
1474                 skb_shinfo(skb)->frag_list = NULL;
1475
1476                 /* Queue all fragments atomically */
1477                 spin_lock_bh(&conn->data_q.lock);
1478
1479                 __skb_queue_tail(&conn->data_q, skb);
1480
1481                 flags &= ~ACL_START;
1482                 flags |= ACL_CONT;
1483                 do {
1484                         skb = list; list = list->next;
1485
1486                         skb->dev = (void *) hdev;
1487                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1488                         hci_add_acl_hdr(skb, conn->handle, flags);
1489
1490                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1491
1492                         __skb_queue_tail(&conn->data_q, skb);
1493                 } while (list);
1494
1495                 spin_unlock_bh(&conn->data_q.lock);
1496         }
1497
1498         tasklet_schedule(&hdev->tx_task);
1499 }
1500 EXPORT_SYMBOL(hci_send_acl);
1501
1502 /* Send SCO data */
1503 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1504 {
1505         struct hci_dev *hdev = conn->hdev;
1506         struct hci_sco_hdr hdr;
1507
1508         BT_DBG("%s len %d", hdev->name, skb->len);
1509
1510         hdr.handle = cpu_to_le16(conn->handle);
1511         hdr.dlen   = skb->len;
1512
1513         skb_push(skb, HCI_SCO_HDR_SIZE);
1514         skb_reset_transport_header(skb);
1515         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1516
1517         skb->dev = (void *) hdev;
1518         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1519
1520         skb_queue_tail(&conn->data_q, skb);
1521         tasklet_schedule(&hdev->tx_task);
1522 }
1523 EXPORT_SYMBOL(hci_send_sco);
1524
1525 /* ---- HCI TX task (outgoing data) ---- */
1526
1527 /* HCI Connection scheduler */
1528 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1529 {
1530         struct hci_conn_hash *h = &hdev->conn_hash;
1531         struct hci_conn *conn = NULL;
1532         int num = 0, min = ~0;
1533         struct list_head *p;
1534
1535         /* We don't have to lock device here. Connections are always
1536          * added and removed with TX task disabled. */
1537         list_for_each(p, &h->list) {
1538                 struct hci_conn *c;
1539                 c = list_entry(p, struct hci_conn, list);
1540
1541                 if (c->type != type || skb_queue_empty(&c->data_q))
1542                         continue;
1543
1544                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
1545                         continue;
1546
1547                 num++;
1548
1549                 if (c->sent < min) {
1550                         min  = c->sent;
1551                         conn = c;
1552                 }
1553         }
1554
1555         if (conn) {
1556                 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1557                 int q = cnt / num;
1558                 *quote = q ? q : 1;
1559         } else
1560                 *quote = 0;
1561
1562         BT_DBG("conn %p quote %d", conn, *quote);
1563         return conn;
1564 }
1565
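/*
 * Worked example (hypothetical numbers): with hdev->acl_cnt = 9 free
 * controller buffers and num = 3 eligible ACL connections, each scheduler
 * pass grants quote = 9 / 3 = 3 packets, always starting with the
 * connection that has the fewest packets in flight (lowest c->sent).
 */
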
1566 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1567 {
1568         struct hci_conn_hash *h = &hdev->conn_hash;
1569         struct list_head *p;
1570         struct hci_conn  *c;
1571
1572         BT_ERR("%s ACL tx timeout", hdev->name);
1573
1574         /* Kill stalled connections */
1575         list_for_each(p, &h->list) {
1576                 c = list_entry(p, struct hci_conn, list);
1577                 if (c->type == ACL_LINK && c->sent) {
1578                         BT_ERR("%s killing stalled ACL connection %s",
1579                                 hdev->name, batostr(&c->dst));
1580                         hci_acl_disconn(c, 0x13);
1581                 }
1582         }
1583 }
1584
1585 static inline void hci_sched_acl(struct hci_dev *hdev)
1586 {
1587         struct hci_conn *conn;
1588         struct sk_buff *skb;
1589         int quote;
1590
1591         BT_DBG("%s", hdev->name);
1592
1593         if (!test_bit(HCI_RAW, &hdev->flags)) {
1594                 /* ACL tx timeout must be longer than maximum
1595                  * link supervision timeout (40.9 seconds) */
1596                 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1597                         hci_acl_tx_to(hdev);
1598         }
1599
1600         while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1601                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1602                         BT_DBG("skb %p len %d", skb, skb->len);
1603
1604                         hci_conn_enter_active_mode(conn);
1605
1606                         hci_send_frame(skb);
1607                         hdev->acl_last_tx = jiffies;
1608
1609                         hdev->acl_cnt--;
1610                         conn->sent++;
1611                 }
1612         }
1613 }
1614
1615 /* Schedule SCO */
1616 static inline void hci_sched_sco(struct hci_dev *hdev)
1617 {
1618         struct hci_conn *conn;
1619         struct sk_buff *skb;
1620         int quote;
1621
1622         BT_DBG("%s", hdev->name);
1623
1624         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1625                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1626                         BT_DBG("skb %p len %d", skb, skb->len);
1627                         hci_send_frame(skb);
1628
1629                         conn->sent++;
1630                         if (conn->sent == ~0)
1631                                 conn->sent = 0;
1632                 }
1633         }
1634 }
1635
1636 static inline void hci_sched_esco(struct hci_dev *hdev)
1637 {
1638         struct hci_conn *conn;
1639         struct sk_buff *skb;
1640         int quote;
1641
1642         BT_DBG("%s", hdev->name);
1643
1644         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1645                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1646                         BT_DBG("skb %p len %d", skb, skb->len);
1647                         hci_send_frame(skb);
1648
1649                         conn->sent++;
1650                         if (conn->sent == ~0)
1651                                 conn->sent = 0;
1652                 }
1653         }
1654 }
1655
1656 static void hci_tx_task(unsigned long arg)
1657 {
1658         struct hci_dev *hdev = (struct hci_dev *) arg;
1659         struct sk_buff *skb;
1660
1661         read_lock(&hci_task_lock);
1662
1663         BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1664
1665         /* Schedule queues and send stuff to HCI driver */
1666
1667         hci_sched_acl(hdev);
1668
1669         hci_sched_sco(hdev);
1670
1671         hci_sched_esco(hdev);
1672
1673         /* Send next queued raw (unknown type) packet */
1674         while ((skb = skb_dequeue(&hdev->raw_q)))
1675                 hci_send_frame(skb);
1676
1677         read_unlock(&hci_task_lock);
1678 }
1679
1680 /* ----- HCI RX task (incoming data processing) ----- */
1681
1682 /* ACL data packet */
1683 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1684 {
1685         struct hci_acl_hdr *hdr = (void *) skb->data;
1686         struct hci_conn *conn;
1687         __u16 handle, flags;
1688
1689         skb_pull(skb, HCI_ACL_HDR_SIZE);
1690
1691         handle = __le16_to_cpu(hdr->handle);
1692         flags  = hci_flags(handle);
1693         handle = hci_handle(handle);
1694
1695         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1696
1697         hdev->stat.acl_rx++;
1698
1699         hci_dev_lock(hdev);
1700         conn = hci_conn_hash_lookup_handle(hdev, handle);
1701         hci_dev_unlock(hdev);
1702
1703         if (conn) {
1704                 register struct hci_proto *hp;
1705
1706                 hci_conn_enter_active_mode(conn);
1707
1708                 /* Send to upper protocol */
1709                 hp = hci_proto[HCI_PROTO_L2CAP];
1710                 if (hp && hp->recv_acldata) {
1711                         hp->recv_acldata(conn, skb, flags);
1712                         return;
1713                 }
1714         } else {
1715                 BT_ERR("%s ACL packet for unknown connection handle %d",
1716                         hdev->name, handle);
1717         }
1718
1719         kfree_skb(skb);
1720 }
1721
1722 /* SCO data packet */
1723 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1724 {
1725         struct hci_sco_hdr *hdr = (void *) skb->data;
1726         struct hci_conn *conn;
1727         __u16 handle;
1728
1729         skb_pull(skb, HCI_SCO_HDR_SIZE);
1730
1731         handle = __le16_to_cpu(hdr->handle);
1732
1733         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1734
1735         hdev->stat.sco_rx++;
1736
1737         hci_dev_lock(hdev);
1738         conn = hci_conn_hash_lookup_handle(hdev, handle);
1739         hci_dev_unlock(hdev);
1740
1741         if (conn) {
1742                 register struct hci_proto *hp;
1743
1744                 /* Send to upper protocol */
1745                 hp = hci_proto[HCI_PROTO_SCO];
1746                 if (hp && hp->recv_scodata) {
1747                         hp->recv_scodata(conn, skb);
1748                         return;
1749                 }
1750         } else {
1751                 BT_ERR("%s SCO packet for unknown connection handle %d",
1752                         hdev->name, handle);
1753         }
1754
1755         kfree_skb(skb);
1756 }
1757
1758 static void hci_rx_task(unsigned long arg)
1759 {
1760         struct hci_dev *hdev = (struct hci_dev *) arg;
1761         struct sk_buff *skb;
1762
1763         BT_DBG("%s", hdev->name);
1764
1765         read_lock(&hci_task_lock);
1766
1767         while ((skb = skb_dequeue(&hdev->rx_q))) {
1768                 if (atomic_read(&hdev->promisc)) {
1769                         /* Send copy to the sockets */
1770                         hci_send_to_sock(hdev, skb, NULL);
1771                 }
1772
1773                 if (test_bit(HCI_RAW, &hdev->flags)) {
1774                         kfree_skb(skb);
1775                         continue;
1776                 }
1777
1778                 if (test_bit(HCI_INIT, &hdev->flags)) {
1779                         /* Don't process data packets in this state. */
1780                         switch (bt_cb(skb)->pkt_type) {
1781                         case HCI_ACLDATA_PKT:
1782                         case HCI_SCODATA_PKT:
1783                                 kfree_skb(skb);
1784                                 continue;
1785                         }
1786                 }
1787
1788                 /* Process frame */
1789                 switch (bt_cb(skb)->pkt_type) {
1790                 case HCI_EVENT_PKT:
1791                         hci_event_packet(hdev, skb);
1792                         break;
1793
1794                 case HCI_ACLDATA_PKT:
1795                         BT_DBG("%s ACL data packet", hdev->name);
1796                         hci_acldata_packet(hdev, skb);
1797                         break;
1798
1799                 case HCI_SCODATA_PKT:
1800                         BT_DBG("%s SCO data packet", hdev->name);
1801                         hci_scodata_packet(hdev, skb);
1802                         break;
1803
1804                 default:
1805                         kfree_skb(skb);
1806                         break;
1807                 }
1808         }
1809
1810         read_unlock(&hci_task_lock);
1811 }
1812
1813 static void hci_cmd_task(unsigned long arg)
1814 {
1815         struct hci_dev *hdev = (struct hci_dev *) arg;
1816         struct sk_buff *skb;
1817
1818         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1819
1820         if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1821                 BT_ERR("%s command tx timeout", hdev->name);
1822                 atomic_set(&hdev->cmd_cnt, 1);
1823         }
1824
1825         /* Send queued commands */
1826         if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1827                 kfree_skb(hdev->sent_cmd);
1828
1829                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
1830                 if (hdev->sent_cmd) {
1831                         atomic_dec(&hdev->cmd_cnt);
1832                         hci_send_frame(skb);
1833                         hdev->cmd_last_tx = jiffies;
1834                 } else {
1835                         skb_queue_head(&hdev->cmd_q, skb);
1836                         tasklet_schedule(&hdev->cmd_task);
1837                 }
1838         }
1839 }