2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
40 #include <linux/uaccess.h>
41 #include <asm/unaligned.h>
43 #include <net/bluetooth/bluetooth.h>
44 #include <net/bluetooth/hci_core.h>
46 /* Handle HCI Event packets */
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL: clear inquiry state,
 * move discovery to STOPPED, complete the request and resume pending conns.
 * NOTE(review): interior lines are elided in this chunk (numbering skips);
 * mgmt_stop_discovery_failed() is presumably under a status/MGMT guard —
 * confirm against the full source. */
48 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
50 __u8 status = *((__u8 *) skb->data);
52 BT_DBG("%s status 0x%x", hdev->name, status);
56 mgmt_stop_discovery_failed(hdev, status);
61 clear_bit(HCI_INQUIRY, &hdev->flags);
64 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
67 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
69 hci_conn_check_pending(hdev);
/* Command Complete handler for HCI_OP_EXIT_PERIODIC_INQ: log status and
 * kick any connection attempts that were deferred during inquiry. */
72 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 __u8 status = *((__u8 *) skb->data);
76 BT_DBG("%s status 0x%x", hdev->name, status);
81 hci_conn_check_pending(hdev);
/* Command Complete handler for Remote Name Request Cancel: debug trace only;
 * no state to update here. */
84 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
86 BT_DBG("%s", hdev->name);
/* Command Complete handler for HCI_OP_ROLE_DISCOVERY: record master/slave
 * role in conn->link_mode for the connection identified by rp->handle.
 * NOTE(review): the status check, hci_dev_lock(), the conn NULL test and the
 * role branch condition are elided in this chunk — both the &= and |= of
 * HCI_LM_MASTER appear here but are branches of one if/else in the original. */
91 struct hci_rp_role_discovery *rp = (void *) skb->data;
92 struct hci_conn *conn;
94 BT_DBG("%s status 0x%x", hdev->name, rp->status);
101 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104 conn->link_mode &= ~HCI_LM_MASTER;
106 conn->link_mode |= HCI_LM_MASTER;
109 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_READ_LINK_POLICY: cache the per-link
 * policy on the matching connection.
 * NOTE(review): status guard, hci_dev_lock() and conn NULL check elided. */
112 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
114 struct hci_rp_read_link_policy *rp = (void *) skb->data;
115 struct hci_conn *conn;
117 BT_DBG("%s status 0x%x", hdev->name, rp->status);
124 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
126 conn->link_policy = __le16_to_cpu(rp->policy);
128 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY: on success, mirror
 * the policy that was sent (bytes 2..3 of the command parameters) into the
 * connection. The sent command buffer is recovered via hci_sent_cmd_data().
 * NOTE(review): 'sent' declaration, status/NULL guards and lock elided. */
133 struct hci_rp_write_link_policy *rp = (void *) skb->data;
134 struct hci_conn *conn;
137 BT_DBG("%s status 0x%x", hdev->name, rp->status);
142 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
148 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
150 conn->link_policy = get_unaligned_le16(sent + 2);
152 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_READ_DEF_LINK_POLICY: cache the
 * controller's default link policy on the hdev. */
155 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
157 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
159 BT_DBG("%s status 0x%x", hdev->name, rp->status);
164 hdev->link_policy = __le16_to_cpu(rp->policy);
/* Command Complete handler for HCI_OP_WRITE_DEF_LINK_POLICY: on success,
 * record the default link policy taken from the sent command parameters. */
167 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
169 __u8 status = *((__u8 *) skb->data);
172 BT_DBG("%s status 0x%x", hdev->name, status);
174 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179 hdev->link_policy = get_unaligned_le16(sent);
181 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
/* Command Complete handler for HCI_OP_RESET: drop HCI_RESET, complete the
 * request, and wipe all non-persistent device flags plus discovery state. */
184 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
186 __u8 status = *((__u8 *) skb->data);
188 BT_DBG("%s status 0x%x", hdev->name, status);
190 clear_bit(HCI_RESET, &hdev->flags);
192 hci_req_complete(hdev, HCI_OP_RESET, status);
194 /* Reset all non-persistent flags */
195 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
197 hdev->discovery.state = DISCOVERY_STOPPED;
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME: notify mgmt of the
 * result and, on success, copy the name that was sent into hdev->dev_name.
 * NOTE(review): 'sent' declaration, NULL/status guards and hci_dev_lock()
 * are elided in this chunk. */
200 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
202 __u8 status = *((__u8 *) skb->data);
205 BT_DBG("%s status 0x%x", hdev->name, status);
207 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
213 if (test_bit(HCI_MGMT, &hdev->dev_flags))
214 mgmt_set_local_name_complete(hdev, sent, status);
216 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
218 hci_dev_unlock(hdev);
220 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
/* Command Complete handler for HCI_OP_READ_LOCAL_NAME: during initial setup
 * only, adopt the controller-reported name as the device name. */
223 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
225 struct hci_rp_read_local_name *rp = (void *) skb->data;
227 BT_DBG("%s status 0x%x", hdev->name, rp->status);
232 if (test_bit(HCI_SETUP, &hdev->dev_flags))
233 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE: sync the HCI_AUTH
 * flag with the parameter that was sent and tell mgmt the outcome.
 * NOTE(review): the success branch around set/clear and the else keyword are
 * elided in this chunk — set_bit and clear_bit are alternatives, not both. */
236 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
238 __u8 status = *((__u8 *) skb->data);
241 BT_DBG("%s status 0x%x", hdev->name, status);
243 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
248 __u8 param = *((__u8 *) sent);
250 if (param == AUTH_ENABLED)
251 set_bit(HCI_AUTH, &hdev->flags);
253 clear_bit(HCI_AUTH, &hdev->flags);
256 if (test_bit(HCI_MGMT, &hdev->dev_flags))
257 mgmt_auth_enable_complete(hdev, status);
259 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
/* Command Complete handler for HCI_OP_WRITE_ENCRYPT_MODE: sync HCI_ENCRYPT
 * with the parameter that was sent.
 * NOTE(review): the condition selecting set_bit vs clear_bit is elided. */
262 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
264 __u8 status = *((__u8 *) skb->data);
267 BT_DBG("%s status 0x%x", hdev->name, status);
269 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
274 __u8 param = *((__u8 *) sent);
277 set_bit(HCI_ENCRYPT, &hdev->flags);
279 clear_bit(HCI_ENCRYPT, &hdev->flags);
282 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE: update the ISCAN
 * (discoverable) and PSCAN (connectable) flags to match the scan parameter
 * that was sent, emit mgmt discoverable/connectable events on transitions,
 * and arm the discoverable-timeout work when a timeout is configured.
 * On failure, mgmt is notified and the discoverable timeout is cleared.
 * NOTE(review): several guards (status check, MGMT-flag tests, the timeout
 * argument of queue_delayed_work, goto/done label) are elided here. */
285 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
287 __u8 param, status = *((__u8 *) skb->data);
288 int old_pscan, old_iscan;
291 BT_DBG("%s status 0x%x", hdev->name, status);
293 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
297 param = *((__u8 *) sent);
302 mgmt_write_scan_failed(hdev, param, status);
303 hdev->discov_timeout = 0;
307 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
308 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
310 if (param & SCAN_INQUIRY) {
311 set_bit(HCI_ISCAN, &hdev->flags);
313 mgmt_discoverable(hdev, 1);
314 if (hdev->discov_timeout > 0) {
315 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
316 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
319 } else if (old_iscan)
320 mgmt_discoverable(hdev, 0);
322 if (param & SCAN_PAGE) {
323 set_bit(HCI_PSCAN, &hdev->flags);
325 mgmt_connectable(hdev, 1);
326 } else if (old_pscan)
327 mgmt_connectable(hdev, 0);
330 hci_dev_unlock(hdev);
331 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
/* Command Complete handler for HCI_OP_READ_CLASS_OF_DEV: cache the 3-byte
 * class of device and log it MSB-first. */
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
336 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
338 BT_DBG("%s status 0x%x", hdev->name, rp->status);
343 memcpy(hdev->dev_class, rp->dev_class, 3);
345 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV: on success, copy
 * the class that was sent into hdev->dev_class and notify mgmt.
 * NOTE(review): 'sent' declaration, NULL/status guards and lock elided. */
349 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
351 __u8 status = *((__u8 *) skb->data);
354 BT_DBG("%s status 0x%x", hdev->name, status);
356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
363 memcpy(hdev->dev_class, sent, 3);
365 if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 mgmt_set_class_of_dev_complete(hdev, sent, status);
368 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_READ_VOICE_SETTING: if the reported
 * voice setting changed, store it and notify the driver (when a notify
 * callback is registered — the callback NULL check is elided here). */
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
373 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
376 BT_DBG("%s status 0x%x", hdev->name, rp->status);
381 setting = __le16_to_cpu(rp->voice_setting);
383 if (hdev->voice_setting == setting)
386 hdev->voice_setting = setting;
388 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
391 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete handler for HCI_OP_WRITE_VOICE_SETTING: mirror the value
 * that was sent (little-endian u16 in the command payload) and notify the
 * driver if it changed. */
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
396 __u8 status = *((__u8 *) skb->data);
400 BT_DBG("%s status 0x%x", hdev->name, status);
405 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
409 setting = get_unaligned_le16(sent);
411 if (hdev->voice_setting == setting)
414 hdev->voice_setting = setting;
416 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
419 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete handler for HCI_OP_HOST_BUFFER_SIZE: just complete the
 * pending request with the returned status. */
422 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
424 __u8 status = *((__u8 *) skb->data);
426 BT_DBG("%s status 0x%x", hdev->name, status);
428 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE: report the result to
 * mgmt and sync HCI_SSP_ENABLED with the mode that was sent.
 * NOTE(review): the condition choosing set_bit vs clear_bit is elided. */
431 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
433 __u8 status = *((__u8 *) skb->data);
436 BT_DBG("%s status 0x%x", hdev->name, status);
438 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
442 if (test_bit(HCI_MGMT, &hdev->dev_flags))
443 mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
446 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
448 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* Pick the best supported inquiry result mode: extended (with EIR) when the
 * controller advertises LMP_EXT_INQ, RSSI mode for LMP_RSSI_INQ, otherwise
 * standard. The manufacturer/revision comparisons blacklist controllers with
 * known-broken inquiry-mode support (return values are elided in this chunk). */
452 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
454 if (hdev->features[6] & LMP_EXT_INQ)
457 if (hdev->features[3] & LMP_RSSI_INQ)
460 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
461 hdev->lmp_subver == 0x0757)
464 if (hdev->manufacturer == 15) {
465 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
467 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
469 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
473 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
474 hdev->lmp_subver == 0x1805)
/* Send HCI_OP_WRITE_INQUIRY_MODE with the best mode the controller supports
 * as determined by hci_get_inquiry_mode(). */
480 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
484 mode = hci_get_inquiry_mode(hdev);
486 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
/* Build and send the HCI event mask. Starts from the BR/EDR 1.1 default
 * mask, then enables additional event bits only for features this
 * controller actually supports, so it never asks for events it cannot
 * generate. Pre-1.2 devices are skipped entirely (CSR 1.1 quirk below). */
489 static void hci_setup_event_mask(struct hci_dev *hdev)
491 /* The second byte is 0xff instead of 0x9f (two reserved bits
492 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
493 * command otherwise */
494 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
496 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
497 * any event mask for pre 1.2 devices */
498 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
501 events[4] |= 0x01; /* Flow Specification Complete */
502 events[4] |= 0x02; /* Inquiry Result with RSSI */
503 events[4] |= 0x04; /* Read Remote Extended Features Complete */
504 events[5] |= 0x08; /* Synchronous Connection Complete */
505 events[5] |= 0x10; /* Synchronous Connection Changed */
507 if (hdev->features[3] & LMP_RSSI_INQ)
508 events[4] |= 0x04; /* Inquiry Result with RSSI */
510 if (hdev->features[5] & LMP_SNIFF_SUBR)
511 events[5] |= 0x20; /* Sniff Subrating */
513 if (hdev->features[5] & LMP_PAUSE_ENC)
514 events[5] |= 0x80; /* Encryption Key Refresh Complete */
516 if (hdev->features[6] & LMP_EXT_INQ)
517 events[5] |= 0x40; /* Extended Inquiry Result */
519 if (hdev->features[6] & LMP_NO_FLUSH)
520 events[7] |= 0x01; /* Enhanced Flush Complete */
522 if (hdev->features[7] & LMP_LSTO)
523 events[6] |= 0x80; /* Link Supervision Timeout Changed */
525 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
526 events[6] |= 0x01; /* IO Capability Request */
527 events[6] |= 0x02; /* IO Capability Response */
528 events[6] |= 0x04; /* User Confirmation Request */
529 events[6] |= 0x08; /* User Passkey Request */
530 events[6] |= 0x10; /* Remote OOB Data Request */
531 events[6] |= 0x20; /* Simple Pairing Complete */
532 events[7] |= 0x04; /* User Passkey Notification */
533 events[7] |= 0x08; /* Keypress Notification */
534 events[7] |= 0x10; /* Remote Host Supported
535 * Features Notification */
538 if (hdev->features[4] & LMP_LE)
539 events[7] |= 0x20; /* LE Meta-Event */
541 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* BR/EDR controller bring-up: program the event mask, then issue the
 * feature-dependent configuration commands (local commands, SSP mode or EIR
 * clear, inquiry mode, inquiry TX power, extended features, authentication).
 * AMP-only controllers are skipped by the dev_type check.
 * NOTE(review): several lines are elided here — e.g. the 'mode' variable for
 * the SSP write and the else arm pairing the hci_cp_write_eir path. */
544 static void hci_setup(struct hci_dev *hdev)
546 if (hdev->dev_type != HCI_BREDR)
549 hci_setup_event_mask(hdev);
551 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
552 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
554 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
555 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
557 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
558 sizeof(mode), &mode);
560 struct hci_cp_write_eir cp;
562 memset(hdev->eir, 0, sizeof(hdev->eir));
563 memset(&cp, 0, sizeof(cp));
565 hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
569 if (hdev->features[3] & LMP_RSSI_INQ)
570 hci_setup_inquiry_mode(hdev);
572 if (hdev->features[7] & LMP_INQ_TX_PWR)
573 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
575 if (hdev->features[7] & LMP_EXTFEATURES) {
576 struct hci_cp_read_local_ext_features cp;
579 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
583 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
585 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Command Complete handler for HCI_OP_READ_LOCAL_VERSION: cache HCI/LMP
 * version, revision and manufacturer, and during init continue controller
 * setup (the call under the HCI_INIT test is elided in this chunk —
 * presumably hci_setup(); confirm against the full source). */
590 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
592 struct hci_rp_read_local_version *rp = (void *) skb->data;
594 BT_DBG("%s status 0x%x", hdev->name, rp->status);
599 hdev->hci_ver = rp->hci_ver;
600 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
601 hdev->lmp_ver = rp->lmp_ver;
602 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
603 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
605 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
607 hdev->hci_ver, hdev->hci_rev);
609 if (test_bit(HCI_INIT, &hdev->flags))
613 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
/* Compose the default link policy from the supported LMP features
 * (role switch, hold, sniff, park) and write it to the controller.
 * NOTE(review): the link_policy variable declaration is elided here. */
616 static void hci_setup_link_policy(struct hci_dev *hdev)
620 if (hdev->features[0] & LMP_RSWITCH)
621 link_policy |= HCI_LP_RSWITCH;
622 if (hdev->features[0] & LMP_HOLD)
623 link_policy |= HCI_LP_HOLD;
624 if (hdev->features[0] & LMP_SNIFF)
625 link_policy |= HCI_LP_SNIFF;
626 if (hdev->features[1] & LMP_PARK)
627 link_policy |= HCI_LP_PARK;
629 link_policy = cpu_to_le16(link_policy);
630 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
/* Command Complete handler for HCI_OP_READ_LOCAL_COMMANDS: cache the
 * supported-commands bitmap; during init, if Write Default Link Policy is
 * supported (commands[5] bit 4), configure the default link policy. */
634 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
636 struct hci_rp_read_local_commands *rp = (void *) skb->data;
638 BT_DBG("%s status 0x%x", hdev->name, rp->status);
643 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
645 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
646 hci_setup_link_policy(hdev);
649 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES: cache the 8-byte
 * LMP feature page and widen the default ACL packet-type and (e)SCO
 * capability masks according to what the controller supports. */
652 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
654 struct hci_rp_read_local_features *rp = (void *) skb->data;
656 BT_DBG("%s status 0x%x", hdev->name, rp->status);
661 memcpy(hdev->features, rp->features, 8);
663 /* Adjust default settings according to features
664 * supported by device. */
666 if (hdev->features[0] & LMP_3SLOT)
667 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
669 if (hdev->features[0] & LMP_5SLOT)
670 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
672 if (hdev->features[1] & LMP_HV2) {
673 hdev->pkt_type |= (HCI_HV2);
674 hdev->esco_type |= (ESCO_HV2);
677 if (hdev->features[1] & LMP_HV3) {
678 hdev->pkt_type |= (HCI_HV3);
679 hdev->esco_type |= (ESCO_HV3);
682 if (hdev->features[3] & LMP_ESCO)
683 hdev->esco_type |= (ESCO_EV3);
685 if (hdev->features[4] & LMP_EV4)
686 hdev->esco_type |= (ESCO_EV4);
688 if (hdev->features[4] & LMP_EV5)
689 hdev->esco_type |= (ESCO_EV5);
691 if (hdev->features[5] & LMP_EDR_ESCO_2M)
692 hdev->esco_type |= (ESCO_2EV3);
694 if (hdev->features[5] & LMP_EDR_ESCO_3M)
695 hdev->esco_type |= (ESCO_3EV3);
697 if (hdev->features[5] & LMP_EDR_3S_ESCO)
698 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
700 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
701 hdev->features[0], hdev->features[1],
702 hdev->features[2], hdev->features[3],
703 hdev->features[4], hdev->features[5],
704 hdev->features[6], hdev->features[7]);
/* Enable or disable LE host support: build a Write LE Host Supported command
 * reflecting the enable_le module parameter and HCI_LE_ENABLED flag, and only
 * send it when it differs from the currently-cached host feature bit. */
707 static void hci_set_le_support(struct hci_dev *hdev)
709 struct hci_cp_write_le_host_supported cp;
711 memset(&cp, 0, sizeof(cp));
713 if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
715 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
718 if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
719 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Command Complete handler for HCI_OP_READ_LOCAL_EXT_FEATURES: store the
 * returned feature page (page 0 into features, page 1 into host_features —
 * the page-selection switch is elided in this chunk), and during init set up
 * LE host support if the controller is LE-capable. */
723 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
726 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
728 BT_DBG("%s status 0x%x", hdev->name, rp->status);
735 memcpy(hdev->features, rp->features, 8);
738 memcpy(hdev->host_features, rp->features, 8);
742 if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
743 hci_set_le_support(hdev);
746 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
/* Command Complete handler for HCI_OP_READ_FLOW_CONTROL_MODE: cache the
 * controller's flow-control mode (packet- vs block-based). */
749 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
752 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
754 BT_DBG("%s status 0x%x", hdev->name, rp->status);
759 hdev->flow_ctl_mode = rp->mode;
761 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE: record ACL/SCO MTUs
 * and packet counts, apply the fixup-buffer-size quirk (the adjustment body
 * is elided in this chunk), and initialise the in-flight packet counters. */
764 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
766 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
768 BT_DBG("%s status 0x%x", hdev->name, rp->status);
773 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
774 hdev->sco_mtu = rp->sco_mtu;
775 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
776 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
778 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
783 hdev->acl_cnt = hdev->acl_pkts;
784 hdev->sco_cnt = hdev->sco_pkts;
786 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
787 hdev->acl_mtu, hdev->acl_pkts,
788 hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete handler for HCI_OP_READ_BD_ADDR: cache the controller's
 * Bluetooth device address. */
791 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
793 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
795 BT_DBG("%s status 0x%x", hdev->name, rp->status);
798 bacpy(&hdev->bdaddr, &rp->bdaddr);
800 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
/* Command Complete handler for HCI_OP_READ_DATA_BLOCK_SIZE: record the
 * block-based flow control parameters (max ACL length, block length and
 * count) and initialise the available-block counter. */
803 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
806 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
808 BT_DBG("%s status 0x%x", hdev->name, rp->status);
813 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
814 hdev->block_len = __le16_to_cpu(rp->block_len);
815 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
817 hdev->block_cnt = hdev->num_blocks;
819 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
820 hdev->block_cnt, hdev->block_len);
822 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
/* Command Complete handler for HCI_OP_WRITE_CA_TIMEOUT: complete the pending
 * request with the returned status. */
825 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
827 __u8 status = *((__u8 *) skb->data);
829 BT_DBG("%s status 0x%x", hdev->name, status);
831 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO: cache all AMP
 * controller capabilities (status, bandwidth limits, latency, PDU size,
 * PAL capabilities, association size, flush timeouts). */
834 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
837 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
839 BT_DBG("%s status 0x%x", hdev->name, rp->status);
844 hdev->amp_status = rp->amp_status;
845 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
846 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
847 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
848 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
849 hdev->amp_type = rp->amp_type;
850 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
851 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
852 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
853 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
855 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
/* Command Complete handler for HCI_OP_DELETE_STORED_LINK_KEY: complete the
 * pending request with the returned status. */
858 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
861 __u8 status = *((__u8 *) skb->data);
863 BT_DBG("%s status 0x%x", hdev->name, status);
865 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
/* Command Complete handler for HCI_OP_SET_EVENT_MASK: complete the pending
 * request with the returned status. */
868 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
870 __u8 status = *((__u8 *) skb->data);
872 BT_DBG("%s status 0x%x", hdev->name, status);
874 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
/* Command Complete handler for HCI_OP_WRITE_INQUIRY_MODE: complete the
 * pending request with the returned status. */
877 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
880 __u8 status = *((__u8 *) skb->data);
882 BT_DBG("%s status 0x%x", hdev->name, status);
884 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
/* Command Complete handler for HCI_OP_READ_INQ_RSP_TX_POWER: complete the
 * pending request with the returned status. */
887 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
890 __u8 status = *((__u8 *) skb->data);
892 BT_DBG("%s status 0x%x", hdev->name, status);
894 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
/* Command Complete handler for HCI_OP_SET_EVENT_FLT: complete the pending
 * request with the returned status. */
897 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
899 __u8 status = *((__u8 *) skb->data);
901 BT_DBG("%s status 0x%x", hdev->name, status);
903 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY: notify mgmt of the
 * result and, on success, remember the PIN length on the matching ACL
 * connection (used later for link-key type decisions).
 * NOTE(review): status and NULL guards plus the unlock label are elided. */
906 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
908 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
909 struct hci_cp_pin_code_reply *cp;
910 struct hci_conn *conn;
912 BT_DBG("%s status 0x%x", hdev->name, rp->status);
916 if (test_bit(HCI_MGMT, &hdev->dev_flags))
917 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
922 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
926 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
928 conn->pin_length = cp->pin_len;
931 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_PIN_CODE_NEG_REPLY: forward the result
 * to mgmt when the management interface is active. */
934 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
936 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
938 BT_DBG("%s status 0x%x", hdev->name, rp->status);
942 if (test_bit(HCI_MGMT, &hdev->dev_flags))
943 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
946 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_LE_READ_BUFFER_SIZE: record the LE ACL
 * MTU and packet count and initialise the LE in-flight counter. */
949 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
952 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
954 BT_DBG("%s status 0x%x", hdev->name, rp->status);
959 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
960 hdev->le_pkts = rp->le_max_pkt;
962 hdev->le_cnt = hdev->le_pkts;
964 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
966 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
/* Command Complete handler for HCI_OP_USER_CONFIRM_REPLY: report the result
 * to mgmt for the SSP user-confirmation flow. */
969 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
971 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
973 BT_DBG("%s status 0x%x", hdev->name, rp->status);
977 if (test_bit(HCI_MGMT, &hdev->dev_flags))
978 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
981 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_USER_CONFIRM_NEG_REPLY: report the
 * rejection result to mgmt. */
984 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
987 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
989 BT_DBG("%s status 0x%x", hdev->name, rp->status);
993 if (test_bit(HCI_MGMT, &hdev->dev_flags))
994 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
995 ACL_LINK, 0, rp->status);
997 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_USER_PASSKEY_REPLY: report the result
 * to mgmt (reuses the user-confirm reply response layout). */
1000 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1002 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1004 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1008 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1009 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1012 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_USER_PASSKEY_NEG_REPLY: report the
 * rejection result to mgmt. */
1015 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1016 struct sk_buff *skb)
1018 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1020 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1024 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1025 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1026 ACL_LINK, 0, rp->status);
1028 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_DATA: hand the OOB
 * hash/randomizer pair to mgmt for out-of-band pairing. */
1031 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1032 struct sk_buff *skb)
1034 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1036 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1039 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1040 rp->randomizer, rp->status);
1041 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_LE_SET_SCAN_PARAM: complete the
 * request; on failure notify mgmt that discovery could not start.
 * NOTE(review): the failure guard around the mgmt call is elided here. */
1044 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1046 __u8 status = *((__u8 *) skb->data);
1048 BT_DBG("%s status 0x%x", hdev->name, status);
1050 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1054 mgmt_start_discovery_failed(hdev, status);
1055 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE: branch on the
 * enable value that was sent. Enable: set HCI_LE_SCAN, clear stale
 * advertising entries and enter DISCOVERY_FINDING (failure path reports to
 * mgmt instead). Disable: clear HCI_LE_SCAN, schedule advertising-cache
 * expiry and either continue interleaved discovery or stop. Any other value
 * is a spec violation and only logged.
 * NOTE(review): status guards, break statements and some braces are elided
 * in this chunk; the two cases do not fall through in the original. */
1060 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1061 struct sk_buff *skb)
1063 struct hci_cp_le_set_scan_enable *cp;
1064 __u8 status = *((__u8 *) skb->data);
1066 BT_DBG("%s status 0x%x", hdev->name, status);
1068 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1072 switch (cp->enable) {
1073 case LE_SCANNING_ENABLED:
1074 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1078 mgmt_start_discovery_failed(hdev, status);
1079 hci_dev_unlock(hdev);
1083 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1085 cancel_delayed_work_sync(&hdev->adv_work);
1088 hci_adv_entries_clear(hdev);
1089 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1090 hci_dev_unlock(hdev);
1093 case LE_SCANNING_DISABLED:
1097 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1099 schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
1101 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
1102 mgmt_interleaved_discovery(hdev);
1105 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1106 hci_dev_unlock(hdev);
1112 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
/* Command Complete handler for HCI_OP_LE_LTK_REPLY: complete the pending
 * request with the returned status. */
1117 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1119 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1121 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1126 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
/* Command Complete handler for HCI_OP_LE_LTK_NEG_REPLY: complete the pending
 * request with the returned status. */
1129 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1131 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1133 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1138 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED: sync the
 * cached host LE-support feature bit with the value that was sent, and tell
 * mgmt about the result — but not during init, to avoid spurious events.
 * NOTE(review): the set/clear branch condition on sent->le is elided. */
1141 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1142 struct sk_buff *skb)
1144 struct hci_cp_write_le_host_supported *sent;
1145 __u8 status = *((__u8 *) skb->data);
1147 BT_DBG("%s status 0x%x", hdev->name, status);
1149 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1155 hdev->host_features[0] |= LMP_HOST_LE;
1157 hdev->host_features[0] &= ~LMP_HOST_LE;
1160 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1161 !test_bit(HCI_INIT, &hdev->flags))
1162 mgmt_le_enable_complete(hdev, sent->le, status);
1164 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
/* Command Status handler for HCI_OP_INQUIRY. On error: complete the request,
 * resume pending connections and tell mgmt discovery failed. On success: set
 * HCI_INQUIRY and move discovery to FINDING.
 * NOTE(review): the status-branch structure and return are elided here; the
 * error and success paths shown are mutually exclusive in the original. */
1167 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1169 BT_DBG("%s status 0x%x", hdev->name, status);
1172 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1173 hci_conn_check_pending(hdev);
1175 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1176 mgmt_start_discovery_failed(hdev, status);
1177 hci_dev_unlock(hdev);
1181 set_bit(HCI_INQUIRY, &hdev->flags);
1184 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1185 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_CREATE_CONN. On failure for a connection
 * in BT_CONNECT: close it and confirm the failure to the upper protocol —
 * except status 0x0c (command disallowed) with few attempts, which leaves the
 * connection in BT_CONNECT2 for a later retry. On success with no existing
 * hci_conn, create one for the outgoing (master) link.
 * NOTE(review): several guard/else lines are elided in this chunk. */
1188 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1190 struct hci_cp_create_conn *cp;
1191 struct hci_conn *conn;
1193 BT_DBG("%s status 0x%x", hdev->name, status);
1195 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1201 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1203 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1206 if (conn && conn->state == BT_CONNECT) {
1207 if (status != 0x0c || conn->attempt > 2) {
1208 conn->state = BT_CLOSED;
1209 hci_proto_connect_cfm(conn, status);
1212 conn->state = BT_CONNECT2;
1216 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1219 conn->link_mode |= HCI_LM_MASTER;
1221 BT_ERR("No memory for new connection");
1225 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_ADD_SCO: on failure, find the ACL link
 * the SCO was being added to, close the attached SCO connection and confirm
 * the failure upward.
 * NOTE(review): the status guard, 'handle' declaration, the acl->link (sco)
 * lookup and hci_conn_put are elided in this chunk. */
1228 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1230 struct hci_cp_add_sco *cp;
1231 struct hci_conn *acl, *sco;
1234 BT_DBG("%s status 0x%x", hdev->name, status);
1239 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1243 handle = __le16_to_cpu(cp->handle);
1245 BT_DBG("%s handle %d", hdev->name, handle);
1249 acl = hci_conn_hash_lookup_handle(hdev, handle);
1253 sco->state = BT_CLOSED;
1255 hci_proto_connect_cfm(sco, status);
1260 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_AUTH_REQUESTED: on failure, confirm the
 * connect failure to the upper layer when the connection is still in
 * BT_CONFIG (guards and hci_conn_put elided in this chunk). */
1263 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1265 struct hci_cp_auth_requested *cp;
1266 struct hci_conn *conn;
1268 BT_DBG("%s status 0x%x", hdev->name, status);
1273 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1279 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1281 if (conn->state == BT_CONFIG) {
1282 hci_proto_connect_cfm(conn, status);
1287 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SET_CONN_ENCRYPT: on failure, confirm
 * the connect failure to the upper layer when the connection is still in
 * BT_CONFIG (guards and hci_conn_put elided in this chunk). */
1290 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1292 struct hci_cp_set_conn_encrypt *cp;
1293 struct hci_conn *conn;
1295 BT_DBG("%s status 0x%x", hdev->name, status);
1300 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1306 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1308 if (conn->state == BT_CONFIG) {
1309 hci_proto_connect_cfm(conn, status);
1314 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection in BT_CONFIG still needs an explicit
 * authentication request: not for SDP-level security, and for non-SSP links
 * only when HIGH security or MITM protection (auth_type bit 0) is requested.
 * Returns non-zero when authentication is needed (return statements elided
 * in this chunk). */
1317 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1318 struct hci_conn *conn)
1320 if (conn->state != BT_CONFIG || !conn->out)
1323 if (conn->pending_sec_level == BT_SECURITY_SDP)
1326 /* Only request authentication for SSP connections or non-SSP
1327 * devices with sec_level HIGH or if MITM protection is requested */
1328 if (!hci_conn_ssp_enabled(conn) &&
1329 conn->pending_sec_level != BT_SECURITY_HIGH &&
1330 !(conn->auth_type & 0x01))
/* Issue a Remote Name Request for an inquiry-cache entry, reusing the page
 * scan parameters and clock offset discovered during inquiry. Returns the
 * hci_send_cmd() result (0 on success). */
1336 static inline int hci_resolve_name(struct hci_dev *hdev,
1337 struct inquiry_entry *e)
1339 struct hci_cp_remote_name_req cp;
1341 memset(&cp, 0, sizeof(cp));
1343 bacpy(&cp.bdaddr, &e->data.bdaddr);
1344 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1345 cp.pscan_mode = e->data.pscan_mode;
1346 cp.clock_offset = e->data.clock_offset;
1348 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Pick the next inquiry-cache entry still needing name resolution and send a
 * Remote Name Request for it, marking it NAME_PENDING on success. Returns
 * true when a request was issued, false when nothing is left to resolve
 * (return statements elided in this chunk). */
1351 static bool hci_resolve_next_name(struct hci_dev *hdev)
1353 struct discovery_state *discov = &hdev->discovery;
1354 struct inquiry_entry *e;
1356 if (list_empty(&discov->resolve))
1359 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1363 if (hci_resolve_name(hdev, e) == 0) {
1364 e->name_state = NAME_PENDING;
/* Post-process a remote name result during discovery: emit the mgmt
 * device-connected event once per connection, update the inquiry-cache
 * entry's name state (KNOWN with a mgmt remote-name event, or NOT_KNOWN on
 * failure), then either resolve the next pending name or finish discovery.
 * NOTE(review): guards, returns and the discov_complete label placement are
 * elided in this chunk. */
1371 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1372 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1374 struct discovery_state *discov = &hdev->discovery;
1375 struct inquiry_entry *e;
1377 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1378 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1379 name_len, conn->dev_class);
1381 if (discov->state == DISCOVERY_STOPPED)
1384 if (discov->state == DISCOVERY_STOPPING)
1385 goto discov_complete;
1387 if (discov->state != DISCOVERY_RESOLVING)
1390 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING)
1391 /* If the device was not found in a list of found devices names of which
1392 * are pending. there is no need to continue resolving a next name as it
1393 * will be done upon receiving another Remote Name Request Complete
1400 e->name_state = NAME_KNOWN;
1401 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1402 e->data.rssi, name, name_len);
1404 e->name_state = NAME_NOT_KNOWN;
1407 if (hci_resolve_next_name(hdev))
1411 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ. On failure, report a
 * NULL name to the pending-name machinery (mgmt only) and, if the target
 * connection still needs authentication, fire an Auth Requested command.
 * On success, nothing is done here — the Remote Name Request Complete event
 * carries the result (the early-success return is elided in this chunk). */
1414 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1416 struct hci_cp_remote_name_req *cp;
1417 struct hci_conn *conn;
1419 BT_DBG("%s status 0x%x", hdev->name, status);
1421 /* If successful wait for the name req complete event before
1422 * checking for the need to do authentication */
1426 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1432 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1434 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1435 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1440 if (!hci_outgoing_auth_needed(hdev, conn))
1443 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1444 struct hci_cp_auth_requested cp;
1445 cp.handle = __cpu_to_le16(conn->handle);
1446 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1450 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_READ_REMOTE_FEATURES.  On error, a
 * connection still in BT_CONFIG gets its connect confirmation with the
 * failure status so setup does not hang. */
1453 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1455 	struct hci_cp_read_remote_features *cp;
1456 	struct hci_conn *conn;
1458 	BT_DBG("%s status 0x%x", hdev->name, status);
1463 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1469 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1471 	if (conn->state == BT_CONFIG) {
1472 		hci_proto_connect_cfm(conn, status);
1477 	hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_READ_REMOTE_EXT_FEATURES.  Mirrors
 * hci_cs_read_remote_features(): fail the in-progress config on error. */
1480 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1482 	struct hci_cp_read_remote_ext_features *cp;
1483 	struct hci_conn *conn;
1485 	BT_DBG("%s status 0x%x", hdev->name, status);
1490 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1496 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1498 	if (conn->state == BT_CONFIG) {
1499 		hci_proto_connect_cfm(conn, status);
1504 	hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_SETUP_SYNC_CONN.  The handle in the sent
 * command is the ACL link; on error the attached SCO link is closed and its
 * connect confirmation delivered with the failure status. */
1507 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1509 	struct hci_cp_setup_sync_conn *cp;
1510 	struct hci_conn *acl, *sco;
1513 	BT_DBG("%s status 0x%x", hdev->name, status);
1518 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1522 	handle = __le16_to_cpu(cp->handle);
1524 	BT_DBG("%s handle %d", hdev->name, handle);
1528 	acl = hci_conn_hash_lookup_handle(hdev, handle);
/* NOTE(review): sco is presumably acl->link here — truncated in this view. */
1532 		sco->state = BT_CLOSED;
1534 		hci_proto_connect_cfm(sco, status);
1539 	hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_SNIFF_MODE.  On error, clear the pending
 * mode-change flag and run any deferred SCO setup for the connection. */
1542 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1544 	struct hci_cp_sniff_mode *cp;
1545 	struct hci_conn *conn;
1547 	BT_DBG("%s status 0x%x", hdev->name, status);
1552 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1558 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1560 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1562 	if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1563 		hci_sco_setup(conn, status);
1566 	hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_EXIT_SNIFF_MODE.  Identical recovery
 * path to hci_cs_sniff_mode(): clear pending flags, finish SCO setup. */
1569 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1571 	struct hci_cp_exit_sniff_mode *cp;
1572 	struct hci_conn *conn;
1574 	BT_DBG("%s status 0x%x", hdev->name, status);
1579 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1585 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1587 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1589 	if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1590 		hci_sco_setup(conn, status);
1593 	hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_DISCONNECT.  On error, notify mgmt that
 * the requested disconnect could not be carried out. */
1596 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1598 	struct hci_cp_disconnect *cp;
1599 	struct hci_conn *conn;
1604 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1610 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1612 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1613 				       conn->dst_type, status);
1615 	hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_LE_CREATE_CONN.  On failure, close the
 * half-open LE connection; on success (this view is truncated) a connection
 * object is added for the peer if one does not already exist. */
1618 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1620 	struct hci_cp_le_create_conn *cp;
1621 	struct hci_conn *conn;
1623 	BT_DBG("%s status 0x%x", hdev->name, status);
1625 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1631 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1633 	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
/* Error while still connecting: tear down and confirm with failure status. */
1637 		if (conn && conn->state == BT_CONNECT) {
1638 			conn->state = BT_CLOSED;
1639 			hci_proto_connect_cfm(conn, status);
1644 			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1646 				conn->dst_type = cp->peer_addr_type;
1649 				BT_ERR("No memory for new connection");
1654 	hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_LE_START_ENC — debug trace only. */
1657 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1659 	BT_DBG("%s status 0x%x", hdev->name, status);
/* Inquiry Complete event.  Clears HCI_INQUIRY, wakes pending connection
 * attempts, then — under mgmt — either starts name resolution for found
 * devices or declares discovery finished. */
1662 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1664 	__u8 status = *((__u8 *) skb->data);
1665 	struct discovery_state *discov = &hdev->discovery;
1666 	struct inquiry_entry *e;
1668 	BT_DBG("%s status %d", hdev->name, status);
1670 	hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1672 	hci_conn_check_pending(hdev);
1674 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1677 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1682 	if (discov->state != DISCOVERY_FINDING)
/* Nothing needs a name: discovery is done. */
1685 	if (list_empty(&discov->resolve)) {
1686 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Otherwise resolve the first NAME_NEEDED entry and enter RESOLVING state. */
1690 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1691 	if (e && hci_resolve_name(hdev, e) == 0) {
1692 		e->name_state = NAME_PENDING;
1693 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1695 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1699 	hci_dev_unlock(hdev);
/* Inquiry Result event (standard format, no RSSI).  The first byte of the
 * payload is the response count; each following inquiry_info record is copied
 * into the inquiry cache and reported to mgmt as a found device. */
1702 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1704 	struct inquiry_data data;
1705 	struct inquiry_info *info = (void *) (skb->data + 1);
1706 	int num_rsp = *((__u8 *) skb->data);
1708 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1715 	for (; num_rsp; num_rsp--, info++) {
1716 		bool name_known, ssp;
1718 		bacpy(&data.bdaddr, &info->bdaddr);
1719 		data.pscan_rep_mode	= info->pscan_rep_mode;
1720 		data.pscan_period_mode	= info->pscan_period_mode;
1721 		data.pscan_mode		= info->pscan_mode;
1722 		memcpy(data.dev_class, info->dev_class, 3);
1723 		data.clock_offset	= info->clock_offset;
/* This event format carries no RSSI or SSP information. */
1725 		data.ssp_mode		= 0x00;
1727 		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1728 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1729 				  info->dev_class, 0, !name_known, ssp, NULL,
1733 	hci_dev_unlock(hdev);
/* Connection Complete event.  Finalizes an ACL or SCO connection: assigns the
 * handle, sets link-mode flags, requests remote features, and on failure
 * reports the connect error to mgmt and the protocol layer. */
1736 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1738 	struct hci_ev_conn_complete *ev = (void *) skb->data;
1739 	struct hci_conn *conn;
1741 	BT_DBG("%s", hdev->name);
1745 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* Some controllers report SCO links as ESCO; retry the lookup and fix
 * the type so the rest of the stack sees a SCO connection. */
1747 		if (ev->link_type != SCO_LINK)
1750 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1754 		conn->type = SCO_LINK;
1758 		conn->handle = __le16_to_cpu(ev->handle);
1760 		if (conn->type == ACL_LINK) {
1761 			conn->state = BT_CONFIG;
1762 			hci_conn_hold(conn);
/* Legacy (non-SSP) incoming link with no stored key: allow extra time
 * for pairing before the idle disconnect timer fires. */
1764 			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1765 			    !hci_find_link_key(hdev, &ev->bdaddr))
1766 				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1768 				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1770 			conn->state = BT_CONNECTED;
1772 		hci_conn_hold_device(conn);
1773 		hci_conn_add_sysfs(conn);
1775 		if (test_bit(HCI_AUTH, &hdev->flags))
1776 			conn->link_mode |= HCI_LM_AUTH;
1778 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
1779 			conn->link_mode |= HCI_LM_ENCRYPT;
1781 		/* Get remote features */
1782 		if (conn->type == ACL_LINK) {
1783 			struct hci_cp_read_remote_features cp;
1784 			cp.handle = ev->handle;
1785 			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1789 		/* Set packet type for incoming connection */
1790 		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1791 			struct hci_cp_change_conn_ptype cp;
1792 			cp.handle = ev->handle;
1793 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1794 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
/* Failure path: close the connection and report the error upward. */
1798 		conn->state = BT_CLOSED;
1799 		if (conn->type == ACL_LINK)
1800 			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1801 					    conn->dst_type, ev->status);
1804 	if (conn->type == ACL_LINK)
1805 		hci_sco_setup(conn, ev->status);
1808 		hci_proto_connect_cfm(conn, ev->status);
1810 	} else if (ev->link_type != ACL_LINK)
1811 		hci_proto_connect_cfm(conn, ev->status);
1814 	hci_dev_unlock(hdev);
1816 	hci_conn_check_pending(hdev);
/* Connection Request event.  Accepts or rejects an incoming ACL/SCO/eSCO
 * connection based on the device link mode, protocol opinion, and blacklist. */
1819 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1821 	struct hci_ev_conn_request *ev = (void *) skb->data;
1822 	int mask = hdev->link_mode;
1824 	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1825 					batostr(&ev->bdaddr), ev->link_type);
/* Let the protocol layers veto or extend the accept decision. */
1827 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1829 	if ((mask & HCI_LM_ACCEPT) &&
1830 			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1831 		/* Connection accepted */
1832 		struct inquiry_entry *ie;
1833 		struct hci_conn *conn;
/* Refresh the cached device class from the request. */
1837 		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1839 			memcpy(ie->data.dev_class, ev->dev_class, 3);
1841 		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1843 			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1845 				BT_ERR("No memory for new connection");
1846 				hci_dev_unlock(hdev);
1851 		memcpy(conn->dev_class, ev->dev_class, 3);
1852 		conn->state = BT_CONNECT;
1854 		hci_dev_unlock(hdev);
/* ACL (or controllers without eSCO) use the plain accept command. */
1856 		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1857 			struct hci_cp_accept_conn_req cp;
1859 			bacpy(&cp.bdaddr, &ev->bdaddr);
1861 			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1862 				cp.role = 0x00; /* Become master */
1864 				cp.role = 0x01; /* Remain slave */
1866 			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
/* eSCO-capable: accept synchronous connection with default voice params. */
1869 			struct hci_cp_accept_sync_conn_req cp;
1871 			bacpy(&cp.bdaddr, &ev->bdaddr);
1872 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* 0x1f40 = 8000 (bytes/s), standard 64 kbit/s voice bandwidth. */
1874 			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
1875 			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
1876 			cp.max_latency    = cpu_to_le16(0xffff);
1877 			cp.content_format = cpu_to_le16(hdev->voice_setting);
1878 			cp.retrans_effort = 0xff;
1880 			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1884 		/* Connection rejected */
1885 		struct hci_cp_reject_conn_req cp;
1887 		bacpy(&cp.bdaddr, &ev->bdaddr);
1888 		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1889 		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Disconnection Complete event.  Closes the connection object, notifies mgmt
 * (success or failure), optionally drops the stored link key, and confirms
 * the disconnect to the protocol layer. */
1893 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1895 	struct hci_ev_disconn_complete *ev = (void *) skb->data;
1896 	struct hci_conn *conn;
1898 	BT_DBG("%s status %d", hdev->name, ev->status);
1902 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1906 	if (ev->status == 0)
1907 		conn->state = BT_CLOSED;
/* Only ACL/LE links were announced to mgmt; report only for those. */
1909 	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1910 	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1911 		if (ev->status != 0)
1912 			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1913 					       conn->dst_type, ev->status);
1915 			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1919 	if (ev->status == 0) {
/* flush_key is set for temporary bondings that must not persist. */
1920 		if (conn->type == ACL_LINK && conn->flush_key)
1921 			hci_remove_link_key(hdev, &conn->dst);
1922 		hci_proto_disconn_cfm(conn, ev->reason);
1927 	hci_dev_unlock(hdev);
/* Authentication Complete event.  Updates link security state, works around
 * legacy devices that cannot re-authenticate, and continues with encryption
 * setup or connection confirmation as appropriate. */
1930 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1932 	struct hci_ev_auth_complete *ev = (void *) skb->data;
1933 	struct hci_conn *conn;
1935 	BT_DBG("%s status %d", hdev->name, ev->status);
1939 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Legacy (pre-SSP) devices cannot re-auth an existing link; treat the
 * pending re-auth as implicitly successful. */
1944 	if (!hci_conn_ssp_enabled(conn) &&
1945 	    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1946 		BT_INFO("re-auth of legacy device is not possible.");
1948 			conn->link_mode |= HCI_LM_AUTH;
1949 			conn->sec_level = conn->pending_sec_level;
1952 			mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1956 	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1957 	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1959 	if (conn->state == BT_CONFIG) {
/* SSP link during setup: authentication implies we now turn on encryption. */
1960 		if (!ev->status && hci_conn_ssp_enabled(conn)) {
1961 			struct hci_cp_set_conn_encrypt cp;
1962 			cp.handle  = ev->handle;
1964 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1967 			conn->state = BT_CONNECTED;
1968 			hci_proto_connect_cfm(conn, ev->status);
1972 		hci_auth_cfm(conn, ev->status);
1974 		hci_conn_hold(conn);
1975 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
/* An encryption change was requested while auth was pending; issue it now. */
1979 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1981 			struct hci_cp_set_conn_encrypt cp;
1982 			cp.handle  = ev->handle;
1984 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1987 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1988 			hci_encrypt_cfm(conn, ev->status, 0x00);
1993 	hci_dev_unlock(hdev);
/* Remote Name Request Complete event.  Feeds the (possibly failed) name into
 * the discovery name-resolution machinery, then starts authentication on the
 * link if an outgoing auth is still required. */
1996 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1998 	struct hci_ev_remote_name *ev = (void *) skb->data;
1999 	struct hci_conn *conn;
2001 	BT_DBG("%s", hdev->name);
2003 	hci_conn_check_pending(hdev);
2007 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2009 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2012 	if (ev->status == 0)
/* Name is a fixed-size field; bound the length explicitly. */
2013 		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2014 				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2016 		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2022 	if (!hci_outgoing_auth_needed(hdev, conn))
2025 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2026 		struct hci_cp_auth_requested cp;
2027 		cp.handle = __cpu_to_le16(conn->handle);
2028 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2032 	hci_dev_unlock(hdev);
/* Encryption Change event.  Updates link-mode bits, disconnects on an
 * encryption failure of an established link, and delivers encrypt/connect
 * confirmations to the protocol layer. */
2035 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2037 	struct hci_ev_encrypt_change *ev = (void *) skb->data;
2038 	struct hci_conn *conn;
2040 	BT_DBG("%s status %d", hdev->name, ev->status);
2044 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2048 			/* Encryption implies authentication */
2049 			conn->link_mode |= HCI_LM_AUTH;
2050 			conn->link_mode |= HCI_LM_ENCRYPT;
2051 			conn->sec_level = conn->pending_sec_level;
2053 			conn->link_mode &= ~HCI_LM_ENCRYPT;
2056 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* 0x13 = remote user terminated connection: drop an established link
 * whose encryption change failed. */
2058 	if (ev->status && conn->state == BT_CONNECTED) {
2059 		hci_acl_disconn(conn, 0x13);
2064 	if (conn->state == BT_CONFIG) {
2066 			conn->state = BT_CONNECTED;
2068 		hci_proto_connect_cfm(conn, ev->status);
2071 		hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2075 	hci_dev_unlock(hdev);
/* Change Connection Link Key Complete event.  Marks the link secure on
 * success and notifies waiters of the key change. */
2078 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2080 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2081 	struct hci_conn *conn;
2083 	BT_DBG("%s status %d", hdev->name, ev->status);
2087 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2090 			conn->link_mode |= HCI_LM_SECURE;
2092 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2094 		hci_key_change_cfm(conn, ev->status);
2097 	hci_dev_unlock(hdev);
/* Read Remote Supported Features Complete event.  Caches the feature page,
 * then continues connection setup: fetch extended features if both sides
 * support SSP, otherwise request the remote name / announce to mgmt, and
 * finish config when no authentication is needed. */
2100 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2102 	struct hci_ev_remote_features *ev = (void *) skb->data;
2103 	struct hci_conn *conn;
2105 	BT_DBG("%s status %d", hdev->name, ev->status);
2109 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2114 		memcpy(conn->features, ev->features, 8);
2116 	if (conn->state != BT_CONFIG)
/* Both ends SSP-capable: read extended features page 1 before continuing. */
2119 	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2120 		struct hci_cp_read_remote_ext_features cp;
2121 		cp.handle = ev->handle;
2123 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2128 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2129 		struct hci_cp_remote_name_req cp;
2130 		memset(&cp, 0, sizeof(cp));
2131 		bacpy(&cp.bdaddr, &conn->dst);
2132 		cp.pscan_rep_mode = 0x02;
2133 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2134 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2135 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2136 				      conn->dst_type, 0, NULL, 0,
2139 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2140 		conn->state = BT_CONNECTED;
2141 		hci_proto_connect_cfm(conn, ev->status);
2146 	hci_dev_unlock(hdev);
/* Read Remote Version Information Complete event — debug trace only. */
2149 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2151 	BT_DBG("%s", hdev->name);
/* QoS Setup Complete event — debug trace only. */
2154 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2156 	BT_DBG("%s", hdev->name);
/* Command Complete event.  Strips the event header, dispatches to the
 * per-opcode hci_cc_* handler, then cancels the command timeout and kicks
 * the command queue so the next queued HCI command can be sent. */
2159 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2161 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
/* Advance skb->data past the event header so handlers see the payload. */
2164 	skb_pull(skb, sizeof(*ev));
2166 	opcode = __le16_to_cpu(ev->opcode);
2169 	case HCI_OP_INQUIRY_CANCEL:
2170 		hci_cc_inquiry_cancel(hdev, skb);
2173 	case HCI_OP_EXIT_PERIODIC_INQ:
2174 		hci_cc_exit_periodic_inq(hdev, skb);
2177 	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2178 		hci_cc_remote_name_req_cancel(hdev, skb);
2181 	case HCI_OP_ROLE_DISCOVERY:
2182 		hci_cc_role_discovery(hdev, skb);
2185 	case HCI_OP_READ_LINK_POLICY:
2186 		hci_cc_read_link_policy(hdev, skb);
2189 	case HCI_OP_WRITE_LINK_POLICY:
2190 		hci_cc_write_link_policy(hdev, skb);
2193 	case HCI_OP_READ_DEF_LINK_POLICY:
2194 		hci_cc_read_def_link_policy(hdev, skb);
2197 	case HCI_OP_WRITE_DEF_LINK_POLICY:
2198 		hci_cc_write_def_link_policy(hdev, skb);
2202 		hci_cc_reset(hdev, skb);
2205 	case HCI_OP_WRITE_LOCAL_NAME:
2206 		hci_cc_write_local_name(hdev, skb);
2209 	case HCI_OP_READ_LOCAL_NAME:
2210 		hci_cc_read_local_name(hdev, skb);
2213 	case HCI_OP_WRITE_AUTH_ENABLE:
2214 		hci_cc_write_auth_enable(hdev, skb);
2217 	case HCI_OP_WRITE_ENCRYPT_MODE:
2218 		hci_cc_write_encrypt_mode(hdev, skb);
2221 	case HCI_OP_WRITE_SCAN_ENABLE:
2222 		hci_cc_write_scan_enable(hdev, skb);
2225 	case HCI_OP_READ_CLASS_OF_DEV:
2226 		hci_cc_read_class_of_dev(hdev, skb);
2229 	case HCI_OP_WRITE_CLASS_OF_DEV:
2230 		hci_cc_write_class_of_dev(hdev, skb);
2233 	case HCI_OP_READ_VOICE_SETTING:
2234 		hci_cc_read_voice_setting(hdev, skb);
2237 	case HCI_OP_WRITE_VOICE_SETTING:
2238 		hci_cc_write_voice_setting(hdev, skb);
2241 	case HCI_OP_HOST_BUFFER_SIZE:
2242 		hci_cc_host_buffer_size(hdev, skb);
2245 	case HCI_OP_WRITE_SSP_MODE:
2246 		hci_cc_write_ssp_mode(hdev, skb);
2249 	case HCI_OP_READ_LOCAL_VERSION:
2250 		hci_cc_read_local_version(hdev, skb);
2253 	case HCI_OP_READ_LOCAL_COMMANDS:
2254 		hci_cc_read_local_commands(hdev, skb);
2257 	case HCI_OP_READ_LOCAL_FEATURES:
2258 		hci_cc_read_local_features(hdev, skb);
2261 	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2262 		hci_cc_read_local_ext_features(hdev, skb);
2265 	case HCI_OP_READ_BUFFER_SIZE:
2266 		hci_cc_read_buffer_size(hdev, skb);
2269 	case HCI_OP_READ_BD_ADDR:
2270 		hci_cc_read_bd_addr(hdev, skb);
2273 	case HCI_OP_READ_DATA_BLOCK_SIZE:
2274 		hci_cc_read_data_block_size(hdev, skb);
2277 	case HCI_OP_WRITE_CA_TIMEOUT:
2278 		hci_cc_write_ca_timeout(hdev, skb);
2281 	case HCI_OP_READ_FLOW_CONTROL_MODE:
2282 		hci_cc_read_flow_control_mode(hdev, skb);
2285 	case HCI_OP_READ_LOCAL_AMP_INFO:
2286 		hci_cc_read_local_amp_info(hdev, skb);
2289 	case HCI_OP_DELETE_STORED_LINK_KEY:
2290 		hci_cc_delete_stored_link_key(hdev, skb);
2293 	case HCI_OP_SET_EVENT_MASK:
2294 		hci_cc_set_event_mask(hdev, skb);
2297 	case HCI_OP_WRITE_INQUIRY_MODE:
2298 		hci_cc_write_inquiry_mode(hdev, skb);
2301 	case HCI_OP_READ_INQ_RSP_TX_POWER:
2302 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2305 	case HCI_OP_SET_EVENT_FLT:
2306 		hci_cc_set_event_flt(hdev, skb);
2309 	case HCI_OP_PIN_CODE_REPLY:
2310 		hci_cc_pin_code_reply(hdev, skb);
2313 	case HCI_OP_PIN_CODE_NEG_REPLY:
2314 		hci_cc_pin_code_neg_reply(hdev, skb);
2317 	case HCI_OP_READ_LOCAL_OOB_DATA:
2318 		hci_cc_read_local_oob_data_reply(hdev, skb);
2321 	case HCI_OP_LE_READ_BUFFER_SIZE:
2322 		hci_cc_le_read_buffer_size(hdev, skb);
2325 	case HCI_OP_USER_CONFIRM_REPLY:
2326 		hci_cc_user_confirm_reply(hdev, skb);
2329 	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2330 		hci_cc_user_confirm_neg_reply(hdev, skb);
2333 	case HCI_OP_USER_PASSKEY_REPLY:
2334 		hci_cc_user_passkey_reply(hdev, skb);
2337 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
2338 		hci_cc_user_passkey_neg_reply(hdev, skb);
2341 	case HCI_OP_LE_SET_SCAN_PARAM:
2342 		hci_cc_le_set_scan_param(hdev, skb);
2345 	case HCI_OP_LE_SET_SCAN_ENABLE:
2346 		hci_cc_le_set_scan_enable(hdev, skb);
2349 	case HCI_OP_LE_LTK_REPLY:
2350 		hci_cc_le_ltk_reply(hdev, skb);
2353 	case HCI_OP_LE_LTK_NEG_REPLY:
2354 		hci_cc_le_ltk_neg_reply(hdev, skb);
2357 	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2358 		hci_cc_write_le_host_supported(hdev, skb);
/* Unhandled opcode: just log it. */
2362 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
/* A real command completed: stop the command timeout watchdog. */
2366 	if (ev->opcode != HCI_OP_NOP)
2367 		del_timer(&hdev->cmd_timer);
/* Allow the next command and drain the command queue if non-empty. */
2370 		atomic_set(&hdev->cmd_cnt, 1);
2371 		if (!skb_queue_empty(&hdev->cmd_q))
2372 			queue_work(hdev->workqueue, &hdev->cmd_work);
/* Command Status event.  Dispatches the status to the per-opcode hci_cs_*
 * handler, cancels the command timeout, and — when the controller grants more
 * command credits and no reset is in flight — resumes the command queue. */
2376 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2378 	struct hci_ev_cmd_status *ev = (void *) skb->data;
2381 	skb_pull(skb, sizeof(*ev));
2383 	opcode = __le16_to_cpu(ev->opcode);
2386 	case HCI_OP_INQUIRY:
2387 		hci_cs_inquiry(hdev, ev->status);
2390 	case HCI_OP_CREATE_CONN:
2391 		hci_cs_create_conn(hdev, ev->status);
2394 	case HCI_OP_ADD_SCO:
2395 		hci_cs_add_sco(hdev, ev->status);
2398 	case HCI_OP_AUTH_REQUESTED:
2399 		hci_cs_auth_requested(hdev, ev->status);
2402 	case HCI_OP_SET_CONN_ENCRYPT:
2403 		hci_cs_set_conn_encrypt(hdev, ev->status);
2406 	case HCI_OP_REMOTE_NAME_REQ:
2407 		hci_cs_remote_name_req(hdev, ev->status);
2410 	case HCI_OP_READ_REMOTE_FEATURES:
2411 		hci_cs_read_remote_features(hdev, ev->status);
2414 	case HCI_OP_READ_REMOTE_EXT_FEATURES:
2415 		hci_cs_read_remote_ext_features(hdev, ev->status);
2418 	case HCI_OP_SETUP_SYNC_CONN:
2419 		hci_cs_setup_sync_conn(hdev, ev->status);
2422 	case HCI_OP_SNIFF_MODE:
2423 		hci_cs_sniff_mode(hdev, ev->status);
2426 	case HCI_OP_EXIT_SNIFF_MODE:
2427 		hci_cs_exit_sniff_mode(hdev, ev->status);
2430 	case HCI_OP_DISCONNECT:
2431 		hci_cs_disconnect(hdev, ev->status);
2434 	case HCI_OP_LE_CREATE_CONN:
2435 		hci_cs_le_create_conn(hdev, ev->status);
2438 	case HCI_OP_LE_START_ENC:
2439 		hci_cs_le_start_enc(hdev, ev->status);
/* Unhandled opcode: just log it. */
2443 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2447 	if (ev->opcode != HCI_OP_NOP)
2448 		del_timer(&hdev->cmd_timer);
/* ncmd > 0 means the controller can accept another command. */
2450 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2451 		atomic_set(&hdev->cmd_cnt, 1);
2452 		if (!skb_queue_empty(&hdev->cmd_q))
2453 			queue_work(hdev->workqueue, &hdev->cmd_work);
/* Role Change event.  Updates the master/slave bit in link_mode, clears the
 * pending role-switch flag, and confirms the switch to waiters. */
2457 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2459 	struct hci_ev_role_change *ev = (void *) skb->data;
2460 	struct hci_conn *conn;
2462 	BT_DBG("%s status %d", hdev->name, ev->status);
2466 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2470 				conn->link_mode &= ~HCI_LM_MASTER;
2472 				conn->link_mode |= HCI_LM_MASTER;
2475 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2477 		hci_role_switch_cfm(conn, ev->status, ev->role);
2480 	hci_dev_unlock(hdev);
/* Number of Completed Packets event (packet-based flow control).  Returns
 * per-connection packet credits to the matching ACL/SCO/LE quota counters
 * (clamped to their maxima) and reschedules TX. */
2483 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2485 	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
/* This event is only valid in packet-based flow-control mode. */
2488 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2489 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate that the skb actually contains num_hndl handle entries. */
2493 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2494 			ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2495 		BT_DBG("%s bad parameters", hdev->name);
2499 	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2501 	for (i = 0; i < ev->num_hndl; i++) {
2502 		struct hci_comp_pkts_info *info = &ev->handles[i];
2503 		struct hci_conn *conn;
2504 		__u16  handle, count;
2506 		handle = __le16_to_cpu(info->handle);
2507 		count  = __le16_to_cpu(info->count);
2509 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2513 		conn->sent -= count;
2515 		switch (conn->type) {
2517 			hdev->acl_cnt += count;
2518 			if (hdev->acl_cnt > hdev->acl_pkts)
2519 				hdev->acl_cnt = hdev->acl_pkts;
/* LE type: credit the LE pool if it exists, else LE shares the ACL pool. */
2523 			if (hdev->le_pkts) {
2524 				hdev->le_cnt += count;
2525 				if (hdev->le_cnt > hdev->le_pkts)
2526 					hdev->le_cnt = hdev->le_pkts;
2528 				hdev->acl_cnt += count;
2529 				if (hdev->acl_cnt > hdev->acl_pkts)
2530 					hdev->acl_cnt = hdev->acl_pkts;
2535 			hdev->sco_cnt += count;
2536 			if (hdev->sco_cnt > hdev->sco_pkts)
2537 				hdev->sco_cnt = hdev->sco_pkts;
2541 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
/* Credits replenished: try to send queued traffic. */
2546 	queue_work(hdev->workqueue, &hdev->tx_work);
/* Number of Completed Data Blocks event (block-based flow control).  Returns
 * per-connection block credits to the device block counter and restarts TX. */
2549 static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2550 					   struct sk_buff *skb)
2552 	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
/* Only meaningful when the controller uses block-based flow control. */
2555 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2556 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2560 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2561 			ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2562 		BT_DBG("%s bad parameters", hdev->name);
2566 	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2569 	for (i = 0; i < ev->num_hndl; i++) {
2570 		struct hci_comp_blocks_info *info = &ev->handles[i];
2571 		struct hci_conn *conn;
2572 		__u16  handle, block_count;
2574 		handle = __le16_to_cpu(info->handle);
2575 		block_count = __le16_to_cpu(info->blocks);
2577 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2581 		conn->sent -= block_count;
2583 		switch (conn->type) {
2585 			hdev->block_cnt += block_count;
2586 			if (hdev->block_cnt > hdev->num_blocks)
2587 				hdev->block_cnt = hdev->num_blocks;
2591 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2596 	queue_work(hdev->workqueue, &hdev->tx_work);
/* Mode Change event (active/sniff).  Records the new mode and interval,
 * updates the power-save flag for unsolicited changes, and runs any pending
 * SCO setup. */
2599 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2601 	struct hci_ev_mode_change *ev = (void *) skb->data;
2602 	struct hci_conn *conn;
2604 	BT_DBG("%s status %d", hdev->name, ev->status);
2608 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2610 		conn->mode = ev->mode;
2611 		conn->interval = __le16_to_cpu(ev->interval);
/* Mode change we did not initiate: track power-save state ourselves. */
2613 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2614 			if (conn->mode == HCI_CM_ACTIVE)
2615 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2617 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2620 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2621 			hci_sco_setup(conn, ev->status);
2624 	hci_dev_unlock(hdev);
/* PIN Code Request event.  Rejects the request when the device is not
 * pairable; otherwise forwards it to user space via mgmt, flagging whether a
 * 16-digit (secure) PIN is required. */
2627 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2629 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
2630 	struct hci_conn *conn;
2632 	BT_DBG("%s", hdev->name);
2636 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Hold the connection open long enough for the user to enter a PIN. */
2640 	if (conn->state == BT_CONNECTED) {
2641 		hci_conn_hold(conn);
2642 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2646 	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2647 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2648 					sizeof(ev->bdaddr), &ev->bdaddr);
2649 	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
/* High security demands a full-length (16 digit) PIN. */
2652 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
2657 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2661 	hci_dev_unlock(hdev);
/* Link Key Request event.  Looks up the stored key for the peer and replies
 * with it, unless the key's type/strength is insufficient for the requested
 * security level — in which case a negative reply forces re-pairing. */
2664 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2666 	struct hci_ev_link_key_req *ev = (void *) skb->data;
2667 	struct hci_cp_link_key_reply cp;
2668 	struct hci_conn *conn;
2669 	struct link_key *key;
2671 	BT_DBG("%s", hdev->name);
2673 	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2678 	key = hci_find_link_key(hdev, &ev->bdaddr);
2680 		BT_DBG("%s link key not found for %s", hdev->name,
2681 							batostr(&ev->bdaddr));
2685 	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2686 							batostr(&ev->bdaddr));
/* Debug keys are only usable when debug-key support is enabled. */
2688 	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2689 				key->type == HCI_LK_DEBUG_COMBINATION) {
2690 		BT_DBG("%s ignoring debug key", hdev->name);
2694 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* An unauthenticated key cannot satisfy an auth-required request. */
2696 		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2697 				conn->auth_type != 0xff &&
2698 				(conn->auth_type & 0x01)) {
2699 			BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* A short-PIN combination key is too weak for high security. */
2703 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2704 				conn->pending_sec_level == BT_SECURITY_HIGH) {
2705 			BT_DBG("%s ignoring key unauthenticated for high \
2706 							security", hdev->name);
2710 		conn->key_type = key->type;
2711 		conn->pin_length = key->pin_len;
2714 	bacpy(&cp.bdaddr, &ev->bdaddr);
2715 	memcpy(cp.link_key, key->val, 16);
2717 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2719 	hci_dev_unlock(hdev);
/* not_found path: no usable key — send a negative reply. */
2724 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2725 	hci_dev_unlock(hdev);
/* Link Key Notification event.  Caches the new key type and PIN length on
 * the connection and persists the key when link-key storage is enabled. */
2728 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2730 	struct hci_ev_link_key_notify *ev = (void *) skb->data;
2731 	struct hci_conn *conn;
2734 	BT_DBG("%s", hdev->name);
2738 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2740 		hci_conn_hold(conn);
2741 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2742 		pin_len = conn->pin_length;
/* A changed-combination key keeps the original key's recorded type. */
2744 		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2745 			conn->key_type = ev->key_type;
2750 	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2751 		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2752 							ev->key_type, pin_len);
2754 	hci_dev_unlock(hdev);
/* Read Clock Offset Complete event.  Stores the fresh clock offset in the
 * peer's inquiry-cache entry to speed up future connection attempts. */
2757 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2759 	struct hci_ev_clock_offset *ev = (void *) skb->data;
2760 	struct hci_conn *conn;
2762 	BT_DBG("%s status %d", hdev->name, ev->status);
2766 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2767 	if (conn && !ev->status) {
2768 		struct inquiry_entry *ie;
2770 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2772 			ie->data.clock_offset = ev->clock_offset;
2773 			ie->timestamp = jiffies;
2777 	hci_dev_unlock(hdev);
/* Connection Packet Type Changed event.  Records the negotiated packet
 * types on the connection. */
2780 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2782 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2783 	struct hci_conn *conn;
2785 	BT_DBG("%s status %d", hdev->name, ev->status);
2789 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2790 	if (conn && !ev->status)
2791 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2793 	hci_dev_unlock(hdev);
/* Page Scan Repetition Mode Change event.  Refreshes the cached page-scan
 * repetition mode for the device in the inquiry cache. */
2796 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2798 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2799 	struct inquiry_entry *ie;
2801 	BT_DBG("%s", hdev->name);
2805 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2807 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2808 		ie->timestamp = jiffies;
2811 	hci_dev_unlock(hdev);
/* Inquiry Result with RSSI event.  The payload comes in two wire formats
 * (with or without a pscan_mode field); the record size check below picks
 * the right one.  Each response is cached and reported to mgmt. */
2814 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2816 	struct inquiry_data data;
2817 	int num_rsp = *((__u8 *) skb->data);
2818 	bool name_known, ssp;
2820 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Record length != standard struct size => variant with pscan_mode. */
2827 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2828 		struct inquiry_info_with_rssi_and_pscan_mode *info;
2829 		info = (void *) (skb->data + 1);
2831 		for (; num_rsp; num_rsp--, info++) {
2832 			bacpy(&data.bdaddr, &info->bdaddr);
2833 			data.pscan_rep_mode	= info->pscan_rep_mode;
2834 			data.pscan_period_mode	= info->pscan_period_mode;
2835 			data.pscan_mode		= info->pscan_mode;
2836 			memcpy(data.dev_class, info->dev_class, 3);
2837 			data.clock_offset	= info->clock_offset;
2838 			data.rssi		= info->rssi;
2839 			data.ssp_mode		= 0x00;
2841 			name_known = hci_inquiry_cache_update(hdev, &data,
2843 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2844 					  info->dev_class, info->rssi,
2845 					  !name_known, ssp, NULL, 0);
/* Standard variant: no pscan_mode on the wire; default it to 0. */
2848 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2850 		for (; num_rsp; num_rsp--, info++) {
2851 			bacpy(&data.bdaddr, &info->bdaddr);
2852 			data.pscan_rep_mode	= info->pscan_rep_mode;
2853 			data.pscan_period_mode	= info->pscan_period_mode;
2854 			data.pscan_mode		= 0x00;
2855 			memcpy(data.dev_class, info->dev_class, 3);
2856 			data.clock_offset	= info->clock_offset;
2857 			data.rssi		= info->rssi;
2858 			data.ssp_mode		= 0x00;
2859 			name_known = hci_inquiry_cache_update(hdev, &data,
2861 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2862 					  info->dev_class, info->rssi,
2863 					  !name_known, ssp, NULL, 0);
2867 	hci_dev_unlock(hdev);
/* Read Remote Extended Features Complete event.  On page 1, records the
 * remote host's SSP support; then continues connection setup the same way as
 * hci_remote_features_evt() (name request / mgmt notify / config done). */
2870 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2872 	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2873 	struct hci_conn *conn;
2875 	BT_DBG("%s", hdev->name);
2879 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Page 1 carries host features, including the SSP-enabled bit. */
2883 	if (!ev->status && ev->page == 0x01) {
2884 		struct inquiry_entry *ie;
2886 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2888 			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2890 		if (ev->features[0] & LMP_HOST_SSP)
2891 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2894 	if (conn->state != BT_CONFIG)
2897 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2898 		struct hci_cp_remote_name_req cp;
2899 		memset(&cp, 0, sizeof(cp));
2900 		bacpy(&cp.bdaddr, &conn->dst);
2901 		cp.pscan_rep_mode = 0x02;
2902 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2903 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2904 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2905 				      conn->dst_type, 0, NULL, 0,
2908 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2909 		conn->state = BT_CONNECTED;
2910 		hci_proto_connect_cfm(conn, ev->status);
2915 	hci_dev_unlock(hdev);
/* Synchronous (SCO/eSCO) Connection Complete event.  On success the
 * connection goes to BT_CONNECTED; on a set of negotiation-failure
 * status codes an outgoing eSCO attempt is retried with a downgraded
 * packet-type mask (eSCO -> SCO fallback).
 * NOTE(review): extract is missing lines (lock, NULL check, break
 * statements between switch cases); comments cover visible code only. */
2918 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2920 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2921 struct hci_conn *conn;
2923 BT_DBG("%s status %d", hdev->name, ev->status);
2927 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* Some controllers report ESCO even when we set up under SCO (or vice
 * versa); retry the lookup under ESCO_LINK and coerce the type. */
2929 if (ev->link_type == ESCO_LINK)
2932 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2936 conn->type = SCO_LINK;
2939 switch (ev->status) {
/* Success path (case 0x00 presumably elided above this line). */
2941 conn->handle = __le16_to_cpu(ev->handle);
2942 conn->state = BT_CONNECTED;
2944 hci_conn_hold_device(conn);
2945 hci_conn_add_sysfs(conn);
/* Retryable negotiation failures: drop EDR/eSCO packet types and try
 * the sync setup once more (attempt < 2 limits it to one retry). */
2948 case 0x11: /* Unsupported Feature or Parameter Value */
2949 case 0x1c: /* SCO interval rejected */
2950 case 0x1a: /* Unsupported Remote Feature */
2951 case 0x1f: /* Unspecified error */
2952 if (conn->out && conn->attempt < 2) {
2953 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2954 (hdev->esco_type & EDR_ESCO_MASK);
2955 hci_setup_sync(conn, conn->link->handle);
/* Any other status: tear the connection down. */
2961 conn->state = BT_CLOSED;
2965 hci_proto_connect_cfm(conn, ev->status);
2970 hci_dev_unlock(hdev);
/* Synchronous Connection Changed event: intentionally a no-op beyond
 * the debug trace — the stack does not act on renegotiated parameters. */
2973 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2975 BT_DBG("%s", hdev->name);
/* Sniff Subrating event: only logs the status; no state is updated
 * in the visible code. */
2978 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2980 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2982 BT_DBG("%s status %d", hdev->name, ev->status);
/* Extended Inquiry Result event: one or more responses, each carrying
 * EIR data.  Each response is folded into the inquiry cache (with
 * ssp_mode forced to 0x01 — EIR implies SSP) and forwarded to mgmt as
 * a found device, including the raw EIR payload.
 * NOTE(review): extract is missing lines (num_rsp sanity check, lock,
 * the eir_has_data_type() arguments and the else branch setting
 * name_known when !HCI_MGMT, ssp initialization); hedged accordingly. */
2985 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2987 struct inquiry_data data;
/* skb->data[0] is the response count; entries start at data + 1. */
2988 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2989 int num_rsp = *((__u8 *) skb->data);
2991 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2998 for (; num_rsp; num_rsp--, info++) {
2999 bool name_known, ssp;
3001 bacpy(&data.bdaddr, &info->bdaddr);
3002 data.pscan_rep_mode = info->pscan_rep_mode;
3003 data.pscan_period_mode = info->pscan_period_mode;
3004 data.pscan_mode = 0x00;
3005 memcpy(data.dev_class, info->dev_class, 3);
3006 data.clock_offset = info->clock_offset;
3007 data.rssi = info->rssi;
/* EIR responses come only from SSP-capable devices. */
3008 data.ssp_mode = 0x01;
/* Under mgmt, consider the name already known if the EIR payload
 * contains a (presumably complete) name data type — TODO confirm
 * against the elided eir_has_data_type() arguments. */
3010 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3011 name_known = eir_has_data_type(info->data,
3017 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3019 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3020 info->dev_class, info->rssi, !name_known,
3021 ssp, info->data, sizeof(info->data));
3024 hci_dev_unlock(hdev);
/* Derive the authentication requirement to send in an IO Capability
 * Reply, based on the peer's requested auth (conn->remote_auth) and
 * our own preference (conn->auth_type).  Values follow the HCI
 * authentication-requirements encoding (bit 0 = MITM, 0x02/0x03 =
 * dedicated bonding, 0x00/0x01 = no bonding).
 * NOTE(review): the return statements inside the first if-branch are
 * elided in this extract. */
3027 static inline u8 hci_get_auth_req(struct hci_conn *conn)
3029 /* If remote requests dedicated bonding follow that lead */
3030 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3031 /* If both remote and local IO capabilities allow MITM
3032 * protection then require it, otherwise don't */
3033 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3039 /* If remote requests no-bonding follow that lead */
3040 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
/* Keep no-bonding but preserve our own MITM bit. */
3041 return conn->remote_auth | (conn->auth_type & 0x01);
/* Otherwise fall back to our locally configured requirement. */
3043 return conn->auth_type;
/* IO Capability Request event: answer with our IO capability and
 * authentication requirements when pairing is allowed (device is
 * pairable, or the peer asked only for non-bonding), otherwise send a
 * negative reply rejecting the pairing.
 * NOTE(review): extract is missing lines (lock, NULL check on conn,
 * the !HCI_MGMT goto target, the oob_data assignment under the
 * hci_find_remote_oob_data() check); comments cover visible code. */
3046 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3048 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3049 struct hci_conn *conn;
3051 BT_DBG("%s", hdev->name);
3055 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Keep the connection alive across the pairing exchange. */
3059 hci_conn_hold(conn);
3061 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* Accept pairing if we are pairable, or if the peer only wants
 * non-bonding (remote_auth with MITM bit masked == no bonding). */
3064 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3065 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3066 struct hci_cp_io_capability_reply cp;
3068 bacpy(&cp.bdaddr, &ev->bdaddr);
3069 /* Change the IO capability from KeyboardDisplay
3070 * to DisplayYesNo as it is not supported by BT spec. */
3071 cp.capability = (conn->io_capability == 0x04) ?
3072 0x01 : conn->io_capability;
3073 conn->auth_type = hci_get_auth_req(conn);
3074 cp.authentication = conn->auth_type;
/* Advertise OOB data presence only for outgoing connections (or
 * when the peer indicated OOB) and we actually have stored data. */
3076 if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
3077 hci_find_remote_oob_data(hdev, &conn->dst))
3082 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
/* Not pairable: reject the request outright. */
3085 struct hci_cp_io_capability_neg_reply cp;
3087 bacpy(&cp.bdaddr, &ev->bdaddr);
3088 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3090 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3095 hci_dev_unlock(hdev);
/* IO Capability Response event: record the peer's IO capability and
 * auth requirements on the connection for later pairing decisions.
 * NOTE(review): the condition guarding the HCI_CONN_REMOTE_OOB set_bit
 * (presumably ev->oob_data) is elided in this extract. */
3098 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3100 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3101 struct hci_conn *conn;
3103 BT_DBG("%s", hdev->name);
3107 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3111 conn->remote_cap = ev->capability;
3112 conn->remote_auth = ev->authentication;
3114 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3117 hci_dev_unlock(hdev);
/* User Confirmation Request event (SSP numeric comparison): decide
 * whether to auto-accept, auto-reject, or punt the passkey comparison
 * to user space via mgmt, based on each side's MITM requirement and IO
 * capability (0x03 == NoInputNoOutput).
 * NOTE(review): extract is missing lines (lock, NULL check, goto
 * labels/confirm path, the confirm_hint = 1 assignment); comments
 * cover visible code only. */
3120 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3121 struct sk_buff *skb)
3123 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3124 int loc_mitm, rem_mitm, confirm_hint = 0;
3125 struct hci_conn *conn;
3127 BT_DBG("%s", hdev->name);
/* Without mgmt there is no user-space agent to ask; bail out. */
3131 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3134 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM-protection flag. */
3138 loc_mitm = (conn->auth_type & 0x01);
3139 rem_mitm = (conn->remote_auth & 0x01);
3141 /* If we require MITM but the remote device can't provide that
3142 * (it has NoInputNoOutput) then reject the confirmation
3143 * request. The only exception is when we're dedicated bonding
3144 * initiators (connect_cfm_cb set) since then we always have the MITM
3146 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3147 BT_DBG("Rejecting request: remote device can't provide MITM");
3148 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3149 sizeof(ev->bdaddr), &ev->bdaddr);
3153 /* If no side requires MITM protection; auto-accept */
3154 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3155 (!rem_mitm || conn->io_capability == 0x03)) {
3157 /* If we're not the initiators request authorization to
3158 * proceed from user space (mgmt_user_confirm with
3159 * confirm_hint set to 1). */
3160 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3161 BT_DBG("Confirming auto-accept as acceptor");
3166 BT_DBG("Auto-accept of user confirmation with %ums delay",
3167 hdev->auto_accept_delay);
/* A configured delay defers the accept via a timer so user space
 * has a window to cancel; zero delay accepts immediately below. */
3169 if (hdev->auto_accept_delay > 0) {
3170 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3171 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3175 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3176 sizeof(ev->bdaddr), &ev->bdaddr);
/* Otherwise hand the numeric comparison to user space. */
3181 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3185 hci_dev_unlock(hdev);
/* User Passkey Request event: forward to user space via mgmt (only
 * mgmt can collect a passkey from the user); otherwise ignored. */
3188 static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3189 struct sk_buff *skb)
3191 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3193 BT_DBG("%s", hdev->name);
3197 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3198 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3200 hci_dev_unlock(hdev);
/* Simple Pairing Complete event: report pairing failure to mgmt, but
 * only when we did NOT initiate authentication — the initiator side is
 * already covered by the Auth Complete event, avoiding duplicates.
 * NOTE(review): lock/NULL-check and the hci_conn_drop/put call after
 * the failure report are elided in this extract. */
3203 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3205 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3206 struct hci_conn *conn;
3208 BT_DBG("%s", hdev->name);
3212 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3216 /* To avoid duplicate auth_failed events to user space we check
3217 * the HCI_CONN_AUTH_PEND flag which will be set if we
3218 * initiated the authentication. A traditional auth_complete
3219 * event gets always produced as initiator and is also mapped to
3220 * the mgmt_auth_failed event */
3221 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3222 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3228 hci_dev_unlock(hdev);
/* Remote Host Supported Features Notification event: record the peer
 * host's SSP support in the matching inquiry cache entry (if any).
 * NOTE(review): the NULL check on ie is elided in this extract. */
3231 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3233 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3234 struct inquiry_entry *ie;
3236 BT_DBG("%s", hdev->name);
3240 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3242 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3244 hci_dev_unlock(hdev);
/* Remote OOB Data Request event: if we have stored out-of-band pairing
 * data (hash + randomizer) for this address, send it back in a reply;
 * otherwise send a negative reply so pairing proceeds without OOB.
 * NOTE(review): lock and the !HCI_MGMT goto target are elided. */
3247 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3248 struct sk_buff *skb)
3250 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3251 struct oob_data *data;
3253 BT_DBG("%s", hdev->name);
/* OOB data is managed through mgmt only; without it do nothing. */
3257 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3260 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3262 struct hci_cp_remote_oob_data_reply cp;
3264 bacpy(&cp.bdaddr, &ev->bdaddr);
3265 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3266 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3268 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
/* No stored OOB data: negative reply. */
3271 struct hci_cp_remote_oob_data_neg_reply cp;
3273 bacpy(&cp.bdaddr, &ev->bdaddr);
3274 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3279 hci_dev_unlock(hdev);
/* LE Connection Complete event: find (or create) the LE hci_conn for
 * the peer, then either report a failed connect or mark the link
 * connected, announce it to mgmt, and confirm to upper protocols.
 * NOTE(review): extract is missing lines (the !conn check before
 * hci_conn_add, the if (ev->status) guard before the failure branch,
 * goto labels); comments cover visible code only. */
3282 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3284 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3285 struct hci_conn *conn;
3287 BT_DBG("%s status %d", hdev->name, ev->status);
3291 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
/* Incoming LE connections have no pre-existing hci_conn; allocate. */
3293 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3295 BT_ERR("No memory for new connection");
3296 hci_dev_unlock(hdev);
/* Remember public vs. random peer address type for later commands. */
3300 conn->dst_type = ev->bdaddr_type;
/* Failure branch (guard elided): report and close. */
3304 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3305 conn->dst_type, ev->status);
3306 hci_proto_connect_cfm(conn, ev->status);
3307 conn->state = BT_CLOSED;
/* Success: announce once to mgmt, then finish connection setup. */
3312 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3313 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3314 conn->dst_type, 0, NULL, 0, NULL);
3316 conn->sec_level = BT_SECURITY_LOW;
3317 conn->handle = __le16_to_cpu(ev->handle);
3318 conn->state = BT_CONNECTED;
3320 hci_conn_hold_device(conn);
3321 hci_conn_add_sysfs(conn);
3323 hci_proto_connect_cfm(conn, ev->status);
3326 hci_dev_unlock(hdev);
/* LE Advertising Report event: iterate the packed reports, add each to
 * the advertising cache and forward it to mgmt as a found device.  The
 * RSSI byte trails the variable-length advertising data of each report.
 * NOTE(review): the s8 rssi declaration and the lock are elided; no
 * bounds checking of ev->length against the skb is visible here. */
3329 static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3330 struct sk_buff *skb)
3332 u8 num_reports = skb->data[0];
3333 void *ptr = &skb->data[1];
3338 while (num_reports--) {
3339 struct hci_ev_le_advertising_info *ev = ptr;
3341 hci_add_adv_entry(hdev, ev);
/* RSSI is the single byte immediately after the adv data. */
3343 rssi = ev->data[ev->length];
3344 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3345 NULL, rssi, 0, 1, ev->data, ev->length);
/* Advance past header + adv data + the trailing RSSI byte. */
3347 ptr += sizeof(*ev) + ev->length + 1;
3350 hci_dev_unlock(hdev);
/* LE Long Term Key Request event: look up the LTK matching the
 * controller's EDIV/random pair and reply with it; if no connection or
 * key is found, send a negative reply.  STKs (pairing-phase keys) are
 * single-use and removed after being handed to the controller.
 * NOTE(review): extract is missing lines (NULL checks on conn/ltk with
 * goto not_found, the kfree of the ltk); comments cover visible code. */
3353 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3354 struct sk_buff *skb)
3356 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3357 struct hci_cp_le_ltk_reply cp;
3358 struct hci_cp_le_ltk_neg_reply neg;
3359 struct hci_conn *conn;
3360 struct smp_ltk *ltk;
3362 BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
3366 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3370 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3374 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3375 cp.handle = cpu_to_le16(conn->handle);
/* An authenticated (MITM-protected) key upgrades the link security. */
3377 if (ltk->authenticated)
3378 conn->sec_level = BT_SECURITY_HIGH;
3380 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
/* Short Term Keys are valid for one encryption only; drop them. */
3382 if (ltk->type & HCI_SMP_STK) {
3383 list_del(&ltk->list);
3387 hci_dev_unlock(hdev);
/* not_found path (label elided): reject the LTK request. */
3392 neg.handle = ev->handle;
3393 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3394 hci_dev_unlock(hdev);
/* LE Meta event: strip the meta header and dispatch on the subevent
 * code to the specific LE handler.  Unknown subevents are ignored
 * (default case elided in this extract, along with the break
 * statements between cases). */
3397 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3399 struct hci_ev_le_meta *le_ev = (void *) skb->data;
/* Advance skb->data so sub-handlers see their own event struct. */
3401 skb_pull(skb, sizeof(*le_ev));
3403 switch (le_ev->subevent) {
3404 case HCI_EV_LE_CONN_COMPLETE:
3405 hci_le_conn_complete_evt(hdev, skb);
3408 case HCI_EV_LE_ADVERTISING_REPORT:
3409 hci_le_adv_report_evt(hdev, skb);
3412 case HCI_EV_LE_LTK_REQ:
3413 hci_le_ltk_request_evt(hdev, skb);
3421 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3423 struct hci_event_hdr *hdr = (void *) skb->data;
3424 __u8 event = hdr->evt;
3426 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3429 case HCI_EV_INQUIRY_COMPLETE:
3430 hci_inquiry_complete_evt(hdev, skb);
3433 case HCI_EV_INQUIRY_RESULT:
3434 hci_inquiry_result_evt(hdev, skb);
3437 case HCI_EV_CONN_COMPLETE:
3438 hci_conn_complete_evt(hdev, skb);
3441 case HCI_EV_CONN_REQUEST:
3442 hci_conn_request_evt(hdev, skb);
3445 case HCI_EV_DISCONN_COMPLETE:
3446 hci_disconn_complete_evt(hdev, skb);
3449 case HCI_EV_AUTH_COMPLETE:
3450 hci_auth_complete_evt(hdev, skb);
3453 case HCI_EV_REMOTE_NAME:
3454 hci_remote_name_evt(hdev, skb);
3457 case HCI_EV_ENCRYPT_CHANGE:
3458 hci_encrypt_change_evt(hdev, skb);
3461 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3462 hci_change_link_key_complete_evt(hdev, skb);
3465 case HCI_EV_REMOTE_FEATURES:
3466 hci_remote_features_evt(hdev, skb);
3469 case HCI_EV_REMOTE_VERSION:
3470 hci_remote_version_evt(hdev, skb);
3473 case HCI_EV_QOS_SETUP_COMPLETE:
3474 hci_qos_setup_complete_evt(hdev, skb);
3477 case HCI_EV_CMD_COMPLETE:
3478 hci_cmd_complete_evt(hdev, skb);
3481 case HCI_EV_CMD_STATUS:
3482 hci_cmd_status_evt(hdev, skb);
3485 case HCI_EV_ROLE_CHANGE:
3486 hci_role_change_evt(hdev, skb);
3489 case HCI_EV_NUM_COMP_PKTS:
3490 hci_num_comp_pkts_evt(hdev, skb);
3493 case HCI_EV_MODE_CHANGE:
3494 hci_mode_change_evt(hdev, skb);
3497 case HCI_EV_PIN_CODE_REQ:
3498 hci_pin_code_request_evt(hdev, skb);
3501 case HCI_EV_LINK_KEY_REQ:
3502 hci_link_key_request_evt(hdev, skb);
3505 case HCI_EV_LINK_KEY_NOTIFY:
3506 hci_link_key_notify_evt(hdev, skb);
3509 case HCI_EV_CLOCK_OFFSET:
3510 hci_clock_offset_evt(hdev, skb);
3513 case HCI_EV_PKT_TYPE_CHANGE:
3514 hci_pkt_type_change_evt(hdev, skb);
3517 case HCI_EV_PSCAN_REP_MODE:
3518 hci_pscan_rep_mode_evt(hdev, skb);
3521 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3522 hci_inquiry_result_with_rssi_evt(hdev, skb);
3525 case HCI_EV_REMOTE_EXT_FEATURES:
3526 hci_remote_ext_features_evt(hdev, skb);
3529 case HCI_EV_SYNC_CONN_COMPLETE:
3530 hci_sync_conn_complete_evt(hdev, skb);
3533 case HCI_EV_SYNC_CONN_CHANGED:
3534 hci_sync_conn_changed_evt(hdev, skb);
3537 case HCI_EV_SNIFF_SUBRATE:
3538 hci_sniff_subrate_evt(hdev, skb);
3541 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3542 hci_extended_inquiry_result_evt(hdev, skb);
3545 case HCI_EV_IO_CAPA_REQUEST:
3546 hci_io_capa_request_evt(hdev, skb);
3549 case HCI_EV_IO_CAPA_REPLY:
3550 hci_io_capa_reply_evt(hdev, skb);
3553 case HCI_EV_USER_CONFIRM_REQUEST:
3554 hci_user_confirm_request_evt(hdev, skb);
3557 case HCI_EV_USER_PASSKEY_REQUEST:
3558 hci_user_passkey_request_evt(hdev, skb);
3561 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3562 hci_simple_pair_complete_evt(hdev, skb);
3565 case HCI_EV_REMOTE_HOST_FEATURES:
3566 hci_remote_host_features_evt(hdev, skb);
3569 case HCI_EV_LE_META:
3570 hci_le_meta_evt(hdev, skb);
3573 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3574 hci_remote_oob_data_request_evt(hdev, skb);
3577 case HCI_EV_NUM_COMP_BLOCKS:
3578 hci_num_comp_blocks_evt(hdev, skb);
3582 BT_DBG("%s event 0x%x", hdev->name, event);
3587 hdev->stat.evt_rx++;