hci_event.c revision 12d4a3b2ccb3ac2bd56e7c216d6e7f44730006f3
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "a2mp.h"
#include "amp.h"

/* Handle HCI Event packets */

static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_conn_check_pending(hdev);
}

static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		if (rp->role)
			conn->link_mode &= ~HCI_LM_MASTER;
		else
			conn->link_mode |= HCI_LM_MASTER;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	if (!status)
		hdev->link_policy = get_unaligned_le16(sent);
}

static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->ssp_debug_mode = 0;
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data); 266 void *sent; 267 268 BT_DBG("%s status 0x%2.2x", hdev->name, status); 269 270 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); 271 if (!sent) 272 return; 273 274 if (!status) { 275 __u8 param = *((__u8 *) sent); 276 277 if (param) 278 set_bit(HCI_ENCRYPT, &hdev->flags); 279 else 280 clear_bit(HCI_ENCRYPT, &hdev->flags); 281 } 282} 283 284static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) 285{ 286 __u8 param, status = *((__u8 *) skb->data); 287 int old_pscan, old_iscan; 288 void *sent; 289 290 BT_DBG("%s status 0x%2.2x", hdev->name, status); 291 292 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); 293 if (!sent) 294 return; 295 296 param = *((__u8 *) sent); 297 298 hci_dev_lock(hdev); 299 300 if (status) { 301 mgmt_write_scan_failed(hdev, param, status); 302 hdev->discov_timeout = 0; 303 goto done; 304 } 305 306 /* We need to ensure that we set this back on if someone changed 307 * the scan mode through a raw HCI socket. 308 */ 309 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); 310 311 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags); 312 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags); 313 314 if (param & SCAN_INQUIRY) { 315 set_bit(HCI_ISCAN, &hdev->flags); 316 if (!old_iscan) 317 mgmt_discoverable(hdev, 1); 318 } else if (old_iscan) 319 mgmt_discoverable(hdev, 0); 320 321 if (param & SCAN_PAGE) { 322 set_bit(HCI_PSCAN, &hdev->flags); 323 if (!old_pscan) 324 mgmt_connectable(hdev, 1); 325 } else if (old_pscan) 326 mgmt_connectable(hdev, 0); 327 328done: 329 hci_dev_unlock(hdev); 330} 331 332static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 333{ 334 struct hci_rp_read_class_of_dev *rp = (void *) skb->data; 335 336 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 337 338 if (rp->status) 339 return; 340 341 memcpy(hdev->dev_class, rp->dev_class, 3); 342 343 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, 344 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); 345} 346 347static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 348{ 349 __u8 status = *((__u8 *) skb->data); 350 void *sent; 351 352 BT_DBG("%s status 0x%2.2x", hdev->name, status); 353 354 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); 355 if (!sent) 356 return; 357 358 hci_dev_lock(hdev); 359 360 if (status == 0) 361 memcpy(hdev->dev_class, sent, 3); 362 363 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 364 mgmt_set_class_of_dev_complete(hdev, sent, status); 365 366 hci_dev_unlock(hdev); 367} 368 369static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) 370{ 371 struct hci_rp_read_voice_setting *rp = (void *) skb->data; 372 __u16 setting; 373 374 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 375 376 if (rp->status) 377 return; 378 379 setting = __le16_to_cpu(rp->voice_setting); 380 381 if (hdev->voice_setting == setting) 382 return; 383 384 hdev->voice_setting = setting; 385 386 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting); 387 388 if (hdev->notify) 389 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 390} 391 392static void hci_cc_write_voice_setting(struct hci_dev *hdev, 393 struct sk_buff *skb) 394{ 395 __u8 status = *((__u8 *) skb->data); 396 __u16 setting; 397 void *sent; 398 399 BT_DBG("%s status 0x%2.2x", hdev->name, status); 400 401 if (status) 402 return; 403 404 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); 405 if (!sent) 406 return; 407 408 setting = get_unaligned_le16(sent); 409 410 if 
(hdev->voice_setting == setting) 411 return; 412 413 hdev->voice_setting = setting; 414 415 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting); 416 417 if (hdev->notify) 418 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 419} 420 421static void hci_cc_read_num_supported_iac(struct hci_dev *hdev, 422 struct sk_buff *skb) 423{ 424 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data; 425 426 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 427 428 if (rp->status) 429 return; 430 431 hdev->num_iac = rp->num_iac; 432 433 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac); 434} 435 436static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) 437{ 438 __u8 status = *((__u8 *) skb->data); 439 struct hci_cp_write_ssp_mode *sent; 440 441 BT_DBG("%s status 0x%2.2x", hdev->name, status); 442 443 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); 444 if (!sent) 445 return; 446 447 if (!status) { 448 if (sent->mode) 449 hdev->features[1][0] |= LMP_HOST_SSP; 450 else 451 hdev->features[1][0] &= ~LMP_HOST_SSP; 452 } 453 454 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 455 mgmt_ssp_enable_complete(hdev, sent->mode, status); 456 else if (!status) { 457 if (sent->mode) 458 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags); 459 else 460 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags); 461 } 462} 463 464static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb) 465{ 466 u8 status = *((u8 *) skb->data); 467 struct hci_cp_write_sc_support *sent; 468 469 BT_DBG("%s status 0x%2.2x", hdev->name, status); 470 471 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT); 472 if (!sent) 473 return; 474 475 if (!status) { 476 if (sent->support) 477 hdev->features[1][0] |= LMP_HOST_SC; 478 else 479 hdev->features[1][0] &= ~LMP_HOST_SC; 480 } 481 482 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 483 mgmt_sc_enable_complete(hdev, sent->support, status); 484 else if (!status) { 485 if (sent->support) 486 set_bit(HCI_SC_ENABLED, &hdev->dev_flags); 487 else 488 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags); 489 } 490} 491 492static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) 493{ 494 struct hci_rp_read_local_version *rp = (void *) skb->data; 495 496 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 497 498 if (rp->status) 499 return; 500 501 if (test_bit(HCI_SETUP, &hdev->dev_flags)) { 502 hdev->hci_ver = rp->hci_ver; 503 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 504 hdev->lmp_ver = rp->lmp_ver; 505 hdev->manufacturer = __le16_to_cpu(rp->manufacturer); 506 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); 507 } 508} 509 510static void hci_cc_read_local_commands(struct hci_dev *hdev, 511 struct sk_buff *skb) 512{ 513 struct hci_rp_read_local_commands *rp = (void *) skb->data; 514 515 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 516 517 if (rp->status) 518 return; 519 520 if (test_bit(HCI_SETUP, &hdev->dev_flags)) 521 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); 522} 523 524static void hci_cc_read_local_features(struct hci_dev *hdev, 525 struct sk_buff *skb) 526{ 527 struct hci_rp_read_local_features *rp = (void *) skb->data; 528 529 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 530 531 if (rp->status) 532 return; 533 534 memcpy(hdev->features, rp->features, 8); 535 536 /* Adjust default settings according to features 537 * supported by device. 
*/ 538 539 if (hdev->features[0][0] & LMP_3SLOT) 540 hdev->pkt_type |= (HCI_DM3 | HCI_DH3); 541 542 if (hdev->features[0][0] & LMP_5SLOT) 543 hdev->pkt_type |= (HCI_DM5 | HCI_DH5); 544 545 if (hdev->features[0][1] & LMP_HV2) { 546 hdev->pkt_type |= (HCI_HV2); 547 hdev->esco_type |= (ESCO_HV2); 548 } 549 550 if (hdev->features[0][1] & LMP_HV3) { 551 hdev->pkt_type |= (HCI_HV3); 552 hdev->esco_type |= (ESCO_HV3); 553 } 554 555 if (lmp_esco_capable(hdev)) 556 hdev->esco_type |= (ESCO_EV3); 557 558 if (hdev->features[0][4] & LMP_EV4) 559 hdev->esco_type |= (ESCO_EV4); 560 561 if (hdev->features[0][4] & LMP_EV5) 562 hdev->esco_type |= (ESCO_EV5); 563 564 if (hdev->features[0][5] & LMP_EDR_ESCO_2M) 565 hdev->esco_type |= (ESCO_2EV3); 566 567 if (hdev->features[0][5] & LMP_EDR_ESCO_3M) 568 hdev->esco_type |= (ESCO_3EV3); 569 570 if (hdev->features[0][5] & LMP_EDR_3S_ESCO) 571 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); 572} 573 574static void hci_cc_read_local_ext_features(struct hci_dev *hdev, 575 struct sk_buff *skb) 576{ 577 struct hci_rp_read_local_ext_features *rp = (void *) skb->data; 578 579 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 580 581 if (rp->status) 582 return; 583 584 if (hdev->max_page < rp->max_page) 585 hdev->max_page = rp->max_page; 586 587 if (rp->page < HCI_MAX_PAGES) 588 memcpy(hdev->features[rp->page], rp->features, 8); 589} 590 591static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, 592 struct sk_buff *skb) 593{ 594 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data; 595 596 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 597 598 if (!rp->status) 599 hdev->flow_ctl_mode = rp->mode; 600} 601 602static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) 603{ 604 struct hci_rp_read_buffer_size *rp = (void *) skb->data; 605 606 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 607 608 if (rp->status) 609 return; 610 611 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu); 612 hdev->sco_mtu = rp->sco_mtu; 613 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt); 614 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt); 615 616 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { 617 hdev->sco_mtu = 64; 618 hdev->sco_pkts = 8; 619 } 620 621 hdev->acl_cnt = hdev->acl_pkts; 622 hdev->sco_cnt = hdev->sco_pkts; 623 624 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu, 625 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts); 626} 627 628static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) 629{ 630 struct hci_rp_read_bd_addr *rp = (void *) skb->data; 631 632 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 633 634 if (!rp->status) 635 bacpy(&hdev->bdaddr, &rp->bdaddr); 636} 637 638static void hci_cc_read_page_scan_activity(struct hci_dev *hdev, 639 struct sk_buff *skb) 640{ 641 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data; 642 643 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 644 645 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) { 646 hdev->page_scan_interval = __le16_to_cpu(rp->interval); 647 hdev->page_scan_window = __le16_to_cpu(rp->window); 648 } 649} 650 651static void hci_cc_write_page_scan_activity(struct hci_dev *hdev, 652 struct sk_buff *skb) 653{ 654 u8 status = *((u8 *) skb->data); 655 struct hci_cp_write_page_scan_activity *sent; 656 657 BT_DBG("%s status 0x%2.2x", hdev->name, status); 658 659 if (status) 660 return; 661 662 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY); 663 if (!sent) 664 return; 665 666 hdev->page_scan_interval = 
__le16_to_cpu(sent->interval); 667 hdev->page_scan_window = __le16_to_cpu(sent->window); 668} 669 670static void hci_cc_read_page_scan_type(struct hci_dev *hdev, 671 struct sk_buff *skb) 672{ 673 struct hci_rp_read_page_scan_type *rp = (void *) skb->data; 674 675 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 676 677 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) 678 hdev->page_scan_type = rp->type; 679} 680 681static void hci_cc_write_page_scan_type(struct hci_dev *hdev, 682 struct sk_buff *skb) 683{ 684 u8 status = *((u8 *) skb->data); 685 u8 *type; 686 687 BT_DBG("%s status 0x%2.2x", hdev->name, status); 688 689 if (status) 690 return; 691 692 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE); 693 if (type) 694 hdev->page_scan_type = *type; 695} 696 697static void hci_cc_read_data_block_size(struct hci_dev *hdev, 698 struct sk_buff *skb) 699{ 700 struct hci_rp_read_data_block_size *rp = (void *) skb->data; 701 702 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 703 704 if (rp->status) 705 return; 706 707 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len); 708 hdev->block_len = __le16_to_cpu(rp->block_len); 709 hdev->num_blocks = __le16_to_cpu(rp->num_blocks); 710 711 hdev->block_cnt = hdev->num_blocks; 712 713 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, 714 hdev->block_cnt, hdev->block_len); 715} 716 717static void hci_cc_read_local_amp_info(struct hci_dev *hdev, 718 struct sk_buff *skb) 719{ 720 struct hci_rp_read_local_amp_info *rp = (void *) skb->data; 721 722 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 723 724 if (rp->status) 725 goto a2mp_rsp; 726 727 hdev->amp_status = rp->amp_status; 728 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw); 729 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw); 730 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency); 731 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu); 732 hdev->amp_type = rp->amp_type; 733 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap); 734 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size); 735 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to); 736 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); 737 738a2mp_rsp: 739 a2mp_send_getinfo_rsp(hdev); 740} 741 742static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev, 743 struct sk_buff *skb) 744{ 745 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data; 746 struct amp_assoc *assoc = &hdev->loc_assoc; 747 size_t rem_len, frag_len; 748 749 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 750 751 if (rp->status) 752 goto a2mp_rsp; 753 754 frag_len = skb->len - sizeof(*rp); 755 rem_len = __le16_to_cpu(rp->rem_len); 756 757 if (rem_len > frag_len) { 758 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len); 759 760 memcpy(assoc->data + assoc->offset, rp->frag, frag_len); 761 assoc->offset += frag_len; 762 763 /* Read other fragments */ 764 amp_read_loc_assoc_frag(hdev, rp->phy_handle); 765 766 return; 767 } 768 769 memcpy(assoc->data + assoc->offset, rp->frag, rem_len); 770 assoc->len = assoc->offset + rem_len; 771 assoc->offset = 0; 772 773a2mp_rsp: 774 /* Send A2MP Rsp when all fragments are received */ 775 a2mp_send_getampassoc_rsp(hdev, rp->status); 776 a2mp_send_create_phy_link_req(hdev, rp->status); 777} 778 779static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, 780 struct sk_buff *skb) 781{ 782 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data; 783 784 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 785 786 if (!rp->status) 787 hdev->inq_tx_power = rp->tx_power; 788} 
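/* The hci_cc_* handlers that follow process Command Complete events for the
 * pairing related commands (PIN code reply/negative reply, user confirm and
 * passkey replies, local OOB data). Each one reads the response from
 * skb->data and, for the most part, simply forwards the result to the
 * management interface.
 */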
789 790static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb) 791{ 792 struct hci_rp_pin_code_reply *rp = (void *) skb->data; 793 struct hci_cp_pin_code_reply *cp; 794 struct hci_conn *conn; 795 796 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 797 798 hci_dev_lock(hdev); 799 800 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 801 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status); 802 803 if (rp->status) 804 goto unlock; 805 806 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY); 807 if (!cp) 808 goto unlock; 809 810 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 811 if (conn) 812 conn->pin_length = cp->pin_len; 813 814unlock: 815 hci_dev_unlock(hdev); 816} 817 818static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) 819{ 820 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data; 821 822 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 823 824 hci_dev_lock(hdev); 825 826 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 827 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, 828 rp->status); 829 830 hci_dev_unlock(hdev); 831} 832 833static void hci_cc_le_read_buffer_size(struct hci_dev *hdev, 834 struct sk_buff *skb) 835{ 836 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data; 837 838 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 839 840 if (rp->status) 841 return; 842 843 hdev->le_mtu = __le16_to_cpu(rp->le_mtu); 844 hdev->le_pkts = rp->le_max_pkt; 845 846 hdev->le_cnt = hdev->le_pkts; 847 848 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts); 849} 850 851static void hci_cc_le_read_local_features(struct hci_dev *hdev, 852 struct sk_buff *skb) 853{ 854 struct hci_rp_le_read_local_features *rp = (void *) skb->data; 855 856 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 857 858 if (!rp->status) 859 memcpy(hdev->le_features, rp->features, 8); 860} 861 862static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, 863 struct sk_buff *skb) 864{ 865 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data; 866 867 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 868 869 if (!rp->status) 870 hdev->adv_tx_power = rp->tx_power; 871} 872 873static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) 874{ 875 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 876 877 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 878 879 hci_dev_lock(hdev); 880 881 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 882 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0, 883 rp->status); 884 885 hci_dev_unlock(hdev); 886} 887 888static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, 889 struct sk_buff *skb) 890{ 891 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 892 893 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 894 895 hci_dev_lock(hdev); 896 897 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 898 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr, 899 ACL_LINK, 0, rp->status); 900 901 hci_dev_unlock(hdev); 902} 903 904static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb) 905{ 906 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 907 908 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 909 910 hci_dev_lock(hdev); 911 912 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 913 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 914 0, rp->status); 915 916 hci_dev_unlock(hdev); 917} 918 919static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, 920 struct sk_buff *skb) 921{ 922 struct 
hci_rp_user_confirm_reply *rp = (void *) skb->data; 923 924 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 925 926 hci_dev_lock(hdev); 927 928 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 929 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr, 930 ACL_LINK, 0, rp->status); 931 932 hci_dev_unlock(hdev); 933} 934 935static void hci_cc_read_local_oob_data(struct hci_dev *hdev, 936 struct sk_buff *skb) 937{ 938 struct hci_rp_read_local_oob_data *rp = (void *) skb->data; 939 940 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 941 942 hci_dev_lock(hdev); 943 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer, 944 NULL, NULL, rp->status); 945 hci_dev_unlock(hdev); 946} 947 948static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, 949 struct sk_buff *skb) 950{ 951 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data; 952 953 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 954 955 hci_dev_lock(hdev); 956 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192, 957 rp->hash256, rp->randomizer256, 958 rp->status); 959 hci_dev_unlock(hdev); 960} 961 962 963static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb) 964{ 965 __u8 status = *((__u8 *) skb->data); 966 bdaddr_t *sent; 967 968 BT_DBG("%s status 0x%2.2x", hdev->name, status); 969 970 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR); 971 if (!sent) 972 return; 973 974 hci_dev_lock(hdev); 975 976 if (!status) 977 bacpy(&hdev->random_addr, sent); 978 979 hci_dev_unlock(hdev); 980} 981 982static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb) 983{ 984 __u8 *sent, status = *((__u8 *) skb->data); 985 986 BT_DBG("%s status 0x%2.2x", hdev->name, status); 987 988 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE); 989 if (!sent) 990 return; 991 992 hci_dev_lock(hdev); 993 994 if (!status) 995 mgmt_advertising(hdev, *sent); 996 997 hci_dev_unlock(hdev); 998} 999 1000static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, 1001 struct sk_buff *skb) 1002{ 1003 struct hci_cp_le_set_scan_enable *cp; 1004 __u8 status = *((__u8 *) skb->data); 1005 1006 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1007 1008 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); 1009 if (!cp) 1010 return; 1011 1012 if (status) 1013 return; 1014 1015 switch (cp->enable) { 1016 case LE_SCAN_ENABLE: 1017 set_bit(HCI_LE_SCAN, &hdev->dev_flags); 1018 break; 1019 1020 case LE_SCAN_DISABLE: 1021 clear_bit(HCI_LE_SCAN, &hdev->dev_flags); 1022 break; 1023 1024 default: 1025 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable); 1026 break; 1027 } 1028} 1029 1030static void hci_cc_le_read_white_list_size(struct hci_dev *hdev, 1031 struct sk_buff *skb) 1032{ 1033 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data; 1034 1035 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size); 1036 1037 if (!rp->status) 1038 hdev->le_white_list_size = rp->size; 1039} 1040 1041static void hci_cc_le_read_supported_states(struct hci_dev *hdev, 1042 struct sk_buff *skb) 1043{ 1044 struct hci_rp_le_read_supported_states *rp = (void *) skb->data; 1045 1046 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1047 1048 if (!rp->status) 1049 memcpy(hdev->le_states, rp->le_states, 8); 1050} 1051 1052static void hci_cc_write_le_host_supported(struct hci_dev *hdev, 1053 struct sk_buff *skb) 1054{ 1055 struct hci_cp_write_le_host_supported *sent; 1056 __u8 status = *((__u8 *) skb->data); 1057 1058 BT_DBG("%s status 0x%2.2x", hdev->name, status); 
1059 1060 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED); 1061 if (!sent) 1062 return; 1063 1064 if (!status) { 1065 if (sent->le) { 1066 hdev->features[1][0] |= LMP_HOST_LE; 1067 set_bit(HCI_LE_ENABLED, &hdev->dev_flags); 1068 } else { 1069 hdev->features[1][0] &= ~LMP_HOST_LE; 1070 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags); 1071 clear_bit(HCI_ADVERTISING, &hdev->dev_flags); 1072 } 1073 1074 if (sent->simul) 1075 hdev->features[1][0] |= LMP_HOST_LE_BREDR; 1076 else 1077 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR; 1078 } 1079} 1080 1081static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev, 1082 struct sk_buff *skb) 1083{ 1084 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data; 1085 1086 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x", 1087 hdev->name, rp->status, rp->phy_handle); 1088 1089 if (rp->status) 1090 return; 1091 1092 amp_write_rem_assoc_continue(hdev, rp->phy_handle); 1093} 1094 1095static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 1096{ 1097 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1098 1099 if (status) { 1100 hci_conn_check_pending(hdev); 1101 return; 1102 } 1103 1104 set_bit(HCI_INQUIRY, &hdev->flags); 1105} 1106 1107static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 1108{ 1109 struct hci_cp_create_conn *cp; 1110 struct hci_conn *conn; 1111 1112 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1113 1114 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); 1115 if (!cp) 1116 return; 1117 1118 hci_dev_lock(hdev); 1119 1120 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1121 1122 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn); 1123 1124 if (status) { 1125 if (conn && conn->state == BT_CONNECT) { 1126 if (status != 0x0c || conn->attempt > 2) { 1127 conn->state = BT_CLOSED; 1128 hci_proto_connect_cfm(conn, status); 1129 hci_conn_del(conn); 1130 } else 1131 conn->state = BT_CONNECT2; 1132 } 1133 } else { 1134 if (!conn) { 1135 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr); 1136 if (conn) { 1137 conn->out = true; 1138 conn->link_mode |= HCI_LM_MASTER; 1139 } else 1140 BT_ERR("No memory for new connection"); 1141 } 1142 } 1143 1144 hci_dev_unlock(hdev); 1145} 1146 1147static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) 1148{ 1149 struct hci_cp_add_sco *cp; 1150 struct hci_conn *acl, *sco; 1151 __u16 handle; 1152 1153 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1154 1155 if (!status) 1156 return; 1157 1158 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO); 1159 if (!cp) 1160 return; 1161 1162 handle = __le16_to_cpu(cp->handle); 1163 1164 BT_DBG("%s handle 0x%4.4x", hdev->name, handle); 1165 1166 hci_dev_lock(hdev); 1167 1168 acl = hci_conn_hash_lookup_handle(hdev, handle); 1169 if (acl) { 1170 sco = acl->link; 1171 if (sco) { 1172 sco->state = BT_CLOSED; 1173 1174 hci_proto_connect_cfm(sco, status); 1175 hci_conn_del(sco); 1176 } 1177 } 1178 1179 hci_dev_unlock(hdev); 1180} 1181 1182static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status) 1183{ 1184 struct hci_cp_auth_requested *cp; 1185 struct hci_conn *conn; 1186 1187 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1188 1189 if (!status) 1190 return; 1191 1192 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED); 1193 if (!cp) 1194 return; 1195 1196 hci_dev_lock(hdev); 1197 1198 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1199 if (conn) { 1200 if (conn->state == BT_CONFIG) { 1201 hci_proto_connect_cfm(conn, status); 1202 hci_conn_drop(conn); 1203 } 1204 } 1205 1206 hci_dev_unlock(hdev); 
1207} 1208 1209static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) 1210{ 1211 struct hci_cp_set_conn_encrypt *cp; 1212 struct hci_conn *conn; 1213 1214 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1215 1216 if (!status) 1217 return; 1218 1219 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); 1220 if (!cp) 1221 return; 1222 1223 hci_dev_lock(hdev); 1224 1225 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1226 if (conn) { 1227 if (conn->state == BT_CONFIG) { 1228 hci_proto_connect_cfm(conn, status); 1229 hci_conn_drop(conn); 1230 } 1231 } 1232 1233 hci_dev_unlock(hdev); 1234} 1235 1236static int hci_outgoing_auth_needed(struct hci_dev *hdev, 1237 struct hci_conn *conn) 1238{ 1239 if (conn->state != BT_CONFIG || !conn->out) 1240 return 0; 1241 1242 if (conn->pending_sec_level == BT_SECURITY_SDP) 1243 return 0; 1244 1245 /* Only request authentication for SSP connections or non-SSP 1246 * devices with sec_level MEDIUM or HIGH or if MITM protection 1247 * is requested. 1248 */ 1249 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && 1250 conn->pending_sec_level != BT_SECURITY_HIGH && 1251 conn->pending_sec_level != BT_SECURITY_MEDIUM) 1252 return 0; 1253 1254 return 1; 1255} 1256 1257static int hci_resolve_name(struct hci_dev *hdev, 1258 struct inquiry_entry *e) 1259{ 1260 struct hci_cp_remote_name_req cp; 1261 1262 memset(&cp, 0, sizeof(cp)); 1263 1264 bacpy(&cp.bdaddr, &e->data.bdaddr); 1265 cp.pscan_rep_mode = e->data.pscan_rep_mode; 1266 cp.pscan_mode = e->data.pscan_mode; 1267 cp.clock_offset = e->data.clock_offset; 1268 1269 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 1270} 1271 1272static bool hci_resolve_next_name(struct hci_dev *hdev) 1273{ 1274 struct discovery_state *discov = &hdev->discovery; 1275 struct inquiry_entry *e; 1276 1277 if (list_empty(&discov->resolve)) 1278 return false; 1279 1280 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 1281 if (!e) 1282 return false; 1283 1284 if (hci_resolve_name(hdev, e) == 0) { 1285 e->name_state = NAME_PENDING; 1286 return true; 1287 } 1288 1289 return false; 1290} 1291 1292static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, 1293 bdaddr_t *bdaddr, u8 *name, u8 name_len) 1294{ 1295 struct discovery_state *discov = &hdev->discovery; 1296 struct inquiry_entry *e; 1297 1298 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 1299 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name, 1300 name_len, conn->dev_class); 1301 1302 if (discov->state == DISCOVERY_STOPPED) 1303 return; 1304 1305 if (discov->state == DISCOVERY_STOPPING) 1306 goto discov_complete; 1307 1308 if (discov->state != DISCOVERY_RESOLVING) 1309 return; 1310 1311 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); 1312 /* If the device was not found in a list of found devices names of which 1313 * are pending. 
there is no need to continue resolving a next name as it 1314 * will be done upon receiving another Remote Name Request Complete 1315 * Event */ 1316 if (!e) 1317 return; 1318 1319 list_del(&e->list); 1320 if (name) { 1321 e->name_state = NAME_KNOWN; 1322 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, 1323 e->data.rssi, name, name_len); 1324 } else { 1325 e->name_state = NAME_NOT_KNOWN; 1326 } 1327 1328 if (hci_resolve_next_name(hdev)) 1329 return; 1330 1331discov_complete: 1332 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1333} 1334 1335static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) 1336{ 1337 struct hci_cp_remote_name_req *cp; 1338 struct hci_conn *conn; 1339 1340 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1341 1342 /* If successful wait for the name req complete event before 1343 * checking for the need to do authentication */ 1344 if (!status) 1345 return; 1346 1347 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ); 1348 if (!cp) 1349 return; 1350 1351 hci_dev_lock(hdev); 1352 1353 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1354 1355 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 1356 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); 1357 1358 if (!conn) 1359 goto unlock; 1360 1361 if (!hci_outgoing_auth_needed(hdev, conn)) 1362 goto unlock; 1363 1364 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 1365 struct hci_cp_auth_requested auth_cp; 1366 1367 auth_cp.handle = __cpu_to_le16(conn->handle); 1368 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, 1369 sizeof(auth_cp), &auth_cp); 1370 } 1371 1372unlock: 1373 hci_dev_unlock(hdev); 1374} 1375 1376static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) 1377{ 1378 struct hci_cp_read_remote_features *cp; 1379 struct hci_conn *conn; 1380 1381 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1382 1383 if (!status) 1384 return; 1385 1386 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); 1387 if (!cp) 1388 return; 1389 1390 hci_dev_lock(hdev); 1391 1392 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1393 if (conn) { 1394 if (conn->state == BT_CONFIG) { 1395 hci_proto_connect_cfm(conn, status); 1396 hci_conn_drop(conn); 1397 } 1398 } 1399 1400 hci_dev_unlock(hdev); 1401} 1402 1403static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) 1404{ 1405 struct hci_cp_read_remote_ext_features *cp; 1406 struct hci_conn *conn; 1407 1408 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1409 1410 if (!status) 1411 return; 1412 1413 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); 1414 if (!cp) 1415 return; 1416 1417 hci_dev_lock(hdev); 1418 1419 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1420 if (conn) { 1421 if (conn->state == BT_CONFIG) { 1422 hci_proto_connect_cfm(conn, status); 1423 hci_conn_drop(conn); 1424 } 1425 } 1426 1427 hci_dev_unlock(hdev); 1428} 1429 1430static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) 1431{ 1432 struct hci_cp_setup_sync_conn *cp; 1433 struct hci_conn *acl, *sco; 1434 __u16 handle; 1435 1436 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1437 1438 if (!status) 1439 return; 1440 1441 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); 1442 if (!cp) 1443 return; 1444 1445 handle = __le16_to_cpu(cp->handle); 1446 1447 BT_DBG("%s handle 0x%4.4x", hdev->name, handle); 1448 1449 hci_dev_lock(hdev); 1450 1451 acl = hci_conn_hash_lookup_handle(hdev, handle); 1452 if (acl) { 1453 sco = acl->link; 1454 if (sco) { 1455 sco->state = BT_CLOSED; 
1456 1457 hci_proto_connect_cfm(sco, status); 1458 hci_conn_del(sco); 1459 } 1460 } 1461 1462 hci_dev_unlock(hdev); 1463} 1464 1465static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) 1466{ 1467 struct hci_cp_sniff_mode *cp; 1468 struct hci_conn *conn; 1469 1470 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1471 1472 if (!status) 1473 return; 1474 1475 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); 1476 if (!cp) 1477 return; 1478 1479 hci_dev_lock(hdev); 1480 1481 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1482 if (conn) { 1483 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 1484 1485 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 1486 hci_sco_setup(conn, status); 1487 } 1488 1489 hci_dev_unlock(hdev); 1490} 1491 1492static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) 1493{ 1494 struct hci_cp_exit_sniff_mode *cp; 1495 struct hci_conn *conn; 1496 1497 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1498 1499 if (!status) 1500 return; 1501 1502 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); 1503 if (!cp) 1504 return; 1505 1506 hci_dev_lock(hdev); 1507 1508 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1509 if (conn) { 1510 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 1511 1512 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 1513 hci_sco_setup(conn, status); 1514 } 1515 1516 hci_dev_unlock(hdev); 1517} 1518 1519static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) 1520{ 1521 struct hci_cp_disconnect *cp; 1522 struct hci_conn *conn; 1523 1524 if (!status) 1525 return; 1526 1527 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); 1528 if (!cp) 1529 return; 1530 1531 hci_dev_lock(hdev); 1532 1533 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1534 if (conn) 1535 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 1536 conn->dst_type, status); 1537 1538 hci_dev_unlock(hdev); 1539} 1540 1541static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status) 1542{ 1543 struct hci_cp_create_phy_link *cp; 1544 1545 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1546 1547 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK); 1548 if (!cp) 1549 return; 1550 1551 hci_dev_lock(hdev); 1552 1553 if (status) { 1554 struct hci_conn *hcon; 1555 1556 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle); 1557 if (hcon) 1558 hci_conn_del(hcon); 1559 } else { 1560 amp_write_remote_assoc(hdev, cp->phy_handle); 1561 } 1562 1563 hci_dev_unlock(hdev); 1564} 1565 1566static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status) 1567{ 1568 struct hci_cp_accept_phy_link *cp; 1569 1570 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1571 1572 if (status) 1573 return; 1574 1575 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK); 1576 if (!cp) 1577 return; 1578 1579 amp_write_remote_assoc(hdev, cp->phy_handle); 1580} 1581 1582static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1583{ 1584 __u8 status = *((__u8 *) skb->data); 1585 struct discovery_state *discov = &hdev->discovery; 1586 struct inquiry_entry *e; 1587 1588 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1589 1590 hci_conn_check_pending(hdev); 1591 1592 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 1593 return; 1594 1595 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */ 1596 wake_up_bit(&hdev->flags, HCI_INQUIRY); 1597 1598 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 1599 return; 1600 1601 hci_dev_lock(hdev); 1602 1603 if 
(discov->state != DISCOVERY_FINDING) 1604 goto unlock; 1605 1606 if (list_empty(&discov->resolve)) { 1607 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1608 goto unlock; 1609 } 1610 1611 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 1612 if (e && hci_resolve_name(hdev, e) == 0) { 1613 e->name_state = NAME_PENDING; 1614 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); 1615 } else { 1616 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1617 } 1618 1619unlock: 1620 hci_dev_unlock(hdev); 1621} 1622 1623static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 1624{ 1625 struct inquiry_data data; 1626 struct inquiry_info *info = (void *) (skb->data + 1); 1627 int num_rsp = *((__u8 *) skb->data); 1628 1629 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 1630 1631 if (!num_rsp) 1632 return; 1633 1634 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) 1635 return; 1636 1637 hci_dev_lock(hdev); 1638 1639 for (; num_rsp; num_rsp--, info++) { 1640 bool name_known, ssp; 1641 1642 bacpy(&data.bdaddr, &info->bdaddr); 1643 data.pscan_rep_mode = info->pscan_rep_mode; 1644 data.pscan_period_mode = info->pscan_period_mode; 1645 data.pscan_mode = info->pscan_mode; 1646 memcpy(data.dev_class, info->dev_class, 3); 1647 data.clock_offset = info->clock_offset; 1648 data.rssi = 0x00; 1649 data.ssp_mode = 0x00; 1650 1651 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp); 1652 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 1653 info->dev_class, 0, !name_known, ssp, NULL, 1654 0); 1655 } 1656 1657 hci_dev_unlock(hdev); 1658} 1659 1660static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1661{ 1662 struct hci_ev_conn_complete *ev = (void *) skb->data; 1663 struct hci_conn *conn; 1664 1665 BT_DBG("%s", hdev->name); 1666 1667 hci_dev_lock(hdev); 1668 1669 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 1670 if (!conn) { 1671 if (ev->link_type != SCO_LINK) 1672 goto unlock; 1673 1674 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 1675 if (!conn) 1676 goto unlock; 1677 1678 conn->type = SCO_LINK; 1679 } 1680 1681 if (!ev->status) { 1682 conn->handle = __le16_to_cpu(ev->handle); 1683 1684 if (conn->type == ACL_LINK) { 1685 conn->state = BT_CONFIG; 1686 hci_conn_hold(conn); 1687 1688 if (!conn->out && !hci_conn_ssp_enabled(conn) && 1689 !hci_find_link_key(hdev, &ev->bdaddr)) 1690 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 1691 else 1692 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 1693 } else 1694 conn->state = BT_CONNECTED; 1695 1696 hci_conn_add_sysfs(conn); 1697 1698 if (test_bit(HCI_AUTH, &hdev->flags)) 1699 conn->link_mode |= HCI_LM_AUTH; 1700 1701 if (test_bit(HCI_ENCRYPT, &hdev->flags)) 1702 conn->link_mode |= HCI_LM_ENCRYPT; 1703 1704 /* Get remote features */ 1705 if (conn->type == ACL_LINK) { 1706 struct hci_cp_read_remote_features cp; 1707 cp.handle = ev->handle; 1708 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, 1709 sizeof(cp), &cp); 1710 } 1711 1712 /* Set packet type for incoming connection */ 1713 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { 1714 struct hci_cp_change_conn_ptype cp; 1715 cp.handle = ev->handle; 1716 cp.pkt_type = cpu_to_le16(conn->pkt_type); 1717 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), 1718 &cp); 1719 } 1720 } else { 1721 conn->state = BT_CLOSED; 1722 if (conn->type == ACL_LINK) 1723 mgmt_connect_failed(hdev, &conn->dst, conn->type, 1724 conn->dst_type, ev->status); 1725 } 1726 1727 if (conn->type == ACL_LINK) 1728 hci_sco_setup(conn, 
ev->status); 1729 1730 if (ev->status) { 1731 hci_proto_connect_cfm(conn, ev->status); 1732 hci_conn_del(conn); 1733 } else if (ev->link_type != ACL_LINK) 1734 hci_proto_connect_cfm(conn, ev->status); 1735 1736unlock: 1737 hci_dev_unlock(hdev); 1738 1739 hci_conn_check_pending(hdev); 1740} 1741 1742static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 1743{ 1744 struct hci_ev_conn_request *ev = (void *) skb->data; 1745 int mask = hdev->link_mode; 1746 __u8 flags = 0; 1747 1748 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr, 1749 ev->link_type); 1750 1751 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, 1752 &flags); 1753 1754 if ((mask & HCI_LM_ACCEPT) && 1755 !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) { 1756 /* Connection accepted */ 1757 struct inquiry_entry *ie; 1758 struct hci_conn *conn; 1759 1760 hci_dev_lock(hdev); 1761 1762 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 1763 if (ie) 1764 memcpy(ie->data.dev_class, ev->dev_class, 3); 1765 1766 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, 1767 &ev->bdaddr); 1768 if (!conn) { 1769 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr); 1770 if (!conn) { 1771 BT_ERR("No memory for new connection"); 1772 hci_dev_unlock(hdev); 1773 return; 1774 } 1775 } 1776 1777 memcpy(conn->dev_class, ev->dev_class, 3); 1778 1779 hci_dev_unlock(hdev); 1780 1781 if (ev->link_type == ACL_LINK || 1782 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { 1783 struct hci_cp_accept_conn_req cp; 1784 conn->state = BT_CONNECT; 1785 1786 bacpy(&cp.bdaddr, &ev->bdaddr); 1787 1788 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 1789 cp.role = 0x00; /* Become master */ 1790 else 1791 cp.role = 0x01; /* Remain slave */ 1792 1793 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), 1794 &cp); 1795 } else if (!(flags & HCI_PROTO_DEFER)) { 1796 struct hci_cp_accept_sync_conn_req cp; 1797 conn->state = BT_CONNECT; 1798 1799 bacpy(&cp.bdaddr, &ev->bdaddr); 1800 cp.pkt_type = cpu_to_le16(conn->pkt_type); 1801 1802 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40); 1803 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40); 1804 cp.max_latency = __constant_cpu_to_le16(0xffff); 1805 cp.content_format = cpu_to_le16(hdev->voice_setting); 1806 cp.retrans_effort = 0xff; 1807 1808 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, 1809 sizeof(cp), &cp); 1810 } else { 1811 conn->state = BT_CONNECT2; 1812 hci_proto_connect_cfm(conn, 0); 1813 } 1814 } else { 1815 /* Connection rejected */ 1816 struct hci_cp_reject_conn_req cp; 1817 1818 bacpy(&cp.bdaddr, &ev->bdaddr); 1819 cp.reason = HCI_ERROR_REJ_BAD_ADDR; 1820 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); 1821 } 1822} 1823 1824static u8 hci_to_mgmt_reason(u8 err) 1825{ 1826 switch (err) { 1827 case HCI_ERROR_CONNECTION_TIMEOUT: 1828 return MGMT_DEV_DISCONN_TIMEOUT; 1829 case HCI_ERROR_REMOTE_USER_TERM: 1830 case HCI_ERROR_REMOTE_LOW_RESOURCES: 1831 case HCI_ERROR_REMOTE_POWER_OFF: 1832 return MGMT_DEV_DISCONN_REMOTE; 1833 case HCI_ERROR_LOCAL_HOST_TERM: 1834 return MGMT_DEV_DISCONN_LOCAL_HOST; 1835 default: 1836 return MGMT_DEV_DISCONN_UNKNOWN; 1837 } 1838} 1839 1840static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1841{ 1842 struct hci_ev_disconn_complete *ev = (void *) skb->data; 1843 u8 reason = hci_to_mgmt_reason(ev->reason); 1844 struct hci_conn *conn; 1845 bool mgmt_connected; 1846 u8 type; 1847 1848 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 1849 1850 hci_dev_lock(hdev); 1851 1852 
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1853 if (!conn) 1854 goto unlock; 1855 1856 if (ev->status) { 1857 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 1858 conn->dst_type, ev->status); 1859 goto unlock; 1860 } 1861 1862 conn->state = BT_CLOSED; 1863 1864 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 1865 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 1866 reason, mgmt_connected); 1867 1868 if (conn->type == ACL_LINK && conn->flush_key) 1869 hci_remove_link_key(hdev, &conn->dst); 1870 1871 type = conn->type; 1872 1873 hci_proto_disconn_cfm(conn, ev->reason); 1874 hci_conn_del(conn); 1875 1876 /* Re-enable advertising if necessary, since it might 1877 * have been disabled by the connection. From the 1878 * HCI_LE_Set_Advertise_Enable command description in 1879 * the core specification (v4.0): 1880 * "The Controller shall continue advertising until the Host 1881 * issues an LE_Set_Advertise_Enable command with 1882 * Advertising_Enable set to 0x00 (Advertising is disabled) 1883 * or until a connection is created or until the Advertising 1884 * is timed out due to Directed Advertising." 1885 */ 1886 if (type == LE_LINK) 1887 mgmt_reenable_advertising(hdev); 1888 1889unlock: 1890 hci_dev_unlock(hdev); 1891} 1892 1893static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1894{ 1895 struct hci_ev_auth_complete *ev = (void *) skb->data; 1896 struct hci_conn *conn; 1897 1898 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 1899 1900 hci_dev_lock(hdev); 1901 1902 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1903 if (!conn) 1904 goto unlock; 1905 1906 if (!ev->status) { 1907 if (!hci_conn_ssp_enabled(conn) && 1908 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { 1909 BT_INFO("re-auth of legacy device is not possible."); 1910 } else { 1911 conn->link_mode |= HCI_LM_AUTH; 1912 conn->sec_level = conn->pending_sec_level; 1913 } 1914 } else { 1915 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type, 1916 ev->status); 1917 } 1918 1919 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 1920 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 1921 1922 if (conn->state == BT_CONFIG) { 1923 if (!ev->status && hci_conn_ssp_enabled(conn)) { 1924 struct hci_cp_set_conn_encrypt cp; 1925 cp.handle = ev->handle; 1926 cp.encrypt = 0x01; 1927 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 1928 &cp); 1929 } else { 1930 conn->state = BT_CONNECTED; 1931 hci_proto_connect_cfm(conn, ev->status); 1932 hci_conn_drop(conn); 1933 } 1934 } else { 1935 hci_auth_cfm(conn, ev->status); 1936 1937 hci_conn_hold(conn); 1938 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 1939 hci_conn_drop(conn); 1940 } 1941 1942 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { 1943 if (!ev->status) { 1944 struct hci_cp_set_conn_encrypt cp; 1945 cp.handle = ev->handle; 1946 cp.encrypt = 0x01; 1947 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 1948 &cp); 1949 } else { 1950 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 1951 hci_encrypt_cfm(conn, ev->status, 0x00); 1952 } 1953 } 1954 1955unlock: 1956 hci_dev_unlock(hdev); 1957} 1958 1959static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) 1960{ 1961 struct hci_ev_remote_name *ev = (void *) skb->data; 1962 struct hci_conn *conn; 1963 1964 BT_DBG("%s", hdev->name); 1965 1966 hci_conn_check_pending(hdev); 1967 1968 hci_dev_lock(hdev); 1969 1970 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 1971 1972 if 
(!test_bit(HCI_MGMT, &hdev->dev_flags)) 1973 goto check_auth; 1974 1975 if (ev->status == 0) 1976 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name, 1977 strnlen(ev->name, HCI_MAX_NAME_LENGTH)); 1978 else 1979 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0); 1980 1981check_auth: 1982 if (!conn) 1983 goto unlock; 1984 1985 if (!hci_outgoing_auth_needed(hdev, conn)) 1986 goto unlock; 1987 1988 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 1989 struct hci_cp_auth_requested cp; 1990 cp.handle = __cpu_to_le16(conn->handle); 1991 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 1992 } 1993 1994unlock: 1995 hci_dev_unlock(hdev); 1996} 1997 1998static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 1999{ 2000 struct hci_ev_encrypt_change *ev = (void *) skb->data; 2001 struct hci_conn *conn; 2002 2003 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2004 2005 hci_dev_lock(hdev); 2006 2007 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2008 if (!conn) 2009 goto unlock; 2010 2011 if (!ev->status) { 2012 if (ev->encrypt) { 2013 /* Encryption implies authentication */ 2014 conn->link_mode |= HCI_LM_AUTH; 2015 conn->link_mode |= HCI_LM_ENCRYPT; 2016 conn->sec_level = conn->pending_sec_level; 2017 2018 /* P-256 authentication key implies FIPS */ 2019 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) 2020 conn->link_mode |= HCI_LM_FIPS; 2021 2022 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || 2023 conn->type == LE_LINK) 2024 set_bit(HCI_CONN_AES_CCM, &conn->flags); 2025 } else { 2026 conn->link_mode &= ~HCI_LM_ENCRYPT; 2027 clear_bit(HCI_CONN_AES_CCM, &conn->flags); 2028 } 2029 } 2030 2031 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 2032 2033 if (ev->status && conn->state == BT_CONNECTED) { 2034 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 2035 hci_conn_drop(conn); 2036 goto unlock; 2037 } 2038 2039 if (conn->state == BT_CONFIG) { 2040 if (!ev->status) 2041 conn->state = BT_CONNECTED; 2042 2043 hci_proto_connect_cfm(conn, ev->status); 2044 hci_conn_drop(conn); 2045 } else 2046 hci_encrypt_cfm(conn, ev->status, ev->encrypt); 2047 2048unlock: 2049 hci_dev_unlock(hdev); 2050} 2051 2052static void hci_change_link_key_complete_evt(struct hci_dev *hdev, 2053 struct sk_buff *skb) 2054{ 2055 struct hci_ev_change_link_key_complete *ev = (void *) skb->data; 2056 struct hci_conn *conn; 2057 2058 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2059 2060 hci_dev_lock(hdev); 2061 2062 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2063 if (conn) { 2064 if (!ev->status) 2065 conn->link_mode |= HCI_LM_SECURE; 2066 2067 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 2068 2069 hci_key_change_cfm(conn, ev->status); 2070 } 2071 2072 hci_dev_unlock(hdev); 2073} 2074 2075static void hci_remote_features_evt(struct hci_dev *hdev, 2076 struct sk_buff *skb) 2077{ 2078 struct hci_ev_remote_features *ev = (void *) skb->data; 2079 struct hci_conn *conn; 2080 2081 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2082 2083 hci_dev_lock(hdev); 2084 2085 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2086 if (!conn) 2087 goto unlock; 2088 2089 if (!ev->status) 2090 memcpy(conn->features[0], ev->features, 8); 2091 2092 if (conn->state != BT_CONFIG) 2093 goto unlock; 2094 2095 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) { 2096 struct hci_cp_read_remote_ext_features cp; 2097 cp.handle = ev->handle; 2098 cp.page = 0x01; 2099 hci_send_cmd(hdev, 
HCI_OP_READ_REMOTE_EXT_FEATURES, 2100 sizeof(cp), &cp); 2101 goto unlock; 2102 } 2103 2104 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 2105 struct hci_cp_remote_name_req cp; 2106 memset(&cp, 0, sizeof(cp)); 2107 bacpy(&cp.bdaddr, &conn->dst); 2108 cp.pscan_rep_mode = 0x02; 2109 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 2110 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 2111 mgmt_device_connected(hdev, &conn->dst, conn->type, 2112 conn->dst_type, 0, NULL, 0, 2113 conn->dev_class); 2114 2115 if (!hci_outgoing_auth_needed(hdev, conn)) { 2116 conn->state = BT_CONNECTED; 2117 hci_proto_connect_cfm(conn, ev->status); 2118 hci_conn_drop(conn); 2119 } 2120 2121unlock: 2122 hci_dev_unlock(hdev); 2123} 2124 2125static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2126{ 2127 struct hci_ev_cmd_complete *ev = (void *) skb->data; 2128 u8 status = skb->data[sizeof(*ev)]; 2129 __u16 opcode; 2130 2131 skb_pull(skb, sizeof(*ev)); 2132 2133 opcode = __le16_to_cpu(ev->opcode); 2134 2135 switch (opcode) { 2136 case HCI_OP_INQUIRY_CANCEL: 2137 hci_cc_inquiry_cancel(hdev, skb); 2138 break; 2139 2140 case HCI_OP_PERIODIC_INQ: 2141 hci_cc_periodic_inq(hdev, skb); 2142 break; 2143 2144 case HCI_OP_EXIT_PERIODIC_INQ: 2145 hci_cc_exit_periodic_inq(hdev, skb); 2146 break; 2147 2148 case HCI_OP_REMOTE_NAME_REQ_CANCEL: 2149 hci_cc_remote_name_req_cancel(hdev, skb); 2150 break; 2151 2152 case HCI_OP_ROLE_DISCOVERY: 2153 hci_cc_role_discovery(hdev, skb); 2154 break; 2155 2156 case HCI_OP_READ_LINK_POLICY: 2157 hci_cc_read_link_policy(hdev, skb); 2158 break; 2159 2160 case HCI_OP_WRITE_LINK_POLICY: 2161 hci_cc_write_link_policy(hdev, skb); 2162 break; 2163 2164 case HCI_OP_READ_DEF_LINK_POLICY: 2165 hci_cc_read_def_link_policy(hdev, skb); 2166 break; 2167 2168 case HCI_OP_WRITE_DEF_LINK_POLICY: 2169 hci_cc_write_def_link_policy(hdev, skb); 2170 break; 2171 2172 case HCI_OP_RESET: 2173 hci_cc_reset(hdev, skb); 2174 break; 2175 2176 case HCI_OP_WRITE_LOCAL_NAME: 2177 hci_cc_write_local_name(hdev, skb); 2178 break; 2179 2180 case HCI_OP_READ_LOCAL_NAME: 2181 hci_cc_read_local_name(hdev, skb); 2182 break; 2183 2184 case HCI_OP_WRITE_AUTH_ENABLE: 2185 hci_cc_write_auth_enable(hdev, skb); 2186 break; 2187 2188 case HCI_OP_WRITE_ENCRYPT_MODE: 2189 hci_cc_write_encrypt_mode(hdev, skb); 2190 break; 2191 2192 case HCI_OP_WRITE_SCAN_ENABLE: 2193 hci_cc_write_scan_enable(hdev, skb); 2194 break; 2195 2196 case HCI_OP_READ_CLASS_OF_DEV: 2197 hci_cc_read_class_of_dev(hdev, skb); 2198 break; 2199 2200 case HCI_OP_WRITE_CLASS_OF_DEV: 2201 hci_cc_write_class_of_dev(hdev, skb); 2202 break; 2203 2204 case HCI_OP_READ_VOICE_SETTING: 2205 hci_cc_read_voice_setting(hdev, skb); 2206 break; 2207 2208 case HCI_OP_WRITE_VOICE_SETTING: 2209 hci_cc_write_voice_setting(hdev, skb); 2210 break; 2211 2212 case HCI_OP_READ_NUM_SUPPORTED_IAC: 2213 hci_cc_read_num_supported_iac(hdev, skb); 2214 break; 2215 2216 case HCI_OP_WRITE_SSP_MODE: 2217 hci_cc_write_ssp_mode(hdev, skb); 2218 break; 2219 2220 case HCI_OP_WRITE_SC_SUPPORT: 2221 hci_cc_write_sc_support(hdev, skb); 2222 break; 2223 2224 case HCI_OP_READ_LOCAL_VERSION: 2225 hci_cc_read_local_version(hdev, skb); 2226 break; 2227 2228 case HCI_OP_READ_LOCAL_COMMANDS: 2229 hci_cc_read_local_commands(hdev, skb); 2230 break; 2231 2232 case HCI_OP_READ_LOCAL_FEATURES: 2233 hci_cc_read_local_features(hdev, skb); 2234 break; 2235 2236 case HCI_OP_READ_LOCAL_EXT_FEATURES: 2237 hci_cc_read_local_ext_features(hdev, skb); 2238 
break; 2239 2240 case HCI_OP_READ_BUFFER_SIZE: 2241 hci_cc_read_buffer_size(hdev, skb); 2242 break; 2243 2244 case HCI_OP_READ_BD_ADDR: 2245 hci_cc_read_bd_addr(hdev, skb); 2246 break; 2247 2248 case HCI_OP_READ_PAGE_SCAN_ACTIVITY: 2249 hci_cc_read_page_scan_activity(hdev, skb); 2250 break; 2251 2252 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY: 2253 hci_cc_write_page_scan_activity(hdev, skb); 2254 break; 2255 2256 case HCI_OP_READ_PAGE_SCAN_TYPE: 2257 hci_cc_read_page_scan_type(hdev, skb); 2258 break; 2259 2260 case HCI_OP_WRITE_PAGE_SCAN_TYPE: 2261 hci_cc_write_page_scan_type(hdev, skb); 2262 break; 2263 2264 case HCI_OP_READ_DATA_BLOCK_SIZE: 2265 hci_cc_read_data_block_size(hdev, skb); 2266 break; 2267 2268 case HCI_OP_READ_FLOW_CONTROL_MODE: 2269 hci_cc_read_flow_control_mode(hdev, skb); 2270 break; 2271 2272 case HCI_OP_READ_LOCAL_AMP_INFO: 2273 hci_cc_read_local_amp_info(hdev, skb); 2274 break; 2275 2276 case HCI_OP_READ_LOCAL_AMP_ASSOC: 2277 hci_cc_read_local_amp_assoc(hdev, skb); 2278 break; 2279 2280 case HCI_OP_READ_INQ_RSP_TX_POWER: 2281 hci_cc_read_inq_rsp_tx_power(hdev, skb); 2282 break; 2283 2284 case HCI_OP_PIN_CODE_REPLY: 2285 hci_cc_pin_code_reply(hdev, skb); 2286 break; 2287 2288 case HCI_OP_PIN_CODE_NEG_REPLY: 2289 hci_cc_pin_code_neg_reply(hdev, skb); 2290 break; 2291 2292 case HCI_OP_READ_LOCAL_OOB_DATA: 2293 hci_cc_read_local_oob_data(hdev, skb); 2294 break; 2295 2296 case HCI_OP_READ_LOCAL_OOB_EXT_DATA: 2297 hci_cc_read_local_oob_ext_data(hdev, skb); 2298 break; 2299 2300 case HCI_OP_LE_READ_BUFFER_SIZE: 2301 hci_cc_le_read_buffer_size(hdev, skb); 2302 break; 2303 2304 case HCI_OP_LE_READ_LOCAL_FEATURES: 2305 hci_cc_le_read_local_features(hdev, skb); 2306 break; 2307 2308 case HCI_OP_LE_READ_ADV_TX_POWER: 2309 hci_cc_le_read_adv_tx_power(hdev, skb); 2310 break; 2311 2312 case HCI_OP_USER_CONFIRM_REPLY: 2313 hci_cc_user_confirm_reply(hdev, skb); 2314 break; 2315 2316 case HCI_OP_USER_CONFIRM_NEG_REPLY: 2317 hci_cc_user_confirm_neg_reply(hdev, skb); 2318 break; 2319 2320 case HCI_OP_USER_PASSKEY_REPLY: 2321 hci_cc_user_passkey_reply(hdev, skb); 2322 break; 2323 2324 case HCI_OP_USER_PASSKEY_NEG_REPLY: 2325 hci_cc_user_passkey_neg_reply(hdev, skb); 2326 break; 2327 2328 case HCI_OP_LE_SET_RANDOM_ADDR: 2329 hci_cc_le_set_random_addr(hdev, skb); 2330 break; 2331 2332 case HCI_OP_LE_SET_ADV_ENABLE: 2333 hci_cc_le_set_adv_enable(hdev, skb); 2334 break; 2335 2336 case HCI_OP_LE_SET_SCAN_ENABLE: 2337 hci_cc_le_set_scan_enable(hdev, skb); 2338 break; 2339 2340 case HCI_OP_LE_READ_WHITE_LIST_SIZE: 2341 hci_cc_le_read_white_list_size(hdev, skb); 2342 break; 2343 2344 case HCI_OP_LE_READ_SUPPORTED_STATES: 2345 hci_cc_le_read_supported_states(hdev, skb); 2346 break; 2347 2348 case HCI_OP_WRITE_LE_HOST_SUPPORTED: 2349 hci_cc_write_le_host_supported(hdev, skb); 2350 break; 2351 2352 case HCI_OP_WRITE_REMOTE_AMP_ASSOC: 2353 hci_cc_write_remote_amp_assoc(hdev, skb); 2354 break; 2355 2356 default: 2357 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); 2358 break; 2359 } 2360 2361 if (opcode != HCI_OP_NOP) 2362 del_timer(&hdev->cmd_timer); 2363 2364 hci_req_cmd_complete(hdev, opcode, status); 2365 2366 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { 2367 atomic_set(&hdev->cmd_cnt, 1); 2368 if (!skb_queue_empty(&hdev->cmd_q)) 2369 queue_work(hdev->workqueue, &hdev->cmd_work); 2370 } 2371} 2372 2373static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) 2374{ 2375 struct hci_ev_cmd_status *ev = (void *) skb->data; 2376 __u16 opcode; 2377 2378 skb_pull(skb, sizeof(*ev)); 
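/* Unlike Command Complete, a Command Status event carries no return
 * parameters; it only reports whether the controller accepted the
 * command, so the handlers below are passed just ev->status.
 */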
2379 2380 opcode = __le16_to_cpu(ev->opcode); 2381 2382 switch (opcode) { 2383 case HCI_OP_INQUIRY: 2384 hci_cs_inquiry(hdev, ev->status); 2385 break; 2386 2387 case HCI_OP_CREATE_CONN: 2388 hci_cs_create_conn(hdev, ev->status); 2389 break; 2390 2391 case HCI_OP_ADD_SCO: 2392 hci_cs_add_sco(hdev, ev->status); 2393 break; 2394 2395 case HCI_OP_AUTH_REQUESTED: 2396 hci_cs_auth_requested(hdev, ev->status); 2397 break; 2398 2399 case HCI_OP_SET_CONN_ENCRYPT: 2400 hci_cs_set_conn_encrypt(hdev, ev->status); 2401 break; 2402 2403 case HCI_OP_REMOTE_NAME_REQ: 2404 hci_cs_remote_name_req(hdev, ev->status); 2405 break; 2406 2407 case HCI_OP_READ_REMOTE_FEATURES: 2408 hci_cs_read_remote_features(hdev, ev->status); 2409 break; 2410 2411 case HCI_OP_READ_REMOTE_EXT_FEATURES: 2412 hci_cs_read_remote_ext_features(hdev, ev->status); 2413 break; 2414 2415 case HCI_OP_SETUP_SYNC_CONN: 2416 hci_cs_setup_sync_conn(hdev, ev->status); 2417 break; 2418 2419 case HCI_OP_SNIFF_MODE: 2420 hci_cs_sniff_mode(hdev, ev->status); 2421 break; 2422 2423 case HCI_OP_EXIT_SNIFF_MODE: 2424 hci_cs_exit_sniff_mode(hdev, ev->status); 2425 break; 2426 2427 case HCI_OP_DISCONNECT: 2428 hci_cs_disconnect(hdev, ev->status); 2429 break; 2430 2431 case HCI_OP_CREATE_PHY_LINK: 2432 hci_cs_create_phylink(hdev, ev->status); 2433 break; 2434 2435 case HCI_OP_ACCEPT_PHY_LINK: 2436 hci_cs_accept_phylink(hdev, ev->status); 2437 break; 2438 2439 default: 2440 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); 2441 break; 2442 } 2443 2444 if (opcode != HCI_OP_NOP) 2445 del_timer(&hdev->cmd_timer); 2446 2447 if (ev->status || 2448 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event)) 2449 hci_req_cmd_complete(hdev, opcode, ev->status); 2450 2451 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { 2452 atomic_set(&hdev->cmd_cnt, 1); 2453 if (!skb_queue_empty(&hdev->cmd_q)) 2454 queue_work(hdev->workqueue, &hdev->cmd_work); 2455 } 2456} 2457 2458static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2459{ 2460 struct hci_ev_role_change *ev = (void *) skb->data; 2461 struct hci_conn *conn; 2462 2463 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2464 2465 hci_dev_lock(hdev); 2466 2467 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2468 if (conn) { 2469 if (!ev->status) { 2470 if (ev->role) 2471 conn->link_mode &= ~HCI_LM_MASTER; 2472 else 2473 conn->link_mode |= HCI_LM_MASTER; 2474 } 2475 2476 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 2477 2478 hci_role_switch_cfm(conn, ev->status, ev->role); 2479 } 2480 2481 hci_dev_unlock(hdev); 2482} 2483 2484static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) 2485{ 2486 struct hci_ev_num_comp_pkts *ev = (void *) skb->data; 2487 int i; 2488 2489 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { 2490 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode); 2491 return; 2492 } 2493 2494 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + 2495 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) { 2496 BT_DBG("%s bad parameters", hdev->name); 2497 return; 2498 } 2499 2500 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl); 2501 2502 for (i = 0; i < ev->num_hndl; i++) { 2503 struct hci_comp_pkts_info *info = &ev->handles[i]; 2504 struct hci_conn *conn; 2505 __u16 handle, count; 2506 2507 handle = __le16_to_cpu(info->handle); 2508 count = __le16_to_cpu(info->count); 2509 2510 conn = hci_conn_hash_lookup_handle(hdev, handle); 2511 if (!conn) 2512 continue; 2513 2514 conn->sent -= count; 2515 2516 switch (conn->type) { 2517 case 
ACL_LINK: 2518 hdev->acl_cnt += count; 2519 if (hdev->acl_cnt > hdev->acl_pkts) 2520 hdev->acl_cnt = hdev->acl_pkts; 2521 break; 2522 2523 case LE_LINK: 2524 if (hdev->le_pkts) { 2525 hdev->le_cnt += count; 2526 if (hdev->le_cnt > hdev->le_pkts) 2527 hdev->le_cnt = hdev->le_pkts; 2528 } else { 2529 hdev->acl_cnt += count; 2530 if (hdev->acl_cnt > hdev->acl_pkts) 2531 hdev->acl_cnt = hdev->acl_pkts; 2532 } 2533 break; 2534 2535 case SCO_LINK: 2536 hdev->sco_cnt += count; 2537 if (hdev->sco_cnt > hdev->sco_pkts) 2538 hdev->sco_cnt = hdev->sco_pkts; 2539 break; 2540 2541 default: 2542 BT_ERR("Unknown type %d conn %p", conn->type, conn); 2543 break; 2544 } 2545 } 2546 2547 queue_work(hdev->workqueue, &hdev->tx_work); 2548} 2549 2550static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev, 2551 __u16 handle) 2552{ 2553 struct hci_chan *chan; 2554 2555 switch (hdev->dev_type) { 2556 case HCI_BREDR: 2557 return hci_conn_hash_lookup_handle(hdev, handle); 2558 case HCI_AMP: 2559 chan = hci_chan_lookup_handle(hdev, handle); 2560 if (chan) 2561 return chan->conn; 2562 break; 2563 default: 2564 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type); 2565 break; 2566 } 2567 2568 return NULL; 2569} 2570 2571static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) 2572{ 2573 struct hci_ev_num_comp_blocks *ev = (void *) skb->data; 2574 int i; 2575 2576 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) { 2577 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode); 2578 return; 2579 } 2580 2581 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + 2582 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) { 2583 BT_DBG("%s bad parameters", hdev->name); 2584 return; 2585 } 2586 2587 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks, 2588 ev->num_hndl); 2589 2590 for (i = 0; i < ev->num_hndl; i++) { 2591 struct hci_comp_blocks_info *info = &ev->handles[i]; 2592 struct hci_conn *conn = NULL; 2593 __u16 handle, block_count; 2594 2595 handle = __le16_to_cpu(info->handle); 2596 block_count = __le16_to_cpu(info->blocks); 2597 2598 conn = __hci_conn_lookup_handle(hdev, handle); 2599 if (!conn) 2600 continue; 2601 2602 conn->sent -= block_count; 2603 2604 switch (conn->type) { 2605 case ACL_LINK: 2606 case AMP_LINK: 2607 hdev->block_cnt += block_count; 2608 if (hdev->block_cnt > hdev->num_blocks) 2609 hdev->block_cnt = hdev->num_blocks; 2610 break; 2611 2612 default: 2613 BT_ERR("Unknown type %d conn %p", conn->type, conn); 2614 break; 2615 } 2616 } 2617 2618 queue_work(hdev->workqueue, &hdev->tx_work); 2619} 2620 2621static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2622{ 2623 struct hci_ev_mode_change *ev = (void *) skb->data; 2624 struct hci_conn *conn; 2625 2626 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2627 2628 hci_dev_lock(hdev); 2629 2630 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2631 if (conn) { 2632 conn->mode = ev->mode; 2633 2634 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, 2635 &conn->flags)) { 2636 if (conn->mode == HCI_CM_ACTIVE) 2637 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 2638 else 2639 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags); 2640 } 2641 2642 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2643 hci_sco_setup(conn, ev->status); 2644 } 2645 2646 hci_dev_unlock(hdev); 2647} 2648 2649static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2650{ 2651 struct hci_ev_pin_code_req *ev = (void *) skb->data; 2652 struct hci_conn 
*conn; 2653 2654 BT_DBG("%s", hdev->name); 2655 2656 hci_dev_lock(hdev); 2657 2658 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2659 if (!conn) 2660 goto unlock; 2661 2662 if (conn->state == BT_CONNECTED) { 2663 hci_conn_hold(conn); 2664 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 2665 hci_conn_drop(conn); 2666 } 2667 2668 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags)) 2669 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 2670 sizeof(ev->bdaddr), &ev->bdaddr); 2671 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) { 2672 u8 secure; 2673 2674 if (conn->pending_sec_level == BT_SECURITY_HIGH) 2675 secure = 1; 2676 else 2677 secure = 0; 2678 2679 mgmt_pin_code_request(hdev, &ev->bdaddr, secure); 2680 } 2681 2682unlock: 2683 hci_dev_unlock(hdev); 2684} 2685 2686static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2687{ 2688 struct hci_ev_link_key_req *ev = (void *) skb->data; 2689 struct hci_cp_link_key_reply cp; 2690 struct hci_conn *conn; 2691 struct link_key *key; 2692 2693 BT_DBG("%s", hdev->name); 2694 2695 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 2696 return; 2697 2698 hci_dev_lock(hdev); 2699 2700 key = hci_find_link_key(hdev, &ev->bdaddr); 2701 if (!key) { 2702 BT_DBG("%s link key not found for %pMR", hdev->name, 2703 &ev->bdaddr); 2704 goto not_found; 2705 } 2706 2707 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type, 2708 &ev->bdaddr); 2709 2710 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) && 2711 key->type == HCI_LK_DEBUG_COMBINATION) { 2712 BT_DBG("%s ignoring debug key", hdev->name); 2713 goto not_found; 2714 } 2715 2716 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2717 if (conn) { 2718 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || 2719 key->type == HCI_LK_UNAUTH_COMBINATION_P256) && 2720 conn->auth_type != 0xff && (conn->auth_type & 0x01)) { 2721 BT_DBG("%s ignoring unauthenticated key", hdev->name); 2722 goto not_found; 2723 } 2724 2725 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 2726 conn->pending_sec_level == BT_SECURITY_HIGH) { 2727 BT_DBG("%s ignoring key unauthenticated for high security", 2728 hdev->name); 2729 goto not_found; 2730 } 2731 2732 conn->key_type = key->type; 2733 conn->pin_length = key->pin_len; 2734 } 2735 2736 bacpy(&cp.bdaddr, &ev->bdaddr); 2737 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); 2738 2739 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 2740 2741 hci_dev_unlock(hdev); 2742 2743 return; 2744 2745not_found: 2746 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); 2747 hci_dev_unlock(hdev); 2748} 2749 2750static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 2751{ 2752 struct hci_ev_link_key_notify *ev = (void *) skb->data; 2753 struct hci_conn *conn; 2754 u8 pin_len = 0; 2755 2756 BT_DBG("%s", hdev->name); 2757 2758 hci_dev_lock(hdev); 2759 2760 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2761 if (conn) { 2762 hci_conn_hold(conn); 2763 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 2764 pin_len = conn->pin_length; 2765 2766 if (ev->key_type != HCI_LK_CHANGED_COMBINATION) 2767 conn->key_type = ev->key_type; 2768 2769 hci_conn_drop(conn); 2770 } 2771 2772 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 2773 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key, 2774 ev->key_type, pin_len); 2775 2776 hci_dev_unlock(hdev); 2777} 2778 2779static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) 2780{ 2781 struct hci_ev_clock_offset *ev = (void *) skb->data; 2782 struct hci_conn *conn; 
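/* Cache the clock offset reported for this connection in the peer's
 * inquiry cache entry so that a later paging attempt can reuse it.
 */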
2783 2784 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2785 2786 hci_dev_lock(hdev); 2787 2788 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2789 if (conn && !ev->status) { 2790 struct inquiry_entry *ie; 2791 2792 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 2793 if (ie) { 2794 ie->data.clock_offset = ev->clock_offset; 2795 ie->timestamp = jiffies; 2796 } 2797 } 2798 2799 hci_dev_unlock(hdev); 2800} 2801 2802static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2803{ 2804 struct hci_ev_pkt_type_change *ev = (void *) skb->data; 2805 struct hci_conn *conn; 2806 2807 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2808 2809 hci_dev_lock(hdev); 2810 2811 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2812 if (conn && !ev->status) 2813 conn->pkt_type = __le16_to_cpu(ev->pkt_type); 2814 2815 hci_dev_unlock(hdev); 2816} 2817 2818static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) 2819{ 2820 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; 2821 struct inquiry_entry *ie; 2822 2823 BT_DBG("%s", hdev->name); 2824 2825 hci_dev_lock(hdev); 2826 2827 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 2828 if (ie) { 2829 ie->data.pscan_rep_mode = ev->pscan_rep_mode; 2830 ie->timestamp = jiffies; 2831 } 2832 2833 hci_dev_unlock(hdev); 2834} 2835 2836static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, 2837 struct sk_buff *skb) 2838{ 2839 struct inquiry_data data; 2840 int num_rsp = *((__u8 *) skb->data); 2841 bool name_known, ssp; 2842 2843 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 2844 2845 if (!num_rsp) 2846 return; 2847 2848 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) 2849 return; 2850 2851 hci_dev_lock(hdev); 2852 2853 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { 2854 struct inquiry_info_with_rssi_and_pscan_mode *info; 2855 info = (void *) (skb->data + 1); 2856 2857 for (; num_rsp; num_rsp--, info++) { 2858 bacpy(&data.bdaddr, &info->bdaddr); 2859 data.pscan_rep_mode = info->pscan_rep_mode; 2860 data.pscan_period_mode = info->pscan_period_mode; 2861 data.pscan_mode = info->pscan_mode; 2862 memcpy(data.dev_class, info->dev_class, 3); 2863 data.clock_offset = info->clock_offset; 2864 data.rssi = info->rssi; 2865 data.ssp_mode = 0x00; 2866 2867 name_known = hci_inquiry_cache_update(hdev, &data, 2868 false, &ssp); 2869 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 2870 info->dev_class, info->rssi, 2871 !name_known, ssp, NULL, 0); 2872 } 2873 } else { 2874 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); 2875 2876 for (; num_rsp; num_rsp--, info++) { 2877 bacpy(&data.bdaddr, &info->bdaddr); 2878 data.pscan_rep_mode = info->pscan_rep_mode; 2879 data.pscan_period_mode = info->pscan_period_mode; 2880 data.pscan_mode = 0x00; 2881 memcpy(data.dev_class, info->dev_class, 3); 2882 data.clock_offset = info->clock_offset; 2883 data.rssi = info->rssi; 2884 data.ssp_mode = 0x00; 2885 name_known = hci_inquiry_cache_update(hdev, &data, 2886 false, &ssp); 2887 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 2888 info->dev_class, info->rssi, 2889 !name_known, ssp, NULL, 0); 2890 } 2891 } 2892 2893 hci_dev_unlock(hdev); 2894} 2895 2896static void hci_remote_ext_features_evt(struct hci_dev *hdev, 2897 struct sk_buff *skb) 2898{ 2899 struct hci_ev_remote_ext_features *ev = (void *) skb->data; 2900 struct hci_conn *conn; 2901 2902 BT_DBG("%s", hdev->name); 2903 2904 hci_dev_lock(hdev); 2905 2906 conn = 
hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2907 if (!conn) 2908 goto unlock; 2909 2910 if (ev->page < HCI_MAX_PAGES) 2911 memcpy(conn->features[ev->page], ev->features, 8); 2912 2913 if (!ev->status && ev->page == 0x01) { 2914 struct inquiry_entry *ie; 2915 2916 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 2917 if (ie) 2918 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 2919 2920 if (ev->features[0] & LMP_HOST_SSP) { 2921 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 2922 } else { 2923 /* It is mandatory by the Bluetooth specification that 2924 * Extended Inquiry Results are only used when Secure 2925 * Simple Pairing is enabled, but some devices violate 2926 * this. 2927 * 2928 * To make these devices work, the internal SSP 2929 * enabled flag needs to be cleared if the remote host 2930 * features do not indicate SSP support */ 2931 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 2932 } 2933 2934 if (ev->features[0] & LMP_HOST_SC) 2935 set_bit(HCI_CONN_SC_ENABLED, &conn->flags); 2936 } 2937 2938 if (conn->state != BT_CONFIG) 2939 goto unlock; 2940 2941 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 2942 struct hci_cp_remote_name_req cp; 2943 memset(&cp, 0, sizeof(cp)); 2944 bacpy(&cp.bdaddr, &conn->dst); 2945 cp.pscan_rep_mode = 0x02; 2946 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 2947 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 2948 mgmt_device_connected(hdev, &conn->dst, conn->type, 2949 conn->dst_type, 0, NULL, 0, 2950 conn->dev_class); 2951 2952 if (!hci_outgoing_auth_needed(hdev, conn)) { 2953 conn->state = BT_CONNECTED; 2954 hci_proto_connect_cfm(conn, ev->status); 2955 hci_conn_drop(conn); 2956 } 2957 2958unlock: 2959 hci_dev_unlock(hdev); 2960} 2961 2962static void hci_sync_conn_complete_evt(struct hci_dev *hdev, 2963 struct sk_buff *skb) 2964{ 2965 struct hci_ev_sync_conn_complete *ev = (void *) skb->data; 2966 struct hci_conn *conn; 2967 2968 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2969 2970 hci_dev_lock(hdev); 2971 2972 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 2973 if (!conn) { 2974 if (ev->link_type == ESCO_LINK) 2975 goto unlock; 2976 2977 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 2978 if (!conn) 2979 goto unlock; 2980 2981 conn->type = SCO_LINK; 2982 } 2983 2984 switch (ev->status) { 2985 case 0x00: 2986 conn->handle = __le16_to_cpu(ev->handle); 2987 conn->state = BT_CONNECTED; 2988 2989 hci_conn_add_sysfs(conn); 2990 break; 2991 2992 case 0x0d: /* Connection Rejected due to Limited Resources */ 2993 case 0x11: /* Unsupported Feature or Parameter Value */ 2994 case 0x1c: /* SCO interval rejected */ 2995 case 0x1a: /* Unsupported Remote Feature */ 2996 case 0x1f: /* Unspecified error */ 2997 if (conn->out) { 2998 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 2999 (hdev->esco_type & EDR_ESCO_MASK); 3000 if (hci_setup_sync(conn, conn->link->handle)) 3001 goto unlock; 3002 } 3003 /* fall through */ 3004 3005 default: 3006 conn->state = BT_CLOSED; 3007 break; 3008 } 3009 3010 hci_proto_connect_cfm(conn, ev->status); 3011 if (ev->status) 3012 hci_conn_del(conn); 3013 3014unlock: 3015 hci_dev_unlock(hdev); 3016} 3017 3018static inline size_t eir_get_length(u8 *eir, size_t eir_len) 3019{ 3020 size_t parsed = 0; 3021 3022 while (parsed < eir_len) { 3023 u8 field_len = eir[0]; 3024 3025 if (field_len == 0) 3026 return parsed; 3027 3028 parsed += field_len + 1; 3029 eir += field_len + 1; 3030 } 3031 3032 return eir_len; 3033} 
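/* EIR data is a sequence of length-prefixed structures of the form
 * [len][type][data...], where len counts the type octet plus the data
 * and a zero length octet terminates the significant part. For example,
 * the bytes 0x05 0x09 'A' 'b' 'c' 'd' encode a Complete Local Name of
 * "Abcd", and eir_get_length() accounts for all 6 bytes of it.
 */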
3034 3035static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, 3036 struct sk_buff *skb) 3037{ 3038 struct inquiry_data data; 3039 struct extended_inquiry_info *info = (void *) (skb->data + 1); 3040 int num_rsp = *((__u8 *) skb->data); 3041 size_t eir_len; 3042 3043 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 3044 3045 if (!num_rsp) 3046 return; 3047 3048 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) 3049 return; 3050 3051 hci_dev_lock(hdev); 3052 3053 for (; num_rsp; num_rsp--, info++) { 3054 bool name_known, ssp; 3055 3056 bacpy(&data.bdaddr, &info->bdaddr); 3057 data.pscan_rep_mode = info->pscan_rep_mode; 3058 data.pscan_period_mode = info->pscan_period_mode; 3059 data.pscan_mode = 0x00; 3060 memcpy(data.dev_class, info->dev_class, 3); 3061 data.clock_offset = info->clock_offset; 3062 data.rssi = info->rssi; 3063 data.ssp_mode = 0x01; 3064 3065 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 3066 name_known = eir_has_data_type(info->data, 3067 sizeof(info->data), 3068 EIR_NAME_COMPLETE); 3069 else 3070 name_known = true; 3071 3072 name_known = hci_inquiry_cache_update(hdev, &data, name_known, 3073 &ssp); 3074 eir_len = eir_get_length(info->data, sizeof(info->data)); 3075 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3076 info->dev_class, info->rssi, !name_known, 3077 ssp, info->data, eir_len); 3078 } 3079 3080 hci_dev_unlock(hdev); 3081} 3082 3083static void hci_key_refresh_complete_evt(struct hci_dev *hdev, 3084 struct sk_buff *skb) 3085{ 3086 struct hci_ev_key_refresh_complete *ev = (void *) skb->data; 3087 struct hci_conn *conn; 3088 3089 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status, 3090 __le16_to_cpu(ev->handle)); 3091 3092 hci_dev_lock(hdev); 3093 3094 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3095 if (!conn) 3096 goto unlock; 3097 3098 if (!ev->status) 3099 conn->sec_level = conn->pending_sec_level; 3100 3101 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3102 3103 if (ev->status && conn->state == BT_CONNECTED) { 3104 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 3105 hci_conn_drop(conn); 3106 goto unlock; 3107 } 3108 3109 if (conn->state == BT_CONFIG) { 3110 if (!ev->status) 3111 conn->state = BT_CONNECTED; 3112 3113 hci_proto_connect_cfm(conn, ev->status); 3114 hci_conn_drop(conn); 3115 } else { 3116 hci_auth_cfm(conn, ev->status); 3117 3118 hci_conn_hold(conn); 3119 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3120 hci_conn_drop(conn); 3121 } 3122 3123unlock: 3124 hci_dev_unlock(hdev); 3125} 3126 3127static u8 hci_get_auth_req(struct hci_conn *conn) 3128{ 3129 /* If remote requests dedicated bonding follow that lead */ 3130 if (conn->remote_auth == HCI_AT_DEDICATED_BONDING || 3131 conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) { 3132 /* If both remote and local IO capabilities allow MITM 3133 * protection then require it, otherwise don't */ 3134 if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT || 3135 conn->io_capability == HCI_IO_NO_INPUT_OUTPUT) 3136 return HCI_AT_DEDICATED_BONDING; 3137 else 3138 return HCI_AT_DEDICATED_BONDING_MITM; 3139 } 3140 3141 /* If remote requests no-bonding follow that lead */ 3142 if (conn->remote_auth == HCI_AT_NO_BONDING || 3143 conn->remote_auth == HCI_AT_NO_BONDING_MITM) 3144 return conn->remote_auth | (conn->auth_type & 0x01); 3145 3146 return conn->auth_type; 3147} 3148 3149static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 3150{ 3151 struct hci_ev_io_capa_request *ev = (void *) skb->data; 3152 struct hci_conn *conn; 3153 3154 BT_DBG("%s", 
hdev->name); 3155 3156 hci_dev_lock(hdev); 3157 3158 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3159 if (!conn) 3160 goto unlock; 3161 3162 hci_conn_hold(conn); 3163 3164 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 3165 goto unlock; 3166 3167 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) || 3168 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 3169 struct hci_cp_io_capability_reply cp; 3170 3171 bacpy(&cp.bdaddr, &ev->bdaddr); 3172 /* Change the IO capability from KeyboardDisplay 3173 * to DisplayYesNo as it is not supported by BT spec. */ 3174 cp.capability = (conn->io_capability == 0x04) ? 3175 HCI_IO_DISPLAY_YESNO : conn->io_capability; 3176 conn->auth_type = hci_get_auth_req(conn); 3177 cp.authentication = conn->auth_type; 3178 3179 if (hci_find_remote_oob_data(hdev, &conn->dst) && 3180 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags))) 3181 cp.oob_data = 0x01; 3182 else 3183 cp.oob_data = 0x00; 3184 3185 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 3186 sizeof(cp), &cp); 3187 } else { 3188 struct hci_cp_io_capability_neg_reply cp; 3189 3190 bacpy(&cp.bdaddr, &ev->bdaddr); 3191 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; 3192 3193 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 3194 sizeof(cp), &cp); 3195 } 3196 3197unlock: 3198 hci_dev_unlock(hdev); 3199} 3200 3201static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) 3202{ 3203 struct hci_ev_io_capa_reply *ev = (void *) skb->data; 3204 struct hci_conn *conn; 3205 3206 BT_DBG("%s", hdev->name); 3207 3208 hci_dev_lock(hdev); 3209 3210 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3211 if (!conn) 3212 goto unlock; 3213 3214 conn->remote_cap = ev->capability; 3215 conn->remote_auth = ev->authentication; 3216 if (ev->oob_data) 3217 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags); 3218 3219unlock: 3220 hci_dev_unlock(hdev); 3221} 3222 3223static void hci_user_confirm_request_evt(struct hci_dev *hdev, 3224 struct sk_buff *skb) 3225{ 3226 struct hci_ev_user_confirm_req *ev = (void *) skb->data; 3227 int loc_mitm, rem_mitm, confirm_hint = 0; 3228 struct hci_conn *conn; 3229 3230 BT_DBG("%s", hdev->name); 3231 3232 hci_dev_lock(hdev); 3233 3234 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 3235 goto unlock; 3236 3237 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3238 if (!conn) 3239 goto unlock; 3240 3241 loc_mitm = (conn->auth_type & 0x01); 3242 rem_mitm = (conn->remote_auth & 0x01); 3243 3244 /* If we require MITM but the remote device can't provide that 3245 * (it has NoInputNoOutput) then reject the confirmation 3246 * request. The only exception is when we're dedicated bonding 3247 * initiators (connect_cfm_cb set) since then we always have the MITM 3248 * bit set. */ 3249 if (!conn->connect_cfm_cb && loc_mitm && 3250 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { 3251 BT_DBG("Rejecting request: remote device can't provide MITM"); 3252 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 3253 sizeof(ev->bdaddr), &ev->bdaddr); 3254 goto unlock; 3255 } 3256 3257 /* If no side requires MITM protection; auto-accept */ 3258 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && 3259 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { 3260 3261 /* If we're not the initiators request authorization to 3262 * proceed from user space (mgmt_user_confirm with 3263 * confirm_hint set to 1). 
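 * With confirm_hint set, user space is expected to ask for plain
 * authorization instead of showing a value to compare.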
*/ 3264 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 3265 BT_DBG("Confirming auto-accept as acceptor"); 3266 confirm_hint = 1; 3267 goto confirm; 3268 } 3269 3270 BT_DBG("Auto-accept of user confirmation with %ums delay", 3271 hdev->auto_accept_delay); 3272 3273 if (hdev->auto_accept_delay > 0) { 3274 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 3275 queue_delayed_work(conn->hdev->workqueue, 3276 &conn->auto_accept_work, delay); 3277 goto unlock; 3278 } 3279 3280 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 3281 sizeof(ev->bdaddr), &ev->bdaddr); 3282 goto unlock; 3283 } 3284 3285confirm: 3286 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey, 3287 confirm_hint); 3288 3289unlock: 3290 hci_dev_unlock(hdev); 3291} 3292 3293static void hci_user_passkey_request_evt(struct hci_dev *hdev, 3294 struct sk_buff *skb) 3295{ 3296 struct hci_ev_user_passkey_req *ev = (void *) skb->data; 3297 3298 BT_DBG("%s", hdev->name); 3299 3300 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 3301 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); 3302} 3303 3304static void hci_user_passkey_notify_evt(struct hci_dev *hdev, 3305 struct sk_buff *skb) 3306{ 3307 struct hci_ev_user_passkey_notify *ev = (void *) skb->data; 3308 struct hci_conn *conn; 3309 3310 BT_DBG("%s", hdev->name); 3311 3312 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3313 if (!conn) 3314 return; 3315 3316 conn->passkey_notify = __le32_to_cpu(ev->passkey); 3317 conn->passkey_entered = 0; 3318 3319 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 3320 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 3321 conn->dst_type, conn->passkey_notify, 3322 conn->passkey_entered); 3323} 3324 3325static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 3326{ 3327 struct hci_ev_keypress_notify *ev = (void *) skb->data; 3328 struct hci_conn *conn; 3329 3330 BT_DBG("%s", hdev->name); 3331 3332 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3333 if (!conn) 3334 return; 3335 3336 switch (ev->type) { 3337 case HCI_KEYPRESS_STARTED: 3338 conn->passkey_entered = 0; 3339 return; 3340 3341 case HCI_KEYPRESS_ENTERED: 3342 conn->passkey_entered++; 3343 break; 3344 3345 case HCI_KEYPRESS_ERASED: 3346 conn->passkey_entered--; 3347 break; 3348 3349 case HCI_KEYPRESS_CLEARED: 3350 conn->passkey_entered = 0; 3351 break; 3352 3353 case HCI_KEYPRESS_COMPLETED: 3354 return; 3355 } 3356 3357 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 3358 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 3359 conn->dst_type, conn->passkey_notify, 3360 conn->passkey_entered); 3361} 3362 3363static void hci_simple_pair_complete_evt(struct hci_dev *hdev, 3364 struct sk_buff *skb) 3365{ 3366 struct hci_ev_simple_pair_complete *ev = (void *) skb->data; 3367 struct hci_conn *conn; 3368 3369 BT_DBG("%s", hdev->name); 3370 3371 hci_dev_lock(hdev); 3372 3373 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3374 if (!conn) 3375 goto unlock; 3376 3377 /* To avoid duplicate auth_failed events to user space we check 3378 * the HCI_CONN_AUTH_PEND flag which will be set if we 3379 * initiated the authentication. 
A traditional auth_complete 3380 * event gets always produced as initiator and is also mapped to 3381 * the mgmt_auth_failed event */ 3382 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status) 3383 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type, 3384 ev->status); 3385 3386 hci_conn_drop(conn); 3387 3388unlock: 3389 hci_dev_unlock(hdev); 3390} 3391 3392static void hci_remote_host_features_evt(struct hci_dev *hdev, 3393 struct sk_buff *skb) 3394{ 3395 struct hci_ev_remote_host_features *ev = (void *) skb->data; 3396 struct inquiry_entry *ie; 3397 struct hci_conn *conn; 3398 3399 BT_DBG("%s", hdev->name); 3400 3401 hci_dev_lock(hdev); 3402 3403 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3404 if (conn) 3405 memcpy(conn->features[1], ev->features, 8); 3406 3407 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 3408 if (ie) 3409 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 3410 3411 hci_dev_unlock(hdev); 3412} 3413 3414static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, 3415 struct sk_buff *skb) 3416{ 3417 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data; 3418 struct oob_data *data; 3419 3420 BT_DBG("%s", hdev->name); 3421 3422 hci_dev_lock(hdev); 3423 3424 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 3425 goto unlock; 3426 3427 data = hci_find_remote_oob_data(hdev, &ev->bdaddr); 3428 if (data) { 3429 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) { 3430 struct hci_cp_remote_oob_ext_data_reply cp; 3431 3432 bacpy(&cp.bdaddr, &ev->bdaddr); 3433 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); 3434 memcpy(cp.randomizer192, data->randomizer192, 3435 sizeof(cp.randomizer192)); 3436 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); 3437 memcpy(cp.randomizer256, data->randomizer256, 3438 sizeof(cp.randomizer256)); 3439 3440 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, 3441 sizeof(cp), &cp); 3442 } else { 3443 struct hci_cp_remote_oob_data_reply cp; 3444 3445 bacpy(&cp.bdaddr, &ev->bdaddr); 3446 memcpy(cp.hash, data->hash192, sizeof(cp.hash)); 3447 memcpy(cp.randomizer, data->randomizer192, 3448 sizeof(cp.randomizer)); 3449 3450 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, 3451 sizeof(cp), &cp); 3452 } 3453 } else { 3454 struct hci_cp_remote_oob_data_neg_reply cp; 3455 3456 bacpy(&cp.bdaddr, &ev->bdaddr); 3457 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, 3458 sizeof(cp), &cp); 3459 } 3460 3461unlock: 3462 hci_dev_unlock(hdev); 3463} 3464 3465static void hci_phy_link_complete_evt(struct hci_dev *hdev, 3466 struct sk_buff *skb) 3467{ 3468 struct hci_ev_phy_link_complete *ev = (void *) skb->data; 3469 struct hci_conn *hcon, *bredr_hcon; 3470 3471 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle, 3472 ev->status); 3473 3474 hci_dev_lock(hdev); 3475 3476 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 3477 if (!hcon) { 3478 hci_dev_unlock(hdev); 3479 return; 3480 } 3481 3482 if (ev->status) { 3483 hci_conn_del(hcon); 3484 hci_dev_unlock(hdev); 3485 return; 3486 } 3487 3488 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; 3489 3490 hcon->state = BT_CONNECTED; 3491 bacpy(&hcon->dst, &bredr_hcon->dst); 3492 3493 hci_conn_hold(hcon); 3494 hcon->disc_timeout = HCI_DISCONN_TIMEOUT; 3495 hci_conn_drop(hcon); 3496 3497 hci_conn_add_sysfs(hcon); 3498 3499 amp_physical_cfm(bredr_hcon, hcon); 3500 3501 hci_dev_unlock(hdev); 3502} 3503 3504static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 3505{ 3506 struct hci_ev_logical_link_complete *ev = (void *) 
skb->data; 3507 struct hci_conn *hcon; 3508 struct hci_chan *hchan; 3509 struct amp_mgr *mgr; 3510 3511 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", 3512 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle, 3513 ev->status); 3514 3515 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 3516 if (!hcon) 3517 return; 3518 3519 /* Create AMP hchan */ 3520 hchan = hci_chan_create(hcon); 3521 if (!hchan) 3522 return; 3523 3524 hchan->handle = le16_to_cpu(ev->handle); 3525 3526 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); 3527 3528 mgr = hcon->amp_mgr; 3529 if (mgr && mgr->bredr_chan) { 3530 struct l2cap_chan *bredr_chan = mgr->bredr_chan; 3531 3532 l2cap_chan_lock(bredr_chan); 3533 3534 bredr_chan->conn->mtu = hdev->block_mtu; 3535 l2cap_logical_cfm(bredr_chan, hchan, 0); 3536 hci_conn_hold(hcon); 3537 3538 l2cap_chan_unlock(bredr_chan); 3539 } 3540} 3541 3542static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, 3543 struct sk_buff *skb) 3544{ 3545 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data; 3546 struct hci_chan *hchan; 3547 3548 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name, 3549 le16_to_cpu(ev->handle), ev->status); 3550 3551 if (ev->status) 3552 return; 3553 3554 hci_dev_lock(hdev); 3555 3556 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); 3557 if (!hchan) 3558 goto unlock; 3559 3560 amp_destroy_logical_link(hchan, ev->reason); 3561 3562unlock: 3563 hci_dev_unlock(hdev); 3564} 3565 3566static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, 3567 struct sk_buff *skb) 3568{ 3569 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data; 3570 struct hci_conn *hcon; 3571 3572 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3573 3574 if (ev->status) 3575 return; 3576 3577 hci_dev_lock(hdev); 3578 3579 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 3580 if (hcon) { 3581 hcon->state = BT_CLOSED; 3582 hci_conn_del(hcon); 3583 } 3584 3585 hci_dev_unlock(hdev); 3586} 3587 3588static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 3589{ 3590 struct hci_ev_le_conn_complete *ev = (void *) skb->data; 3591 struct hci_conn *conn; 3592 struct smp_irk *irk; 3593 3594 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3595 3596 hci_dev_lock(hdev); 3597 3598 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); 3599 if (!conn) { 3600 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr); 3601 if (!conn) { 3602 BT_ERR("No memory for new connection"); 3603 goto unlock; 3604 } 3605 3606 conn->dst_type = ev->bdaddr_type; 3607 3608 /* The advertising parameters for own address type 3609 * define which source address and source address 3610 * type this connections has. 3611 */ 3612 if (bacmp(&conn->src, BDADDR_ANY)) { 3613 conn->src_type = ADDR_LE_DEV_PUBLIC; 3614 } else { 3615 bacpy(&conn->src, &hdev->static_addr); 3616 conn->src_type = ADDR_LE_DEV_RANDOM; 3617 } 3618 3619 if (ev->role == LE_CONN_ROLE_MASTER) { 3620 conn->out = true; 3621 conn->link_mode |= HCI_LM_MASTER; 3622 } 3623 } 3624 3625 /* Ensure that the hci_conn contains the identity address type 3626 * regardless of which address the connection was made with. 3627 * 3628 * If the controller has a public BD_ADDR, then by default 3629 * use that one. If this is a LE only controller without 3630 * a public address, default to the static random address. 
3631 * 3632 * For debugging purposes it is possible to force 3633 * controllers with a public address to use the static 3634 * random address instead. 3635 */ 3636 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) || 3637 !bacmp(&hdev->bdaddr, BDADDR_ANY)) { 3638 bacpy(&conn->src, &hdev->static_addr); 3639 conn->src_type = ADDR_LE_DEV_RANDOM; 3640 } else { 3641 bacpy(&conn->src, &hdev->bdaddr); 3642 conn->src_type = ADDR_LE_DEV_PUBLIC; 3643 } 3644 3645 /* Lookup the identity address from the stored connection 3646 * address and address type. 3647 * 3648 * When establishing connections to an identity address, the 3649 * connection procedure will store the resolvable random 3650 * address first. Now if it can be converted back into the 3651 * identity address, start using the identity address from 3652 * now on. 3653 */ 3654 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type); 3655 if (irk) { 3656 bacpy(&conn->dst, &irk->bdaddr); 3657 conn->dst_type = irk->addr_type; 3658 } 3659 3660 if (ev->status) { 3661 mgmt_connect_failed(hdev, &conn->dst, conn->type, 3662 conn->dst_type, ev->status); 3663 hci_proto_connect_cfm(conn, ev->status); 3664 conn->state = BT_CLOSED; 3665 hci_conn_del(conn); 3666 goto unlock; 3667 } 3668 3669 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 3670 mgmt_device_connected(hdev, &conn->dst, conn->type, 3671 conn->dst_type, 0, NULL, 0, NULL); 3672 3673 conn->sec_level = BT_SECURITY_LOW; 3674 conn->handle = __le16_to_cpu(ev->handle); 3675 conn->state = BT_CONNECTED; 3676 3677 if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags)) 3678 set_bit(HCI_CONN_6LOWPAN, &conn->flags); 3679 3680 hci_conn_add_sysfs(conn); 3681 3682 hci_proto_connect_cfm(conn, ev->status); 3683 3684unlock: 3685 hci_dev_unlock(hdev); 3686} 3687 3688static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) 3689{ 3690 u8 num_reports = skb->data[0]; 3691 void *ptr = &skb->data[1]; 3692 s8 rssi; 3693 3694 while (num_reports--) { 3695 struct hci_ev_le_advertising_info *ev = ptr; 3696 3697 rssi = ev->data[ev->length]; 3698 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type, 3699 NULL, rssi, 0, 1, ev->data, ev->length); 3700 3701 ptr += sizeof(*ev) + ev->length + 1; 3702 } 3703} 3704 3705static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 3706{ 3707 struct hci_ev_le_ltk_req *ev = (void *) skb->data; 3708 struct hci_cp_le_ltk_reply cp; 3709 struct hci_cp_le_ltk_neg_reply neg; 3710 struct hci_conn *conn; 3711 struct smp_ltk *ltk; 3712 3713 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle)); 3714 3715 hci_dev_lock(hdev); 3716 3717 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3718 if (conn == NULL) 3719 goto not_found; 3720 3721 ltk = hci_find_ltk(hdev, ev->ediv, ev->random, conn->out); 3722 if (ltk == NULL) 3723 goto not_found; 3724 3725 memcpy(cp.ltk, ltk->val, sizeof(ltk->val)); 3726 cp.handle = cpu_to_le16(conn->handle); 3727 3728 if (ltk->authenticated) 3729 conn->pending_sec_level = BT_SECURITY_HIGH; 3730 else 3731 conn->pending_sec_level = BT_SECURITY_MEDIUM; 3732 3733 conn->enc_key_size = ltk->enc_size; 3734 3735 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); 3736 3737 if (ltk->type & HCI_SMP_STK) { 3738 list_del(&ltk->list); 3739 kfree(ltk); 3740 } 3741 3742 hci_dev_unlock(hdev); 3743 3744 return; 3745 3746not_found: 3747 neg.handle = ev->handle; 3748 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg); 3749 hci_dev_unlock(hdev); 3750} 3751 3752static void 
hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) 3753{ 3754 struct hci_ev_le_meta *le_ev = (void *) skb->data; 3755 3756 skb_pull(skb, sizeof(*le_ev)); 3757 3758 switch (le_ev->subevent) { 3759 case HCI_EV_LE_CONN_COMPLETE: 3760 hci_le_conn_complete_evt(hdev, skb); 3761 break; 3762 3763 case HCI_EV_LE_ADVERTISING_REPORT: 3764 hci_le_adv_report_evt(hdev, skb); 3765 break; 3766 3767 case HCI_EV_LE_LTK_REQ: 3768 hci_le_ltk_request_evt(hdev, skb); 3769 break; 3770 3771 default: 3772 break; 3773 } 3774} 3775 3776static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb) 3777{ 3778 struct hci_ev_channel_selected *ev = (void *) skb->data; 3779 struct hci_conn *hcon; 3780 3781 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle); 3782 3783 skb_pull(skb, sizeof(*ev)); 3784 3785 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 3786 if (!hcon) 3787 return; 3788 3789 amp_read_loc_assoc_final_data(hdev, hcon); 3790} 3791 3792void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 3793{ 3794 struct hci_event_hdr *hdr = (void *) skb->data; 3795 __u8 event = hdr->evt; 3796 3797 hci_dev_lock(hdev); 3798 3799 /* Received events are (currently) only needed when a request is 3800 * ongoing so avoid unnecessary memory allocation. 3801 */ 3802 if (hdev->req_status == HCI_REQ_PEND) { 3803 kfree_skb(hdev->recv_evt); 3804 hdev->recv_evt = skb_clone(skb, GFP_KERNEL); 3805 } 3806 3807 hci_dev_unlock(hdev); 3808 3809 skb_pull(skb, HCI_EVENT_HDR_SIZE); 3810 3811 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) { 3812 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data; 3813 u16 opcode = __le16_to_cpu(cmd_hdr->opcode); 3814 3815 hci_req_cmd_complete(hdev, opcode, 0); 3816 } 3817 3818 switch (event) { 3819 case HCI_EV_INQUIRY_COMPLETE: 3820 hci_inquiry_complete_evt(hdev, skb); 3821 break; 3822 3823 case HCI_EV_INQUIRY_RESULT: 3824 hci_inquiry_result_evt(hdev, skb); 3825 break; 3826 3827 case HCI_EV_CONN_COMPLETE: 3828 hci_conn_complete_evt(hdev, skb); 3829 break; 3830 3831 case HCI_EV_CONN_REQUEST: 3832 hci_conn_request_evt(hdev, skb); 3833 break; 3834 3835 case HCI_EV_DISCONN_COMPLETE: 3836 hci_disconn_complete_evt(hdev, skb); 3837 break; 3838 3839 case HCI_EV_AUTH_COMPLETE: 3840 hci_auth_complete_evt(hdev, skb); 3841 break; 3842 3843 case HCI_EV_REMOTE_NAME: 3844 hci_remote_name_evt(hdev, skb); 3845 break; 3846 3847 case HCI_EV_ENCRYPT_CHANGE: 3848 hci_encrypt_change_evt(hdev, skb); 3849 break; 3850 3851 case HCI_EV_CHANGE_LINK_KEY_COMPLETE: 3852 hci_change_link_key_complete_evt(hdev, skb); 3853 break; 3854 3855 case HCI_EV_REMOTE_FEATURES: 3856 hci_remote_features_evt(hdev, skb); 3857 break; 3858 3859 case HCI_EV_CMD_COMPLETE: 3860 hci_cmd_complete_evt(hdev, skb); 3861 break; 3862 3863 case HCI_EV_CMD_STATUS: 3864 hci_cmd_status_evt(hdev, skb); 3865 break; 3866 3867 case HCI_EV_ROLE_CHANGE: 3868 hci_role_change_evt(hdev, skb); 3869 break; 3870 3871 case HCI_EV_NUM_COMP_PKTS: 3872 hci_num_comp_pkts_evt(hdev, skb); 3873 break; 3874 3875 case HCI_EV_MODE_CHANGE: 3876 hci_mode_change_evt(hdev, skb); 3877 break; 3878 3879 case HCI_EV_PIN_CODE_REQ: 3880 hci_pin_code_request_evt(hdev, skb); 3881 break; 3882 3883 case HCI_EV_LINK_KEY_REQ: 3884 hci_link_key_request_evt(hdev, skb); 3885 break; 3886 3887 case HCI_EV_LINK_KEY_NOTIFY: 3888 hci_link_key_notify_evt(hdev, skb); 3889 break; 3890 3891 case HCI_EV_CLOCK_OFFSET: 3892 hci_clock_offset_evt(hdev, skb); 3893 break; 3894 3895 case HCI_EV_PKT_TYPE_CHANGE: 3896 hci_pkt_type_change_evt(hdev, skb); 3897 
break; 3898 3899 case HCI_EV_PSCAN_REP_MODE: 3900 hci_pscan_rep_mode_evt(hdev, skb); 3901 break; 3902 3903 case HCI_EV_INQUIRY_RESULT_WITH_RSSI: 3904 hci_inquiry_result_with_rssi_evt(hdev, skb); 3905 break; 3906 3907 case HCI_EV_REMOTE_EXT_FEATURES: 3908 hci_remote_ext_features_evt(hdev, skb); 3909 break; 3910 3911 case HCI_EV_SYNC_CONN_COMPLETE: 3912 hci_sync_conn_complete_evt(hdev, skb); 3913 break; 3914 3915 case HCI_EV_EXTENDED_INQUIRY_RESULT: 3916 hci_extended_inquiry_result_evt(hdev, skb); 3917 break; 3918 3919 case HCI_EV_KEY_REFRESH_COMPLETE: 3920 hci_key_refresh_complete_evt(hdev, skb); 3921 break; 3922 3923 case HCI_EV_IO_CAPA_REQUEST: 3924 hci_io_capa_request_evt(hdev, skb); 3925 break; 3926 3927 case HCI_EV_IO_CAPA_REPLY: 3928 hci_io_capa_reply_evt(hdev, skb); 3929 break; 3930 3931 case HCI_EV_USER_CONFIRM_REQUEST: 3932 hci_user_confirm_request_evt(hdev, skb); 3933 break; 3934 3935 case HCI_EV_USER_PASSKEY_REQUEST: 3936 hci_user_passkey_request_evt(hdev, skb); 3937 break; 3938 3939 case HCI_EV_USER_PASSKEY_NOTIFY: 3940 hci_user_passkey_notify_evt(hdev, skb); 3941 break; 3942 3943 case HCI_EV_KEYPRESS_NOTIFY: 3944 hci_keypress_notify_evt(hdev, skb); 3945 break; 3946 3947 case HCI_EV_SIMPLE_PAIR_COMPLETE: 3948 hci_simple_pair_complete_evt(hdev, skb); 3949 break; 3950 3951 case HCI_EV_REMOTE_HOST_FEATURES: 3952 hci_remote_host_features_evt(hdev, skb); 3953 break; 3954 3955 case HCI_EV_LE_META: 3956 hci_le_meta_evt(hdev, skb); 3957 break; 3958 3959 case HCI_EV_CHANNEL_SELECTED: 3960 hci_chan_selected_evt(hdev, skb); 3961 break; 3962 3963 case HCI_EV_REMOTE_OOB_DATA_REQUEST: 3964 hci_remote_oob_data_request_evt(hdev, skb); 3965 break; 3966 3967 case HCI_EV_PHY_LINK_COMPLETE: 3968 hci_phy_link_complete_evt(hdev, skb); 3969 break; 3970 3971 case HCI_EV_LOGICAL_LINK_COMPLETE: 3972 hci_loglink_complete_evt(hdev, skb); 3973 break; 3974 3975 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE: 3976 hci_disconn_loglink_complete_evt(hdev, skb); 3977 break; 3978 3979 case HCI_EV_DISCONN_PHY_LINK_COMPLETE: 3980 hci_disconn_phylink_complete_evt(hdev, skb); 3981 break; 3982 3983 case HCI_EV_NUM_COMP_BLOCKS: 3984 hci_num_comp_blocks_evt(hdev, skb); 3985 break; 3986 3987 default: 3988 BT_DBG("%s event 0x%2.2x", hdev->name, event); 3989 break; 3990 } 3991 3992 kfree_skb(skb); 3993 hdev->stat.evt_rx++; 3994} 3995
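/* Illustrative sketch, not part of the original file: a minimal,
 * hypothetical helper showing how an HCI Command Complete event for
 * HCI_OP_RESET is laid out before it reaches hci_event_packet().
 * It assumes a valid, registered hdev and omits error handling beyond
 * the allocation check.
 */
static void __maybe_unused example_inject_reset_complete(struct hci_dev *hdev)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_cmd_complete *cc;
	struct sk_buff *skb;
	u8 *status;

	/* Event header + Command Complete parameters + one status byte */
	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*cc) + 1, GFP_KERNEL);
	if (!skb)
		return;

	hdr = (struct hci_event_hdr *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_CMD_COMPLETE;
	hdr->plen = sizeof(*cc) + 1;

	cc = (struct hci_ev_cmd_complete *) skb_put(skb, sizeof(*cc));
	cc->ncmd = 1;				/* controller can take one more command */
	cc->opcode = cpu_to_le16(HCI_OP_RESET);

	status = skb_put(skb, 1);
	*status = 0x00;				/* success */

	/* hci_event_packet() pulls the header, dispatches on hdr->evt and
	 * frees the skb when it is done.
	 */
	hci_event_packet(hdev, skb);
}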