hci_event.c revision 9238f36a5a5097018b90baa42c473d2f916a46f5
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
#include <net/bluetooth/a2mp.h>
#include <net/bluetooth/amp.h>

/* Handle HCI Event packets */

static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_stop_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	clear_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status);
	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);

	hci_conn_check_pending(hdev);
}

static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		if (rp->role)
			conn->link_mode &= ~HCI_LM_MASTER;
		else
			conn->link_mode |= HCI_LM_MASTER;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	if (!status)
		hdev->link_policy = get_unaligned_le16(sent);

	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
}

static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	hci_req_complete(hdev, HCI_OP_RESET, status);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
			     BIT(HCI_PERIODIC_INQ));

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	if (!status && !test_bit(HCI_INIT, &hdev->flags))
		hci_update_ad(hdev);

	hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev,
				 HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);

	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param)
			set_bit(HCI_ENCRYPT, &hdev->flags);
		else
			clear_bit(HCI_ENCRYPT, &hdev->flags);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
}

static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting ==
	    setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		if (sent->mode)
			hdev->host_features[0] |= LMP_HOST_SSP;
		else
			hdev->host_features[0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	hdev->hci_ver = rp->hci_ver;
	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
	hdev->lmp_ver = rp->lmp_ver;
	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

	BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
	       hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
}

static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (!rp->status)
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
}

static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device.
	 */

	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
	       hdev->features[0], hdev->features[1],
	       hdev->features[2], hdev->features[3],
	       hdev->features[4], hdev->features[5],
	       hdev->features[6], hdev->features[7]);
}

static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	switch (rp->page) {
	case 0:
		memcpy(hdev->features, rp->features, 8);
		break;
	case 1:
		memcpy(hdev->host_features, rp->features, 8);
		break;
	}

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
}

static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;

	hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
}

static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}

static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (!rp->status)
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
}

static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);

	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
}

static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}

static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}

static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
	struct amp_assoc *assoc = &hdev->loc_assoc;
	size_t rem_len, frag_len;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	frag_len = skb->len - sizeof(*rp);
	rem_len = __le16_to_cpu(rp->rem_len);

	if (rem_len > frag_len) {
		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);

		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
		assoc->offset += frag_len;

		/* Read other fragments */
		amp_read_loc_assoc_frag(hdev, rp->phy_handle);

		return;
	}

	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
	assoc->len = assoc->offset + rem_len;
	assoc->offset = 0;

a2mp_rsp:
	/* Send A2MP Rsp when all fragments are received */
	a2mp_send_getampassoc_rsp(hdev, rp->status);
	a2mp_send_create_phy_link_req(hdev, rp->status);
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
}

static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
}

static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (!rp->status)
		hdev->inq_tx_power = rp->tx_power;

	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
}

static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
}

static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
}

static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (!rp->status)
		memcpy(hdev->le_features, rp->features, 8);

	hci_req_complete(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, rp->status);
}

static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (!rp->status) {
		hdev->adv_tx_power = rp->tx_power;
		if (!test_bit(HCI_INIT, &hdev->flags))
			hci_update_ad(hdev);
	}

	hci_req_complete(hdev, HCI_OP_LE_READ_ADV_TX_POWER, rp->status);
}

static void hci_cc_le_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_LE_SET_EVENT_MASK, status);
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void
hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
			      struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
						rp->randomizer, rp->status);
	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (*sent)
			set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
		else
			clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);

	if (!test_bit(HCI_INIT, &hdev->flags))
		hci_update_ad(hdev);

	hci_req_complete(hdev, HCI_OP_LE_SET_ADV_ENABLE, status);
}

static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}
}

static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCANNING_ENABLED:
		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);

		if (status) {
			hci_dev_lock(hdev);
			mgmt_start_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		hci_dev_unlock(hdev);
		break;

	case LE_SCANNING_DISABLED:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_stop_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    hdev->discovery.state == DISCOVERY_FINDING) {
			mgmt_interleaved_discovery(hdev);
		} else {
			hci_dev_lock(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			hci_dev_unlock(hdev);
		}

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}

static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (!rp->status)
		hdev->le_white_list_size = rp->size;

	hci_req_complete(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, rp->status);
}

static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
}

static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
}

static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (!rp->status)
		memcpy(hdev->le_states, rp->le_states, 8);

	hci_req_complete(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, rp->status);
}

static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (!status) {
		if (sent->le)
			hdev->host_features[0] |= LMP_HOST_LE;
		else
			hdev->host_features[0] &= ~LMP_HOST_LE;

		if (sent->simul)
			hdev->host_features[0] |= LMP_HOST_LE_BREDR;
		else
			hdev->host_features[0] &= ~LMP_HOST_LE_BREDR;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_INIT, &hdev->flags))
		mgmt_le_enable_complete(hdev, sent->le, status);

	hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
}

static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
	       hdev->name, rp->status, rp->phy_handle);

	if (rp->status)
		return;

	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
}

static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
		hci_conn_check_pending(hdev);
		hci_dev_lock(hdev);
		if
		    (test_bit(HCI_MGMT, &hdev->dev_flags))
			mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);
}

static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request
	   authentication for SSP connections or non-SSP
	 * devices with sec_level HIGH or if MITM protection is requested */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_HIGH)
		return 0;

	return 1;
}

static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}

static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending.
there is no need to continue resolving a next name as it 1385 * will be done upon receiving another Remote Name Request Complete 1386 * Event */ 1387 if (!e) 1388 return; 1389 1390 list_del(&e->list); 1391 if (name) { 1392 e->name_state = NAME_KNOWN; 1393 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, 1394 e->data.rssi, name, name_len); 1395 } else { 1396 e->name_state = NAME_NOT_KNOWN; 1397 } 1398 1399 if (hci_resolve_next_name(hdev)) 1400 return; 1401 1402discov_complete: 1403 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1404} 1405 1406static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) 1407{ 1408 struct hci_cp_remote_name_req *cp; 1409 struct hci_conn *conn; 1410 1411 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1412 1413 /* If successful wait for the name req complete event before 1414 * checking for the need to do authentication */ 1415 if (!status) 1416 return; 1417 1418 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ); 1419 if (!cp) 1420 return; 1421 1422 hci_dev_lock(hdev); 1423 1424 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1425 1426 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 1427 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); 1428 1429 if (!conn) 1430 goto unlock; 1431 1432 if (!hci_outgoing_auth_needed(hdev, conn)) 1433 goto unlock; 1434 1435 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 1436 struct hci_cp_auth_requested cp; 1437 cp.handle = __cpu_to_le16(conn->handle); 1438 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 1439 } 1440 1441unlock: 1442 hci_dev_unlock(hdev); 1443} 1444 1445static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) 1446{ 1447 struct hci_cp_read_remote_features *cp; 1448 struct hci_conn *conn; 1449 1450 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1451 1452 if (!status) 1453 return; 1454 1455 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); 1456 if (!cp) 1457 return; 1458 1459 hci_dev_lock(hdev); 1460 1461 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1462 if (conn) { 1463 if (conn->state == BT_CONFIG) { 1464 hci_proto_connect_cfm(conn, status); 1465 hci_conn_put(conn); 1466 } 1467 } 1468 1469 hci_dev_unlock(hdev); 1470} 1471 1472static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) 1473{ 1474 struct hci_cp_read_remote_ext_features *cp; 1475 struct hci_conn *conn; 1476 1477 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1478 1479 if (!status) 1480 return; 1481 1482 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); 1483 if (!cp) 1484 return; 1485 1486 hci_dev_lock(hdev); 1487 1488 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1489 if (conn) { 1490 if (conn->state == BT_CONFIG) { 1491 hci_proto_connect_cfm(conn, status); 1492 hci_conn_put(conn); 1493 } 1494 } 1495 1496 hci_dev_unlock(hdev); 1497} 1498 1499static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) 1500{ 1501 struct hci_cp_setup_sync_conn *cp; 1502 struct hci_conn *acl, *sco; 1503 __u16 handle; 1504 1505 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1506 1507 if (!status) 1508 return; 1509 1510 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); 1511 if (!cp) 1512 return; 1513 1514 handle = __le16_to_cpu(cp->handle); 1515 1516 BT_DBG("%s handle 0x%4.4x", hdev->name, handle); 1517 1518 hci_dev_lock(hdev); 1519 1520 acl = hci_conn_hash_lookup_handle(hdev, handle); 1521 if (acl) { 1522 sco = acl->link; 1523 if (sco) { 1524 sco->state = BT_CLOSED; 1525 1526 
hci_proto_connect_cfm(sco, status); 1527 hci_conn_del(sco); 1528 } 1529 } 1530 1531 hci_dev_unlock(hdev); 1532} 1533 1534static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) 1535{ 1536 struct hci_cp_sniff_mode *cp; 1537 struct hci_conn *conn; 1538 1539 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1540 1541 if (!status) 1542 return; 1543 1544 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); 1545 if (!cp) 1546 return; 1547 1548 hci_dev_lock(hdev); 1549 1550 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1551 if (conn) { 1552 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 1553 1554 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 1555 hci_sco_setup(conn, status); 1556 } 1557 1558 hci_dev_unlock(hdev); 1559} 1560 1561static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) 1562{ 1563 struct hci_cp_exit_sniff_mode *cp; 1564 struct hci_conn *conn; 1565 1566 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1567 1568 if (!status) 1569 return; 1570 1571 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); 1572 if (!cp) 1573 return; 1574 1575 hci_dev_lock(hdev); 1576 1577 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1578 if (conn) { 1579 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 1580 1581 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 1582 hci_sco_setup(conn, status); 1583 } 1584 1585 hci_dev_unlock(hdev); 1586} 1587 1588static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) 1589{ 1590 struct hci_cp_disconnect *cp; 1591 struct hci_conn *conn; 1592 1593 if (!status) 1594 return; 1595 1596 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); 1597 if (!cp) 1598 return; 1599 1600 hci_dev_lock(hdev); 1601 1602 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1603 if (conn) 1604 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 1605 conn->dst_type, status); 1606 1607 hci_dev_unlock(hdev); 1608} 1609 1610static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status) 1611{ 1612 struct hci_conn *conn; 1613 1614 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1615 1616 if (status) { 1617 hci_dev_lock(hdev); 1618 1619 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); 1620 if (!conn) { 1621 hci_dev_unlock(hdev); 1622 return; 1623 } 1624 1625 BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn); 1626 1627 conn->state = BT_CLOSED; 1628 mgmt_connect_failed(hdev, &conn->dst, conn->type, 1629 conn->dst_type, status); 1630 hci_proto_connect_cfm(conn, status); 1631 hci_conn_del(conn); 1632 1633 hci_dev_unlock(hdev); 1634 } 1635} 1636 1637static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) 1638{ 1639 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1640} 1641 1642static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status) 1643{ 1644 struct hci_cp_create_phy_link *cp; 1645 1646 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1647 1648 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK); 1649 if (!cp) 1650 return; 1651 1652 hci_dev_lock(hdev); 1653 1654 if (status) { 1655 struct hci_conn *hcon; 1656 1657 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle); 1658 if (hcon) 1659 hci_conn_del(hcon); 1660 } else { 1661 amp_write_remote_assoc(hdev, cp->phy_handle); 1662 } 1663 1664 hci_dev_unlock(hdev); 1665} 1666 1667static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status) 1668{ 1669 struct hci_cp_accept_phy_link *cp; 1670 1671 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1672 1673 if (status) 1674 return; 1675 
1676 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK); 1677 if (!cp) 1678 return; 1679 1680 amp_write_remote_assoc(hdev, cp->phy_handle); 1681} 1682 1683static void hci_cs_create_logical_link(struct hci_dev *hdev, u8 status) 1684{ 1685 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1686} 1687 1688static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1689{ 1690 __u8 status = *((__u8 *) skb->data); 1691 struct discovery_state *discov = &hdev->discovery; 1692 struct inquiry_entry *e; 1693 1694 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1695 1696 hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status); 1697 hci_req_complete(hdev, HCI_OP_INQUIRY, status); 1698 1699 hci_conn_check_pending(hdev); 1700 1701 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 1702 return; 1703 1704 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 1705 return; 1706 1707 hci_dev_lock(hdev); 1708 1709 if (discov->state != DISCOVERY_FINDING) 1710 goto unlock; 1711 1712 if (list_empty(&discov->resolve)) { 1713 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1714 goto unlock; 1715 } 1716 1717 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 1718 if (e && hci_resolve_name(hdev, e) == 0) { 1719 e->name_state = NAME_PENDING; 1720 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); 1721 } else { 1722 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1723 } 1724 1725unlock: 1726 hci_dev_unlock(hdev); 1727} 1728 1729static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 1730{ 1731 struct inquiry_data data; 1732 struct inquiry_info *info = (void *) (skb->data + 1); 1733 int num_rsp = *((__u8 *) skb->data); 1734 1735 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 1736 1737 if (!num_rsp) 1738 return; 1739 1740 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) 1741 return; 1742 1743 hci_dev_lock(hdev); 1744 1745 for (; num_rsp; num_rsp--, info++) { 1746 bool name_known, ssp; 1747 1748 bacpy(&data.bdaddr, &info->bdaddr); 1749 data.pscan_rep_mode = info->pscan_rep_mode; 1750 data.pscan_period_mode = info->pscan_period_mode; 1751 data.pscan_mode = info->pscan_mode; 1752 memcpy(data.dev_class, info->dev_class, 3); 1753 data.clock_offset = info->clock_offset; 1754 data.rssi = 0x00; 1755 data.ssp_mode = 0x00; 1756 1757 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp); 1758 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 1759 info->dev_class, 0, !name_known, ssp, NULL, 1760 0); 1761 } 1762 1763 hci_dev_unlock(hdev); 1764} 1765 1766static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1767{ 1768 struct hci_ev_conn_complete *ev = (void *) skb->data; 1769 struct hci_conn *conn; 1770 1771 BT_DBG("%s", hdev->name); 1772 1773 hci_dev_lock(hdev); 1774 1775 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 1776 if (!conn) { 1777 if (ev->link_type != SCO_LINK) 1778 goto unlock; 1779 1780 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 1781 if (!conn) 1782 goto unlock; 1783 1784 conn->type = SCO_LINK; 1785 } 1786 1787 if (!ev->status) { 1788 conn->handle = __le16_to_cpu(ev->handle); 1789 1790 if (conn->type == ACL_LINK) { 1791 conn->state = BT_CONFIG; 1792 hci_conn_hold(conn); 1793 1794 if (!conn->out && !hci_conn_ssp_enabled(conn) && 1795 !hci_find_link_key(hdev, &ev->bdaddr)) 1796 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 1797 else 1798 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 1799 } else 1800 conn->state = BT_CONNECTED; 1801 1802 hci_conn_hold_device(conn); 1803 hci_conn_add_sysfs(conn); 1804 1805 if 
(test_bit(HCI_AUTH, &hdev->flags)) 1806 conn->link_mode |= HCI_LM_AUTH; 1807 1808 if (test_bit(HCI_ENCRYPT, &hdev->flags)) 1809 conn->link_mode |= HCI_LM_ENCRYPT; 1810 1811 /* Get remote features */ 1812 if (conn->type == ACL_LINK) { 1813 struct hci_cp_read_remote_features cp; 1814 cp.handle = ev->handle; 1815 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, 1816 sizeof(cp), &cp); 1817 } 1818 1819 /* Set packet type for incoming connection */ 1820 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { 1821 struct hci_cp_change_conn_ptype cp; 1822 cp.handle = ev->handle; 1823 cp.pkt_type = cpu_to_le16(conn->pkt_type); 1824 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), 1825 &cp); 1826 } 1827 } else { 1828 conn->state = BT_CLOSED; 1829 if (conn->type == ACL_LINK) 1830 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type, 1831 conn->dst_type, ev->status); 1832 } 1833 1834 if (conn->type == ACL_LINK) 1835 hci_sco_setup(conn, ev->status); 1836 1837 if (ev->status) { 1838 hci_proto_connect_cfm(conn, ev->status); 1839 hci_conn_del(conn); 1840 } else if (ev->link_type != ACL_LINK) 1841 hci_proto_connect_cfm(conn, ev->status); 1842 1843unlock: 1844 hci_dev_unlock(hdev); 1845 1846 hci_conn_check_pending(hdev); 1847} 1848 1849void hci_conn_accept(struct hci_conn *conn, int mask) 1850{ 1851 struct hci_dev *hdev = conn->hdev; 1852 1853 BT_DBG("conn %p", conn); 1854 1855 conn->state = BT_CONFIG; 1856 1857 if (!lmp_esco_capable(hdev)) { 1858 struct hci_cp_accept_conn_req cp; 1859 1860 bacpy(&cp.bdaddr, &conn->dst); 1861 1862 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 1863 cp.role = 0x00; /* Become master */ 1864 else 1865 cp.role = 0x01; /* Remain slave */ 1866 1867 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); 1868 } else /* lmp_esco_capable(hdev)) */ { 1869 struct hci_cp_accept_sync_conn_req cp; 1870 1871 bacpy(&cp.bdaddr, &conn->dst); 1872 cp.pkt_type = cpu_to_le16(conn->pkt_type); 1873 1874 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40); 1875 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40); 1876 cp.max_latency = __constant_cpu_to_le16(0xffff); 1877 cp.content_format = cpu_to_le16(hdev->voice_setting); 1878 cp.retrans_effort = 0xff; 1879 1880 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, 1881 sizeof(cp), &cp); 1882 } 1883} 1884 1885static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 1886{ 1887 struct hci_ev_conn_request *ev = (void *) skb->data; 1888 int mask = hdev->link_mode; 1889 __u8 flags = 0; 1890 1891 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr, 1892 ev->link_type); 1893 1894 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, 1895 &flags); 1896 1897 if ((mask & HCI_LM_ACCEPT) && 1898 !hci_blacklist_lookup(hdev, &ev->bdaddr)) { 1899 /* Connection accepted */ 1900 struct inquiry_entry *ie; 1901 struct hci_conn *conn; 1902 1903 hci_dev_lock(hdev); 1904 1905 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 1906 if (ie) 1907 memcpy(ie->data.dev_class, ev->dev_class, 3); 1908 1909 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, 1910 &ev->bdaddr); 1911 if (!conn) { 1912 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr); 1913 if (!conn) { 1914 BT_ERR("No memory for new connection"); 1915 hci_dev_unlock(hdev); 1916 return; 1917 } 1918 } 1919 1920 memcpy(conn->dev_class, ev->dev_class, 3); 1921 1922 hci_dev_unlock(hdev); 1923 1924 if (ev->link_type == ACL_LINK || 1925 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { 1926 struct hci_cp_accept_conn_req cp; 1927 conn->state = BT_CONNECT; 
1928 1929 bacpy(&cp.bdaddr, &ev->bdaddr); 1930 1931 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 1932 cp.role = 0x00; /* Become master */ 1933 else 1934 cp.role = 0x01; /* Remain slave */ 1935 1936 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), 1937 &cp); 1938 } else if (!(flags & HCI_PROTO_DEFER)) { 1939 struct hci_cp_accept_sync_conn_req cp; 1940 conn->state = BT_CONNECT; 1941 1942 bacpy(&cp.bdaddr, &ev->bdaddr); 1943 cp.pkt_type = cpu_to_le16(conn->pkt_type); 1944 1945 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40); 1946 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40); 1947 cp.max_latency = __constant_cpu_to_le16(0xffff); 1948 cp.content_format = cpu_to_le16(hdev->voice_setting); 1949 cp.retrans_effort = 0xff; 1950 1951 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, 1952 sizeof(cp), &cp); 1953 } else { 1954 conn->state = BT_CONNECT2; 1955 hci_proto_connect_cfm(conn, 0); 1956 hci_conn_put(conn); 1957 } 1958 } else { 1959 /* Connection rejected */ 1960 struct hci_cp_reject_conn_req cp; 1961 1962 bacpy(&cp.bdaddr, &ev->bdaddr); 1963 cp.reason = HCI_ERROR_REJ_BAD_ADDR; 1964 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); 1965 } 1966} 1967 1968static u8 hci_to_mgmt_reason(u8 err) 1969{ 1970 switch (err) { 1971 case HCI_ERROR_CONNECTION_TIMEOUT: 1972 return MGMT_DEV_DISCONN_TIMEOUT; 1973 case HCI_ERROR_REMOTE_USER_TERM: 1974 case HCI_ERROR_REMOTE_LOW_RESOURCES: 1975 case HCI_ERROR_REMOTE_POWER_OFF: 1976 return MGMT_DEV_DISCONN_REMOTE; 1977 case HCI_ERROR_LOCAL_HOST_TERM: 1978 return MGMT_DEV_DISCONN_LOCAL_HOST; 1979 default: 1980 return MGMT_DEV_DISCONN_UNKNOWN; 1981 } 1982} 1983 1984static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1985{ 1986 struct hci_ev_disconn_complete *ev = (void *) skb->data; 1987 struct hci_conn *conn; 1988 1989 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 1990 1991 hci_dev_lock(hdev); 1992 1993 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1994 if (!conn) 1995 goto unlock; 1996 1997 if (ev->status == 0) 1998 conn->state = BT_CLOSED; 1999 2000 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) && 2001 (conn->type == ACL_LINK || conn->type == LE_LINK)) { 2002 if (ev->status) { 2003 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 2004 conn->dst_type, ev->status); 2005 } else { 2006 u8 reason = hci_to_mgmt_reason(ev->reason); 2007 2008 mgmt_device_disconnected(hdev, &conn->dst, conn->type, 2009 conn->dst_type, reason); 2010 } 2011 } 2012 2013 if (ev->status == 0) { 2014 if (conn->type == ACL_LINK && conn->flush_key) 2015 hci_remove_link_key(hdev, &conn->dst); 2016 hci_proto_disconn_cfm(conn, ev->reason); 2017 hci_conn_del(conn); 2018 } 2019 2020unlock: 2021 hci_dev_unlock(hdev); 2022} 2023 2024static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2025{ 2026 struct hci_ev_auth_complete *ev = (void *) skb->data; 2027 struct hci_conn *conn; 2028 2029 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2030 2031 hci_dev_lock(hdev); 2032 2033 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2034 if (!conn) 2035 goto unlock; 2036 2037 if (!ev->status) { 2038 if (!hci_conn_ssp_enabled(conn) && 2039 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { 2040 BT_INFO("re-auth of legacy device is not possible."); 2041 } else { 2042 conn->link_mode |= HCI_LM_AUTH; 2043 conn->sec_level = conn->pending_sec_level; 2044 } 2045 } else { 2046 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type, 2047 ev->status); 2048 } 
2049 2050 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 2051 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 2052 2053 if (conn->state == BT_CONFIG) { 2054 if (!ev->status && hci_conn_ssp_enabled(conn)) { 2055 struct hci_cp_set_conn_encrypt cp; 2056 cp.handle = ev->handle; 2057 cp.encrypt = 0x01; 2058 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 2059 &cp); 2060 } else { 2061 conn->state = BT_CONNECTED; 2062 hci_proto_connect_cfm(conn, ev->status); 2063 hci_conn_put(conn); 2064 } 2065 } else { 2066 hci_auth_cfm(conn, ev->status); 2067 2068 hci_conn_hold(conn); 2069 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 2070 hci_conn_put(conn); 2071 } 2072 2073 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { 2074 if (!ev->status) { 2075 struct hci_cp_set_conn_encrypt cp; 2076 cp.handle = ev->handle; 2077 cp.encrypt = 0x01; 2078 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 2079 &cp); 2080 } else { 2081 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 2082 hci_encrypt_cfm(conn, ev->status, 0x00); 2083 } 2084 } 2085 2086unlock: 2087 hci_dev_unlock(hdev); 2088} 2089 2090static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) 2091{ 2092 struct hci_ev_remote_name *ev = (void *) skb->data; 2093 struct hci_conn *conn; 2094 2095 BT_DBG("%s", hdev->name); 2096 2097 hci_conn_check_pending(hdev); 2098 2099 hci_dev_lock(hdev); 2100 2101 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2102 2103 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 2104 goto check_auth; 2105 2106 if (ev->status == 0) 2107 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name, 2108 strnlen(ev->name, HCI_MAX_NAME_LENGTH)); 2109 else 2110 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0); 2111 2112check_auth: 2113 if (!conn) 2114 goto unlock; 2115 2116 if (!hci_outgoing_auth_needed(hdev, conn)) 2117 goto unlock; 2118 2119 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 2120 struct hci_cp_auth_requested cp; 2121 cp.handle = __cpu_to_le16(conn->handle); 2122 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 2123 } 2124 2125unlock: 2126 hci_dev_unlock(hdev); 2127} 2128 2129static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2130{ 2131 struct hci_ev_encrypt_change *ev = (void *) skb->data; 2132 struct hci_conn *conn; 2133 2134 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2135 2136 hci_dev_lock(hdev); 2137 2138 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2139 if (conn) { 2140 if (!ev->status) { 2141 if (ev->encrypt) { 2142 /* Encryption implies authentication */ 2143 conn->link_mode |= HCI_LM_AUTH; 2144 conn->link_mode |= HCI_LM_ENCRYPT; 2145 conn->sec_level = conn->pending_sec_level; 2146 } else 2147 conn->link_mode &= ~HCI_LM_ENCRYPT; 2148 } 2149 2150 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 2151 2152 if (ev->status && conn->state == BT_CONNECTED) { 2153 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 2154 hci_conn_put(conn); 2155 goto unlock; 2156 } 2157 2158 if (conn->state == BT_CONFIG) { 2159 if (!ev->status) 2160 conn->state = BT_CONNECTED; 2161 2162 hci_proto_connect_cfm(conn, ev->status); 2163 hci_conn_put(conn); 2164 } else 2165 hci_encrypt_cfm(conn, ev->status, ev->encrypt); 2166 } 2167 2168unlock: 2169 hci_dev_unlock(hdev); 2170} 2171 2172static void hci_change_link_key_complete_evt(struct hci_dev *hdev, 2173 struct sk_buff *skb) 2174{ 2175 struct hci_ev_change_link_key_complete *ev = (void *) skb->data; 2176 struct hci_conn *conn; 2177 2178 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 

static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			conn->link_mode |= HCI_LM_SECURE;

		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
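
/* Command Complete: dispatch to the hci_cc_* handler matching the
 * completed opcode. The first return-parameter byte (the status on most
 * commands) is sampled before the event header is pulled off the skb.
 * Completion of anything other than HCI_OP_NOP stops the command timeout
 * timer, and a non-zero Num_HCI_Command_Packets value re-opens the
 * command queue.
 */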

static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_HOST_BUFFER_SIZE:
		hci_cc_host_buffer_size(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_WRITE_CA_TIMEOUT:
		hci_cc_write_ca_timeout(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_MASK:
		hci_cc_set_event_mask(hdev, skb);
		break;

	case HCI_OP_WRITE_INQUIRY_MODE:
		hci_cc_write_inquiry_mode(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_FLT:
		hci_cc_set_event_flt(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_LE_SET_EVENT_MASK:
		hci_cc_le_set_event_mask(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_LTK_REPLY:
		hci_cc_le_ltk_reply(hdev, skb);
		break;

	case HCI_OP_LE_LTK_NEG_REPLY:
		hci_cc_le_ltk_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, ev->opcode, status);

	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
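
/* Command Status: dispatch to the hci_cs_* handlers, used for commands
 * that only report acceptance here and finish through a later event.
 * The command timer and Num_HCI_Command_Packets handling mirror
 * hci_cmd_complete_evt() above.
 */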

static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_CREATE_LOGICAL_LINK:
		hci_cs_create_logical_link(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	hci_req_cmd_status(hdev, ev->opcode, ev->status);

	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}

static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_role_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (!ev->status) {
			if (ev->role)
				conn->link_mode &= ~HCI_LM_MASTER;
			else
				conn->link_mode |= HCI_LM_MASTER;
		}

		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

		hci_role_switch_cfm(conn, ev->status, ev->role);
	}

	hci_dev_unlock(hdev);
}
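
/* Number of Completed Packets: return transmit credits to the per-type
 * counters (ACL, SCO, or the LE pool when the controller has a separate
 * one) and kick the TX work so queued frames can go out. Only valid
 * when the controller uses packet-based flow control.
 */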

static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}

static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
						 __u16 handle)
{
	struct hci_chan *chan;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		return hci_conn_hash_lookup_handle(hdev, handle);
	case HCI_AMP:
		chan = hci_chan_lookup_handle(hdev, handle);
		if (chan)
			return chan->conn;
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		break;
	}

	return NULL;
}
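
/* Number of Completed Data Blocks: the block-based counterpart of the
 * handler above, used when block-based flow control is active (AMP
 * controllers). Handles are resolved through __hci_conn_lookup_handle()
 * since on an AMP controller they refer to logical channels.
 */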

static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}

static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;
		conn->interval = __le16_to_cpu(ev->interval);

		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_put(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
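
/* Link Key Request: answer from the stored key list. Debug keys are
 * only handed out when HCI_DEBUG_KEYS is set, and keys that cannot
 * satisfy the pending security level (unauthenticated keys when MITM
 * was requested, short combination keys for high security) are treated
 * as not found so that a fresh pairing can take place instead.
 */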

static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}

static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_put(conn);
	}

	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}

static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_clock_offset *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie) {
			ie->data.clock_offset = ev->clock_offset;
			ie->timestamp = jiffies;
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status)
		conn->pkt_type = __le16_to_cpu(ev->pkt_type);

	hci_dev_unlock(hdev);
}

static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie) {
		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
		ie->timestamp = jiffies;
	}

	hci_dev_unlock(hdev);
}
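
/* Inquiry Result with RSSI comes in two wire formats, with and without
 * the page scan mode field. The per-response size is used to tell them
 * apart before the results are fed into the inquiry cache and reported
 * to the management interface.
 */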

static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
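
/* Synchronous Connection Complete: finalize SCO/eSCO setup. When an
 * outgoing eSCO attempt fails with one of the errors listed below, a
 * single retry is made with a fallback packet-type mask (the SCO and
 * EDR eSCO bits of the controller's esco_type).
 */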

static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x11: /* Unsupported Feature or Parameter Value */
	case 0x1c: /* SCO interval rejected */
	case 0x1a: /* Unsupported Remote Feature */
	case 0x1f: /* Unspecified error */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sniff_subrate *ev = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
}

static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;

		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
						      &ssp);
		eir_len = eir_get_length(info->data, sizeof(info->data));
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi, !name_known,
				  ssp, info->data, eir_len);
	}

	hci_dev_unlock(hdev);
}

static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_put(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
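
/* Derive the authentication requirements to send in an IO Capability
 * Reply: 0x00/0x01 is no bonding and 0x02/0x03 is dedicated bonding,
 * with the odd values additionally requesting MITM protection. The
 * local policy is adjusted to follow what the remote side asked for.
 */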

static u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests dedicated bonding follow that lead */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
		/* If both remote and local IO capabilities allow MITM
		 * protection then require it, otherwise don't */
		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
			return 0x02;
		else
			return 0x03;
	}

	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
		return conn->remote_auth | (conn->auth_type & 0x01);

	return conn->auth_type;
}

static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay to
		 * DisplayYesNo, since KeyboardDisplay is not a valid
		 * IO capability value for this HCI reply. */
		cp.capability = (conn->io_capability == 0x04) ?
				0x01 : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	conn->remote_cap = ev->capability;
	conn->remote_auth = ev->authentication;
	if (ev->oob_data)
		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
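
/* User Confirmation Request: when neither side insists on MITM
 * protection the request can be confirmed without user interaction,
 * either immediately or after the configured auto-accept delay. As the
 * acceptor the decision is still handed to user space with confirm_hint
 * set. A remote NoInputNoOutput device cannot satisfy a local MITM
 * requirement, so that case is rejected outright (unless we initiated
 * dedicated bonding ourselves).
 */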

static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection, auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
	    (!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
				  confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_user_passkey_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_passkey_req *ev = (void *) skb->data;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
}

static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	conn->passkey_notify = __le32_to_cpu(ev->passkey);
	conn->passkey_entered = 0;

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}

static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}

static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_remote_host_features_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}

static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash, sizeof(cp.hash));
		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
			     &cp);
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
			     &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_put(hcon);

	hci_conn_hold_device(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
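
/* Logical Link Complete: create the hci_chan for the new AMP logical
 * link and, if an L2CAP channel is waiting on the AMP manager, complete
 * the logical link setup towards L2CAP using the controller's block MTU.
 */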

static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}

static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
	struct hci_chan *hchan;

	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
	       le16_to_cpu(ev->handle), ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
	if (!hchan)
		goto unlock;

	amp_destroy_logical_link(hchan, ev->reason);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (hcon) {
		hcon->state = BT_CLOSED;
		hci_conn_del(hcon);
	}

	hci_dev_unlock(hdev);
}

static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}
	}

	if (ev->status) {
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];
	s8 rssi;

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;

		rssi = ev->data[ev->length];
		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
				  NULL, rssi, 0, 1, ev->data, ev->length);

		ptr += sizeof(*ev) + ev->length + 1;
	}
}
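
/* LE Long Term Key Request: look up the LTK by EDIV and Rand and hand
 * it back to the controller, or send a negative reply if no key is
 * known. STKs from a legacy pairing are dropped from the list once they
 * have been used.
 */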

static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	if (ltk->authenticated)
		conn->sec_level = BT_SECURITY_HIGH;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	if (ltk->type & HCI_SMP_STK) {
		list_del(&ltk->list);
		kfree(ltk);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}

static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	default:
		break;
	}
}

static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_channel_selected *ev = (void *) skb->data;
	struct hci_conn *hcon;

	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);

	skb_pull(skb, sizeof(*ev));

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	amp_read_loc_assoc_final_data(hdev, hcon);
}
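
/* Entry point for all HCI events: strip the event header and dispatch
 * to the handler for the event code. Unknown events are only logged.
 * The skb is consumed here and the per-device RX event counter bumped.
 */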

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_VERSION:
		hci_remote_version_evt(hdev, skb);
		break;

	case HCI_EV_QOS_SETUP_COMPLETE:
		hci_qos_setup_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_CHANGED:
		hci_sync_conn_changed_evt(hdev, skb);
		break;

	case HCI_EV_SNIFF_SUBRATE:
		hci_sniff_subrate_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}