hci_event.c revision 479453d5fe3a5b911b7f56474764988100f9f650
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* Handle HCI Event packets */

static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_stop_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	clear_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);

	hci_conn_check_pending(hdev);
}

static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		if (rp->role)
			conn->link_mode &= ~HCI_LM_MASTER;
		else
			conn->link_mode |= HCI_LM_MASTER;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	if (!status)
		hdev->link_policy = get_unaligned_le16(sent);

	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
}

static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	hci_req_complete(hdev, HCI_OP_RESET, status);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
			     BIT(HCI_PERIODIC_INQ));

	hdev->discovery.state = DISCOVERY_STOPPED;
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);

	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param)
			set_bit(HCI_ENCRYPT, &hdev->flags);
		else
			clear_bit(HCI_ENCRYPT, &hdev->flags);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
}

static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status != 0) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
	else if (!status) {
		if (*((u8 *) sent))
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (hdev->features[6] & LMP_EXT_INQ)
		return 2;

	if (hdev->features[3] & LMP_RSSI_INQ)
		return 1;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 1;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 1;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 1;

	return 0;
}

static void hci_setup_inquiry_mode(struct hci_dev *hdev)
{
	u8 mode;

	mode = hci_get_inquiry_mode(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

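/* Descriptive note (editor's comment): hci_setup_event_mask() below builds
 * the eight-byte parameter of the Set Event Mask command. Each bit enables
 * delivery of one HCI event; beyond the quirky defaults, optional events
 * are only switched on when the matching LMP feature bit is advertised by
 * the controller.
 */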
static void hci_setup_event_mask(struct hci_dev *hdev)
{
	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	events[4] |= 0x01; /* Flow Specification Complete */
	events[4] |= 0x02; /* Inquiry Result with RSSI */
	events[4] |= 0x04; /* Read Remote Extended Features Complete */
	events[5] |= 0x08; /* Synchronous Connection Complete */
	events[5] |= 0x10; /* Synchronous Connection Changed */

	if (hdev->features[3] & LMP_RSSI_INQ)
		events[4] |= 0x04; /* Inquiry Result with RSSI */

	if (hdev->features[5] & LMP_SNIFF_SUBR)
		events[5] |= 0x20; /* Sniff Subrating */

	if (hdev->features[5] & LMP_PAUSE_ENC)
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (hdev->features[6] & LMP_EXT_INQ)
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (hdev->features[6] & LMP_NO_FLUSH)
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (hdev->features[7] & LMP_LSTO)
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification */
	}

	if (hdev->features[4] & LMP_LE)
		events[7] |= 0x20; /* LE Meta-Event */

	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_setup(struct hci_dev *hdev)
{
	if (hdev->dev_type != HCI_BREDR)
		return;

	hci_setup_event_mask(hdev);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
				     sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (hdev->features[3] & LMP_RSSI_INQ)
		hci_setup_inquiry_mode(hdev);

	if (hdev->features[7] & LMP_INQ_TX_PWR)
		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (hdev->features[7] & LMP_EXTFEATURES) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
			     &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			     &enable);
	}
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	hdev->hci_ver = rp->hci_ver;
	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
	hdev->lmp_ver = rp->lmp_ver;
	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
	       hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);

	if (test_bit(HCI_INIT, &hdev->flags))
		hci_setup(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
}

static void hci_setup_link_policy(struct hci_dev *hdev)
{
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (hdev->features[0] & LMP_RSWITCH)
		link_policy |= HCI_LP_RSWITCH;
	if (hdev->features[0] & LMP_HOLD)
		link_policy |= HCI_LP_HOLD;
	if (hdev->features[0] & LMP_SNIFF)
		link_policy |= HCI_LP_SNIFF;
	if (hdev->features[1] & LMP_PARK)
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

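/* Descriptive note (editor's comment): hdev->commands mirrors the
 * controller's supported-commands bitmap returned by Read Local Supported
 * Commands. The check on octet 5, bit 4 in the handler below (presumably
 * the Write Default Link Policy Settings command bit) gates whether the
 * default link policy gets programmed during init.
 */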
static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
		hci_setup_link_policy(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
}

static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (hdev->features[3] & LMP_ESCO)
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
	       hdev->features[0], hdev->features[1],
	       hdev->features[2], hdev->features[3],
	       hdev->features[4], hdev->features[5],
	       hdev->features[6], hdev->features[7]);
}

static void hci_set_le_support(struct hci_dev *hdev)
{
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 1;
		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
	}

	if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
		hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			     &cp);
}

static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	switch (rp->page) {
	case 0:
		memcpy(hdev->features, rp->features, 8);
		break;
	case 1:
		memcpy(hdev->host_features, rp->features, 8);
		break;
	}

	if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
		hci_set_le_support(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
}

static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;

	hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
}

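/* Descriptive note (editor's comment): the packet counts reported in the
 * buffer size reply seed hdev->acl_cnt and hdev->sco_cnt, which the
 * transmit path treats as flow-control credits. Controllers flagged with
 * HCI_QUIRK_FIXUP_BUFFER_SIZE get fixed SCO values instead of what they
 * report.
 */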
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
	       hdev->acl_mtu, hdev->acl_pkts,
	       hdev->sco_mtu, hdev->sco_pkts);
}

static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (!rp->status)
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
}

static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);

	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
}

static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}

static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
}

static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
}

static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (!rp->status)
		hdev->inq_tx_power = rp->tx_power;

	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
}

static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
}

static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status != 0)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
						rp->randomizer, rp->status);
	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}
}

static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCANNING_ENABLED:
		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);

		if (status) {
			hci_dev_lock(hdev);
			mgmt_start_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		hci_dev_unlock(hdev);
		break;

	case LE_SCANNING_DISABLED:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_stop_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    hdev->discovery.state == DISCOVERY_FINDING) {
			mgmt_interleaved_discovery(hdev);
		} else {
			hci_dev_lock(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			hci_dev_unlock(hdev);
		}

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}

static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
}

static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
}

static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
						  struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (!status) {
		if (sent->le)
			hdev->host_features[0] |= LMP_HOST_LE;
		else
			hdev->host_features[0] &= ~LMP_HOST_LE;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_INIT, &hdev->flags))
		mgmt_le_enable_complete(hdev, sent->le, status);

	hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
}

static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status) {
		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
		hci_conn_check_pending(hdev);
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);
}

static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle %d", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

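/* Descriptive note (editor's comment): like most of the hci_cs_* (Command
 * Status) helpers around here, the handler below only has work to do when
 * the controller reported a failure; on success the corresponding
 * completion event carries the result.
 */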
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level HIGH or if MITM protection is requested */
	if (!hci_conn_ssp_enabled(conn) &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    !(conn->auth_type & 0x01))
		return 0;

	return 1;
}

static inline int hci_resolve_name(struct hci_dev *hdev,
				   struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}

static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	if (e) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
		if (name)
			mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
					 e->data.rssi, name, name_len);
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle %d", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn)
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

	hci_dev_unlock(hdev);
}

static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
	       conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			conn->state = BT_CLOSED;
			mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
					    conn->dst_type, status);
			hci_proto_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
			if (conn) {
				conn->dst_type = cp->peer_addr_type;
				conn->out = true;
			} else {
				BT_ERR("No memory for new connection");
			}
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);
}

static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status %d", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_INQUIRY, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}

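/* Descriptive note (editor's comment): the Inquiry Result event body is a
 * response-count byte followed by that many inquiry_info records. Each
 * record below is fed into the inquiry cache and reported to the
 * management interface as a found device.
 */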
static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = 0x00;
		data.ssp_mode = 0x00;

		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, 0, !name_known, ssp, NULL,
				  0);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);
			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;

	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
	       batostr(&ev->bdaddr), ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);
		conn->state = BT_CONNECT;

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
			struct hci_cp_accept_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else {
			struct hci_cp_accept_sync_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth = cpu_to_le32(0x00001f40);
			cp.max_latency = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}

static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
		if (ev->status != 0)
			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
					       conn->dst_type, ev->status);
		else
			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
						 conn->dst_type);
	}

	if (ev->status == 0) {
		if (conn->type == ACL_LINK && conn->flush_key)
			hci_remove_link_key(hdev, &conn->dst);
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			conn->link_mode |= HCI_LM_SECURE;

		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_HOST_BUFFER_SIZE:
		hci_cc_host_buffer_size(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_WRITE_CA_TIMEOUT:
		hci_cc_write_ca_timeout(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_MASK:
		hci_cc_set_event_mask(hdev, skb);
		break;

	case HCI_OP_WRITE_INQUIRY_MODE:
		hci_cc_write_inquiry_mode(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_FLT:
		hci_cc_set_event_flt(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_LTK_REPLY:
		hci_cc_le_ltk_reply(hdev, skb);
		break;

	case HCI_OP_LE_LTK_NEG_REPLY:
		hci_cc_le_ltk_neg_reply(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	if (ev->ncmd) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}

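/* Command Status event: unlike Command Complete it carries only a status
 * code for the pending command, so the per-opcode helpers below get just
 * ev->status. The cmd_timer and ncmd handling at the end re-arms the
 * command queue in the same way as hci_cmd_complete_evt() does. */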
static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}

static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_role_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (!ev->status) {
			if (ev->role)
				conn->link_mode &= ~HCI_LM_MASTER;
			else
				conn->link_mode |= HCI_LM_MASTER;
		}

		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

		hci_role_switch_cfm(conn, ev->status, ev->role);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
			ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
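			/* Controllers without a dedicated LE buffer pool
			 * (le_pkts == 0) share the ACL buffers, so credit
			 * the ACL counter in that case. */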
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}

static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
			ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}

static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;
		conn->interval = __le16_to_cpu(ev->interval);

		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_put(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}

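/* Link Key Request event: reply with a stored key for the peer unless
 * policy forbids it (debug keys without HCI_DEBUG_KEYS, unauthenticated
 * keys when MITM protection was requested, or short-PIN combination keys
 * while high security is pending). Otherwise send a negative reply. */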
static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %s", hdev->name,
		       batostr(&ev->bdaddr));
		goto not_found;
	}

	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
	       batostr(&ev->bdaddr));

	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high "
			       "security", hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, 16);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}

static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_put(conn);
	}

	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}

static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_clock_offset *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie) {
			ie->data.clock_offset = ev->clock_offset;
			ie->timestamp = jiffies;
		}
	}

	hci_dev_unlock(hdev);
}

static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
	struct hci_conn *conn;

BT_DBG("%s status %d", hdev->name, ev->status); 2790 2791 hci_dev_lock(hdev); 2792 2793 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2794 if (conn && !ev->status) 2795 conn->pkt_type = __le16_to_cpu(ev->pkt_type); 2796 2797 hci_dev_unlock(hdev); 2798} 2799 2800static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) 2801{ 2802 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; 2803 struct inquiry_entry *ie; 2804 2805 BT_DBG("%s", hdev->name); 2806 2807 hci_dev_lock(hdev); 2808 2809 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 2810 if (ie) { 2811 ie->data.pscan_rep_mode = ev->pscan_rep_mode; 2812 ie->timestamp = jiffies; 2813 } 2814 2815 hci_dev_unlock(hdev); 2816} 2817 2818static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb) 2819{ 2820 struct inquiry_data data; 2821 int num_rsp = *((__u8 *) skb->data); 2822 bool name_known, ssp; 2823 2824 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 2825 2826 if (!num_rsp) 2827 return; 2828 2829 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) 2830 return; 2831 2832 hci_dev_lock(hdev); 2833 2834 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { 2835 struct inquiry_info_with_rssi_and_pscan_mode *info; 2836 info = (void *) (skb->data + 1); 2837 2838 for (; num_rsp; num_rsp--, info++) { 2839 bacpy(&data.bdaddr, &info->bdaddr); 2840 data.pscan_rep_mode = info->pscan_rep_mode; 2841 data.pscan_period_mode = info->pscan_period_mode; 2842 data.pscan_mode = info->pscan_mode; 2843 memcpy(data.dev_class, info->dev_class, 3); 2844 data.clock_offset = info->clock_offset; 2845 data.rssi = info->rssi; 2846 data.ssp_mode = 0x00; 2847 2848 name_known = hci_inquiry_cache_update(hdev, &data, 2849 false, &ssp); 2850 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 2851 info->dev_class, info->rssi, 2852 !name_known, ssp, NULL, 0); 2853 } 2854 } else { 2855 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); 2856 2857 for (; num_rsp; num_rsp--, info++) { 2858 bacpy(&data.bdaddr, &info->bdaddr); 2859 data.pscan_rep_mode = info->pscan_rep_mode; 2860 data.pscan_period_mode = info->pscan_period_mode; 2861 data.pscan_mode = 0x00; 2862 memcpy(data.dev_class, info->dev_class, 3); 2863 data.clock_offset = info->clock_offset; 2864 data.rssi = info->rssi; 2865 data.ssp_mode = 0x00; 2866 name_known = hci_inquiry_cache_update(hdev, &data, 2867 false, &ssp); 2868 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 2869 info->dev_class, info->rssi, 2870 !name_known, ssp, NULL, 0); 2871 } 2872 } 2873 2874 hci_dev_unlock(hdev); 2875} 2876 2877static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb) 2878{ 2879 struct hci_ev_remote_ext_features *ev = (void *) skb->data; 2880 struct hci_conn *conn; 2881 2882 BT_DBG("%s", hdev->name); 2883 2884 hci_dev_lock(hdev); 2885 2886 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2887 if (!conn) 2888 goto unlock; 2889 2890 if (!ev->status && ev->page == 0x01) { 2891 struct inquiry_entry *ie; 2892 2893 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 2894 if (ie) 2895 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 2896 2897 if (ev->features[0] & LMP_HOST_SSP) 2898 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 2899 } 2900 2901 if (conn->state != BT_CONFIG) 2902 goto unlock; 2903 2904 if (!ev->status) { 2905 struct hci_cp_remote_name_req cp; 2906 memset(&cp, 0, sizeof(cp)); 2907 bacpy(&cp.bdaddr, &conn->dst); 2908 cp.pscan_rep_mode = 0x02; 2909 
static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sniff_subrate *ev = (void *) skb->data;

	BT_DBG("%s status %d", hdev->name, ev->status);
}

static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;

		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
						      &ssp);
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi, !name_known,
				  ssp, info->data, sizeof(info->data));
	}

	hci_dev_unlock(hdev);
}

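/* Pick the Authentication_Requirements value for an IO Capability reply.
 * In the HCI encoding 0x00/0x01 mean No Bonding, 0x02/0x03 Dedicated
 * Bonding and 0x04/0x05 General Bonding; the low bit requests MITM
 * protection. */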
static inline u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests dedicated bonding follow that lead */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
		/* If both remote and local IO capabilities allow MITM
		 * protection then require it, otherwise don't */
		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
			return 0x02;
		else
			return 0x03;
	}

	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
		return conn->remote_auth | (conn->auth_type & 0x01);

	return conn->auth_type;
}

static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
						0x01 : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
		    hci_find_remote_oob_data(hdev, &conn->dst))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	conn->remote_cap = ev->capability;
	conn->remote_auth = ev->authentication;
	if (ev->oob_data)
		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}

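/* User Confirmation Request event: either hand the numeric comparison to
 * user space via mgmt or auto-accept it locally, depending on the MITM
 * requirements and IO capabilities negotiated for this connection. */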
static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
	    (!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
				  confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_user_passkey_req *ev = (void *) skb->data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);

	hci_dev_unlock(hdev);
}

static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}

static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
						   struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash, sizeof(cp.hash));
		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
			     &cp);
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
			     &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}

		conn->dst_type = ev->bdaddr_type;
	}

	if (ev->status) {
		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
				    conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];
	s8 rssi;

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;

		rssi = ev->data[ev->length];
		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
				  NULL, rssi, 0, 1, ev->data, ev->length);

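		/* Each report is followed by ev->length bytes of advertising
		 * data and a single trailing RSSI byte, read above. */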
		ptr += sizeof(*ev) + ev->length + 1;
	}

	hci_dev_unlock(hdev);
}

static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	if (ltk->authenticated)
		conn->sec_level = BT_SECURITY_HIGH;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	if (ltk->type & HCI_SMP_STK) {
		list_del(&ltk->list);
		kfree(ltk);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}

static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	default:
		break;
	}
}

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_VERSION:
		hci_remote_version_evt(hdev, skb);
		break;

	case HCI_EV_QOS_SETUP_COMPLETE:
		hci_qos_setup_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_CHANGED:
		hci_sync_conn_changed_evt(hdev, skb);
		break;

	case HCI_EV_SNIFF_SUBRATE:
		hci_sniff_subrate_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}