Lines Matching defs:hdev

119 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
120 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
196 bool mgmt_valid_hdev(struct hci_dev *hdev)
198 return hdev->dev_type == HCI_BREDR;
273 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
287 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
321 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
374 static u32 get_supported_settings(struct hci_dev *hdev)
381 if (lmp_ssp_capable(hdev))
384 if (lmp_bredr_capable(hdev)) {
386 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
396 if (lmp_le_capable(hdev))
402 static u32 get_current_settings(struct hci_dev *hdev)
406 if (hdev_is_powered(hdev))
409 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
412 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
415 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
418 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
421 if (lmp_bredr_capable(hdev))
424 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
427 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
430 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
433 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
441 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
449 list_for_each_entry(uuid, &hdev->uuids, list) {
483 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
491 list_for_each_entry(uuid, &hdev->uuids, list) {
516 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
524 list_for_each_entry(uuid, &hdev->uuids, list) {
549 static void create_eir(struct hci_dev *hdev, u8 *data)
554 name_len = strlen(hdev->dev_name);
567 memcpy(ptr + 2, hdev->dev_name, name_len);
572 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
575 ptr[2] = (u8) hdev->inq_tx_power;
580 if (hdev->devid_source > 0) {
584 put_unaligned_le16(hdev->devid_source, ptr + 2);
585 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
586 put_unaligned_le16(hdev->devid_product, ptr + 6);
587 put_unaligned_le16(hdev->devid_version, ptr + 8);
592 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
593 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
594 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
599 struct hci_dev *hdev = req->hdev;
602 if (!hdev_is_powered(hdev))
605 if (!lmp_ext_inq_capable(hdev))
608 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
611 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
616 create_eir(hdev, cp.data);
618 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
621 memcpy(hdev->eir, cp.data, sizeof(cp.data));
626 static u8 get_service_classes(struct hci_dev *hdev)
631 list_for_each_entry(uuid, &hdev->uuids, list)
639 struct hci_dev *hdev = req->hdev;
642 BT_DBG("%s", hdev->name);
644 if (!hdev_is_powered(hdev))
647 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
650 cod[0] = hdev->minor_class;
651 cod[1] = hdev->major_class;
652 cod[2] = get_service_classes(hdev);
654 if (memcmp(cod, hdev->dev_class, 3) == 0)
662 struct hci_dev *hdev = container_of(work, struct hci_dev,
666 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
669 hci_req_init(&req, hdev);
671 hci_dev_lock(hdev);
676 hci_dev_unlock(hdev);
681 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
683 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
686 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
693 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
696 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
701 BT_DBG("sock %p %s", sk, hdev->name);
703 hci_dev_lock(hdev);
707 bacpy(&rp.bdaddr, &hdev->bdaddr);
709 rp.version = hdev->hci_ver;
710 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
712 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
713 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
715 memcpy(rp.dev_class, hdev->dev_class, 3);
717 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
718 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
720 hci_dev_unlock(hdev);
722 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
734 struct hci_dev *hdev, void *data,
744 cmd->index = hdev->id;
758 list_add(&cmd->list, &hdev->mgmt_pending);
763 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
770 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
778 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
782 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
796 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
798 __le32 settings = cpu_to_le32(get_current_settings(hdev));
800 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
804 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
811 BT_DBG("request for %s", hdev->name);
814 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
817 hci_dev_lock(hdev);
819 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
820 cancel_delayed_work(&hdev->power_off);
823 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
825 err = mgmt_powered(hdev, 1);
830 if (!!cp->val == hdev_is_powered(hdev)) {
831 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
835 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
836 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
841 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
848 queue_work(hdev->req_workqueue, &hdev->power_on);
850 queue_work(hdev->req_workqueue, &hdev->power_off.work);
855 hci_dev_unlock(hdev);
859 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
871 if (hdev)
872 hdr->index = cpu_to_le16(hdev->id);
889 static int new_settings(struct hci_dev *hdev, struct sock *skip)
893 ev = cpu_to_le32(get_current_settings(hdev));
895 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
898 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
907 BT_DBG("request for %s", hdev->name);
909 if (!lmp_bredr_capable(hdev))
910 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
914 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
919 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
922 hci_dev_lock(hdev);
924 if (!hdev_is_powered(hdev) && timeout > 0) {
925 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
930 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
931 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
932 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
937 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
938 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
943 if (!hdev_is_powered(hdev)) {
946 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
947 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
951 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
956 err = new_settings(hdev, sk);
961 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
962 if (hdev->discov_timeout > 0) {
963 cancel_delayed_work(&hdev->discov_off);
964 hdev->discov_timeout = 0;
968 hdev->discov_timeout = timeout;
969 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
970 msecs_to_jiffies(hdev->discov_timeout * 1000));
973 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
977 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
988 cancel_delayed_work(&hdev->discov_off);
990 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
995 hdev->discov_timeout = timeout;
998 hci_dev_unlock(hdev);
1004 struct hci_dev *hdev = req->hdev;
1008 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1025 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1026 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1030 if (hdev->page_scan_type != type)
1034 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1040 hci_dev_lock(hdev);
1042 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1046 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1051 hci_dev_unlock(hdev);
1054 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1063 BT_DBG("request for %s", hdev->name);
1065 if (!lmp_bredr_capable(hdev))
1066 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1070 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1073 hci_dev_lock(hdev);
1075 if (!hdev_is_powered(hdev)) {
1078 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1082 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1084 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1085 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1088 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1093 err = new_settings(hdev, sk);
1098 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1099 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1100 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1105 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1106 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1110 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1121 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1122 hdev->discov_timeout > 0)
1123 cancel_delayed_work(&hdev->discov_off);
1126 hci_req_init(&req, hdev);
1136 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1144 hci_dev_unlock(hdev);
1148 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1154 BT_DBG("request for %s", hdev->name);
1157 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1160 hci_dev_lock(hdev);
1163 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1165 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1167 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1171 err = new_settings(hdev, sk);
1174 hci_dev_unlock(hdev);
1178 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1186 BT_DBG("request for %s", hdev->name);
1188 if (!lmp_bredr_capable(hdev))
1189 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1193 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1196 hci_dev_lock(hdev);
1198 if (!hdev_is_powered(hdev)) {
1202 &hdev->dev_flags)) {
1203 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1207 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1212 err = new_settings(hdev, sk);
1217 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1218 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1225 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1226 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1230 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1236 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1243 hci_dev_unlock(hdev);
1247 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1254 BT_DBG("request for %s", hdev->name);
1256 if (!lmp_ssp_capable(hdev))
1257 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1261 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1264 hci_dev_lock(hdev);
1268 if (!hdev_is_powered(hdev)) {
1271 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1272 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1276 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1281 err = new_settings(hdev, sk);
1286 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1287 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1292 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1293 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1297 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1303 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1310 hci_dev_unlock(hdev);
1314 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1318 BT_DBG("request for %s", hdev->name);
1321 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1325 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1329 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1331 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1333 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1336 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1344 BT_DBG("request for %s", hdev->name);
1346 if (!lmp_le_capable(hdev))
1347 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1351 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1355 if (!lmp_bredr_capable(hdev))
1356 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1359 hci_dev_lock(hdev);
1362 enabled = lmp_host_le_capable(hdev);
1364 if (!hdev_is_powered(hdev) || val == enabled) {
1367 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1368 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1372 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1377 err = new_settings(hdev, sk);
1382 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1383 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1388 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1398 hci_cp.simul = lmp_le_br_capable(hdev);
1401 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1407 hci_dev_unlock(hdev);
1417 static bool pending_eir_or_class(struct hci_dev *hdev)
1421 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1453 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1457 hci_dev_lock(hdev);
1459 cmd = mgmt_pending_find(mgmt_op, hdev);
1464 hdev->dev_class, 3);
1469 hci_dev_unlock(hdev);
1472 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1476 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
1479 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1487 BT_DBG("request for %s", hdev->name);
1489 hci_dev_lock(hdev);
1491 if (pending_eir_or_class(hdev)) {
1492 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1507 list_add_tail(&uuid->list, &hdev->uuids);
1509 hci_req_init(&req, hdev);
1519 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1520 hdev->dev_class, 3);
1524 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1533 hci_dev_unlock(hdev);
1537 static bool enable_service_cache(struct hci_dev *hdev)
1539 if (!hdev_is_powered(hdev))
1542 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1543 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1551 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1555 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
1558 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1568 BT_DBG("request for %s", hdev->name);
1570 hci_dev_lock(hdev);
1572 if (pending_eir_or_class(hdev)) {
1573 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1579 err = hci_uuids_clear(hdev);
1581 if (enable_service_cache(hdev)) {
1582 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1583 0, hdev->dev_class, 3);
1592 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1602 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1608 hci_req_init(&req, hdev);
1618 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1619 hdev->dev_class, 3);
1623 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1632 hci_dev_unlock(hdev);
1636 static void set_class_complete(struct hci_dev *hdev, u8 status)
1640 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
1643 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1651 BT_DBG("request for %s", hdev->name);
1653 if (!lmp_bredr_capable(hdev))
1654 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1657 hci_dev_lock(hdev);
1659 if (pending_eir_or_class(hdev)) {
1660 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1666 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1671 hdev->major_class = cp->major;
1672 hdev->minor_class = cp->minor;
1674 if (!hdev_is_powered(hdev)) {
1675 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1676 hdev->dev_class, 3);
1680 hci_req_init(&req, hdev);
1682 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1683 hci_dev_unlock(hdev);
1684 cancel_delayed_work_sync(&hdev->service_cache);
1685 hci_dev_lock(hdev);
1696 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1697 hdev->dev_class, 3);
1701 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1710 hci_dev_unlock(hdev);
1714 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1728 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1733 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1736 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
1743 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1747 hci_dev_lock(hdev);
1749 hci_link_keys_clear(hdev);
1751 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1754 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1756 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1761 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1765 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1767 hci_dev_unlock(hdev);
1772 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1780 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
1784 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1799 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1804 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1808 hci_dev_lock(hdev);
1810 if (!hdev_is_powered(hdev)) {
1811 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1817 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1819 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1822 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1829 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1832 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
1839 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1841 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
1845 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1854 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1859 hci_dev_unlock(hdev);
1863 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1880 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1884 hci_dev_lock(hdev);
1886 if (!test_bit(HCI_UP, &hdev->flags)) {
1887 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1892 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1893 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1899 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1902 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1905 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1910 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1919 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1924 hci_dev_unlock(hdev);
1947 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1958 hci_dev_lock(hdev);
1960 if (!hdev_is_powered(hdev)) {
1961 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1967 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1980 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1995 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2001 hci_dev_unlock(hdev);
2005 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2011 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2016 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2024 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2035 hci_dev_lock(hdev);
2037 if (!hdev_is_powered(hdev)) {
2038 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2043 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2045 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2057 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2059 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2065 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2075 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2080 hci_dev_unlock(hdev);
2084 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2091 hci_dev_lock(hdev);
2093 hdev->io_capability = cp->io_capability;
2095 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2096 hdev->io_capability);
2098 hci_dev_unlock(hdev);
2100 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2106 struct hci_dev *hdev = conn->hdev;
2109 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2172 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2189 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2193 hci_dev_lock(hdev);
2195 if (!hdev_is_powered(hdev)) {
2196 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2208 conn = hci_connect(hdev, ACL_LINK, 0, &cp->addr.bdaddr,
2211 conn = hci_connect(hdev, LE_LINK, 0, &cp->addr.bdaddr,
2222 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2230 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2235 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2260 hci_dev_unlock(hdev);
2264 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2274 hci_dev_lock(hdev);
2276 if (!hdev_is_powered(hdev)) {
2277 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2282 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2284 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2292 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2299 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2302 hci_dev_unlock(hdev);
2306 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2314 hci_dev_lock(hdev);
2316 if (!hdev_is_powered(hdev)) {
2317 err = cmd_complete(sk, hdev->id, mgmt_op,
2324 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2326 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2329 err = cmd_complete(sk, hdev->id, mgmt_op,
2340 err = cmd_complete(sk, hdev->id, mgmt_op,
2344 err = cmd_complete(sk, hdev->id, mgmt_op,
2351 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2363 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2365 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2372 hci_dev_unlock(hdev);
2376 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2383 return user_pairing_resp(sk, hdev, &cp->addr,
2388 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2396 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2399 return user_pairing_resp(sk, hdev, &cp->addr,
2404 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2411 return user_pairing_resp(sk, hdev, &cp->addr,
2416 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2423 return user_pairing_resp(sk, hdev, &cp->addr,
2428 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2435 return user_pairing_resp(sk, hdev, &cp->addr,
2442 struct hci_dev *hdev = req->hdev;
2445 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2450 static void set_name_complete(struct hci_dev *hdev, u8 status)
2457 hci_dev_lock(hdev);
2459 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2466 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2469 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2475 hci_dev_unlock(hdev);
2478 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2488 hci_dev_lock(hdev);
2493 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2494 !memcmp(hdev->short_name, cp->short_name,
2495 sizeof(hdev->short_name))) {
2496 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2501 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2503 if (!hdev_is_powered(hdev)) {
2504 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2506 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2511 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2517 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2523 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2525 hci_req_init(&req, hdev);
2527 if (lmp_bredr_capable(hdev)) {
2532 if (lmp_le_capable(hdev))
2540 hci_dev_unlock(hdev);
2544 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2550 BT_DBG("%s", hdev->name);
2552 hci_dev_lock(hdev);
2554 if (!hdev_is_powered(hdev)) {
2555 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2560 if (!lmp_ssp_capable(hdev)) {
2561 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2566 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2567 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2572 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2578 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2583 hci_dev_unlock(hdev);
2587 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2594 BT_DBG("%s ", hdev->name);
2596 hci_dev_lock(hdev);
2598 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2605 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2608 hci_dev_unlock(hdev);
2612 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2619 BT_DBG("%s", hdev->name);
2621 hci_dev_lock(hdev);
2623 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2629 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2632 hci_dev_unlock(hdev);
2636 int mgmt_interleaved_discovery(struct hci_dev *hdev)
2640 BT_DBG("%s", hdev->name);
2642 hci_dev_lock(hdev);
2644 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
2646 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2648 hci_dev_unlock(hdev);
2653 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2660 BT_DBG("%s", hdev->name);
2662 hci_dev_lock(hdev);
2664 if (!hdev_is_powered(hdev)) {
2665 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2670 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2671 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2676 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2677 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2682 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2688 hdev->discovery.type = cp->type;
2690 switch (hdev->discovery.type) {
2692 if (!lmp_bredr_capable(hdev)) {
2693 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2699 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2703 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2704 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2710 err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
2715 if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
2716 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2722 err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
2727 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2736 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2739 hci_dev_unlock(hdev);
2743 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2752 BT_DBG("%s", hdev->name);
2754 hci_dev_lock(hdev);
2756 if (!hci_discovery_active(hdev)) {
2757 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2763 if (hdev->discovery.type != mgmt_cp->type) {
2764 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2770 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2776 switch (hdev->discovery.state) {
2778 if (test_bit(HCI_INQUIRY, &hdev->flags))
2779 err = hci_cancel_inquiry(hdev);
2781 err = hci_cancel_le_scan(hdev);
2786 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2790 err = cmd_complete(sk, hdev->id,
2794 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2799 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2805 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2812 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2815 hci_dev_unlock(hdev);
2819 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2826 BT_DBG("%s", hdev->name);
2828 hci_dev_lock(hdev);
2830 if (!hci_discovery_active(hdev)) {
2831 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2836 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2838 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2848 hci_inquiry_cache_update_resolve(hdev, e);
2851 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2855 hci_dev_unlock(hdev);
2859 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2866 BT_DBG("%s", hdev->name);
2869 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2873 hci_dev_lock(hdev);
2875 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2881 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2884 hci_dev_unlock(hdev);
2889 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2896 BT_DBG("%s", hdev->name);
2899 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
2903 hci_dev_lock(hdev);
2905 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2911 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2914 hci_dev_unlock(hdev);
2919 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2927 BT_DBG("%s", hdev->name);
2932 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2935 hci_dev_lock(hdev);
2937 hdev->devid_source = source;
2938 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2939 hdev->devid_product = __le16_to_cpu(cp->product);
2940 hdev->devid_version = __le16_to_cpu(cp->version);
2942 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2944 hci_req_init(&req, hdev);
2948 hci_dev_unlock(hdev);
2953 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
2959 hci_dev_lock(hdev);
2961 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2966 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2972 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
2974 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
2976 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2977 new_settings(hdev, cmd->sk);
2983 hci_dev_unlock(hdev);
2986 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2994 BT_DBG("%s", hdev->name);
2996 if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
2997 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3001 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3004 if (!hdev_is_powered(hdev))
3005 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3008 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3009 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3012 hci_dev_lock(hdev);
3014 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3015 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3020 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3022 hdev);
3026 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3033 hci_req_init(&req, hdev);
3039 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3045 hci_dev_unlock(hdev);
3061 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3075 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3079 BT_DBG("%s key_count %u", hdev->name, key_count);
3085 return cmd_status(sk, hdev->id,
3090 hci_dev_lock(hdev);
3092 hci_smp_ltks_clear(hdev);
3103 hci_add_ltk(hdev, &key->addr.bdaddr,
3109 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3112 hci_dev_unlock(hdev);
3118 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3173 struct hci_dev *hdev = NULL;
3202 hdev = hci_dev_get(index);
3203 if (!hdev) {
3218 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3219 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3234 if (hdev)
3235 mgmt_init_hdev(sk, hdev);
3239 err = handler->func(sk, hdev, cp, len);
3246 if (hdev)
3247 hci_dev_put(hdev);
3261 int mgmt_index_added(struct hci_dev *hdev)
3263 if (!mgmt_valid_hdev(hdev))
3266 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
3269 int mgmt_index_removed(struct hci_dev *hdev)
3273 if (!mgmt_valid_hdev(hdev))
3276 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3278 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
3283 struct hci_dev *hdev;
3291 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3305 struct hci_dev *hdev = req->hdev;
3314 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3316 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3323 static void powered_complete(struct hci_dev *hdev, u8 status)
3325 struct cmd_lookup match = { NULL, hdev };
3329 hci_dev_lock(hdev);
3331 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3333 new_settings(hdev, match.sk);
3335 hci_dev_unlock(hdev);
3341 static int powered_update_hci(struct hci_dev *hdev)
3346 hci_req_init(&req, hdev);
3348 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3349 !lmp_host_ssp_capable(hdev)) {
3355 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
3356 lmp_bredr_capable(hdev)) {
3360 cp.simul = lmp_le_br_capable(hdev);
3365 if (cp.le != lmp_host_le_capable(hdev) ||
3366 cp.simul != lmp_host_le_br_capable(hdev))
3371 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3372 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3376 if (lmp_bredr_capable(hdev)) {
3386 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3388 struct cmd_lookup match = { NULL, hdev };
3393 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3397 if (powered_update_hci(hdev) == 0)
3400 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
3405 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3406 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3408 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3409 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3413 err = new_settings(hdev, match.sk);
3421 int mgmt_set_powered_failed(struct hci_dev *hdev, int err)
3426 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
3435 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
3442 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3444 struct cmd_lookup match = { NULL, hdev };
3449 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3452 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3456 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3460 err = new_settings(hdev, match.sk);
3468 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3475 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3478 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3482 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3485 err = new_settings(hdev, cmd ? cmd->sk : NULL);
3490 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3495 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3499 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3505 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3519 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
3522 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3541 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
3545 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3568 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3592 struct hci_dev *hdev = data;
3600 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3607 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3614 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3620 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3626 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3627 hdev);
3632 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3639 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3640 hdev);
3642 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3657 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3666 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
3669 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3677 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
3681 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3688 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3695 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3703 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3710 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3717 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3725 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3731 BT_DBG("%s", hdev->name);
3738 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
3742 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3747 BT_DBG("%s", hdev->name);
3752 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
3756 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3764 cmd = mgmt_pending_find(opcode, hdev);
3770 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3778 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3781 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3785 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3788 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3793 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3796 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3800 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3803 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3808 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3814 BT_DBG("%s", hdev->name);
3821 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
3824 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3833 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
3836 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3838 struct cmd_lookup match = { NULL, hdev };
3844 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3849 if (test_bit(HCI_AUTH, &hdev->flags)) {
3850 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3853 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3857 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3861 err = new_settings(hdev, match.sk);
3871 struct hci_dev *hdev = req->hdev;
3874 if (!lmp_ext_inq_capable(hdev))
3877 memset(hdev->eir, 0, sizeof(hdev->eir));
3884 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3886 struct cmd_lookup match = { NULL, hdev };
3895 &hdev->dev_flags))
3896 err = new_settings(hdev, NULL);
3898 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
3905 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3908 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3912 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3915 err = new_settings(hdev, match.sk);
3920 hci_req_init(&req, hdev);
3922 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3942 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3945 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3948 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
3949 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
3950 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
3953 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
3962 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
3972 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
3974 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3976 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3981 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
3985 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
3989 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
3995 BT_DBG("%s status %u", hdev->name, status);
3997 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4002 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4010 err = cmd_complete(cmd->sk, hdev->id,
4020 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4022 struct cmd_lookup match = { NULL, hdev };
4030 &hdev->dev_flags))
4031 err = new_settings(hdev, NULL);
4033 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
4040 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4043 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4047 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
4050 err = new_settings(hdev, match.sk);
4058 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4090 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
4093 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4113 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
4117 int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
4123 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4125 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4129 type = hdev->discovery.type;
4131 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4138 int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
4143 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4147 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4148 &hdev->discovery.type, sizeof(hdev->discovery.type));
4154 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4159 BT_DBG("%s discovering %u", hdev->name, discovering);
4162 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4164 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4167 u8 type = hdev->discovery.type;
4169 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4175 ev.type = hdev->discovery.type;
4178 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
4181 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4186 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4191 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4195 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4200 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4205 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
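
The hdev_is_powered() macro matched at lines 119-120 is the gate used by most of the handlers listed above: the controller counts as powered only while HCI_UP is set in hdev->flags and HCI_AUTO_OFF is still clear in hdev->dev_flags. Below is a minimal standalone C sketch of that flag logic; the bit positions and the simplified struct hci_dev and test_bit() here are illustrative stand-ins, not the kernel's real definitions (those live in include/net/bluetooth/hci.h).

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical bit positions for illustration only; the kernel
     * defines the real values in include/net/bluetooth/hci.h. */
    #define HCI_UP       0
    #define HCI_AUTO_OFF 2

    struct hci_dev {
            unsigned long flags;      /* runtime state bits, e.g. HCI_UP */
            unsigned long dev_flags;  /* management state bits, e.g. HCI_AUTO_OFF */
    };

    static bool test_bit(int nr, const unsigned long *addr)
    {
            return (*addr >> nr) & 1UL;
    }

    /* Mirrors the macro matched at lines 119-120: the device is treated
     * as powered only when it is up and not in the auto-power-off
     * grace period. */
    static bool hdev_is_powered(const struct hci_dev *hdev)
    {
            return test_bit(HCI_UP, &hdev->flags) &&
                   !test_bit(HCI_AUTO_OFF, &hdev->dev_flags);
    }

    int main(void)
    {
            struct hci_dev hdev = { .flags = 1UL << HCI_UP, .dev_flags = 0 };

            printf("powered: %d\n", hdev_is_powered(&hdev));  /* prints 1 */

            hdev.dev_flags |= 1UL << HCI_AUTO_OFF;
            printf("powered: %d\n", hdev_is_powered(&hdev));  /* prints 0 */
            return 0;
    }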