hci_event.c revision 40b552aa5a0bfa785bc7ddb5c2d7965b1e0bb08d
1/*
2   BlueZ - Bluetooth protocol stack for Linux
3   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License version 2 as
9   published by the Free Software Foundation;
10
11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22   SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI event handling. */
26
27#include <asm/unaligned.h>
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/mgmt.h>
32
33#include "a2mp.h"
34#include "amp.h"
35
36/* Handle HCI Event packets */
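/* Handlers named hci_cc_* process Command Complete events for the
 * corresponding HCI command, hci_cs_* process Command Status events,
 * and the hci_*_evt functions handle the remaining HCI events.
 */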
37
38static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
39{
40	__u8 status = *((__u8 *) skb->data);
41
42	BT_DBG("%s status 0x%2.2x", hdev->name, status);
43
44	if (status)
45		return;
46
47	clear_bit(HCI_INQUIRY, &hdev->flags);
48	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
49	wake_up_bit(&hdev->flags, HCI_INQUIRY);
50
51	hci_conn_check_pending(hdev);
52}
53
54static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
55{
56	__u8 status = *((__u8 *) skb->data);
57
58	BT_DBG("%s status 0x%2.2x", hdev->name, status);
59
60	if (status)
61		return;
62
63	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
64}
65
66static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
67{
68	__u8 status = *((__u8 *) skb->data);
69
70	BT_DBG("%s status 0x%2.2x", hdev->name, status);
71
72	if (status)
73		return;
74
75	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
76
77	hci_conn_check_pending(hdev);
78}
79
80static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
81					  struct sk_buff *skb)
82{
83	BT_DBG("%s", hdev->name);
84}
85
86static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
87{
88	struct hci_rp_role_discovery *rp = (void *) skb->data;
89	struct hci_conn *conn;
90
91	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
92
93	if (rp->status)
94		return;
95
96	hci_dev_lock(hdev);
97
98	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
99	if (conn) {
100		if (rp->role)
101			conn->link_mode &= ~HCI_LM_MASTER;
102		else
103			conn->link_mode |= HCI_LM_MASTER;
104	}
105
106	hci_dev_unlock(hdev);
107}
108
109static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
110{
111	struct hci_rp_read_link_policy *rp = (void *) skb->data;
112	struct hci_conn *conn;
113
114	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
115
116	if (rp->status)
117		return;
118
119	hci_dev_lock(hdev);
120
121	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
122	if (conn)
123		conn->link_policy = __le16_to_cpu(rp->policy);
124
125	hci_dev_unlock(hdev);
126}
127
128static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
129{
130	struct hci_rp_write_link_policy *rp = (void *) skb->data;
131	struct hci_conn *conn;
132	void *sent;
133
134	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
135
136	if (rp->status)
137		return;
138
139	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
140	if (!sent)
141		return;
142
143	hci_dev_lock(hdev);
144
145	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
146	if (conn)
147		conn->link_policy = get_unaligned_le16(sent + 2);
148
149	hci_dev_unlock(hdev);
150}
151
152static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
153					struct sk_buff *skb)
154{
155	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
156
157	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
158
159	if (rp->status)
160		return;
161
162	hdev->link_policy = __le16_to_cpu(rp->policy);
163}
164
165static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
166					 struct sk_buff *skb)
167{
168	__u8 status = *((__u8 *) skb->data);
169	void *sent;
170
171	BT_DBG("%s status 0x%2.2x", hdev->name, status);
172
173	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
174	if (!sent)
175		return;
176
177	if (!status)
178		hdev->link_policy = get_unaligned_le16(sent);
179}
180
181static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
182{
183	__u8 status = *((__u8 *) skb->data);
184
185	BT_DBG("%s status 0x%2.2x", hdev->name, status);
186
187	clear_bit(HCI_RESET, &hdev->flags);
188
189	/* Reset all non-persistent flags */
190	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
191
192	hdev->discovery.state = DISCOVERY_STOPPED;
193	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
194	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
195
196	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
197	hdev->adv_data_len = 0;
198
199	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
200	hdev->scan_rsp_data_len = 0;
201
202	hdev->ssp_debug_mode = 0;
203}
204
205static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
206{
207	__u8 status = *((__u8 *) skb->data);
208	void *sent;
209
210	BT_DBG("%s status 0x%2.2x", hdev->name, status);
211
212	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
213	if (!sent)
214		return;
215
216	hci_dev_lock(hdev);
217
218	if (test_bit(HCI_MGMT, &hdev->dev_flags))
219		mgmt_set_local_name_complete(hdev, sent, status);
220	else if (!status)
221		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
222
223	hci_dev_unlock(hdev);
224}
225
226static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
227{
228	struct hci_rp_read_local_name *rp = (void *) skb->data;
229
230	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
231
232	if (rp->status)
233		return;
234
235	if (test_bit(HCI_SETUP, &hdev->dev_flags))
236		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
237}
238
239static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
240{
241	__u8 status = *((__u8 *) skb->data);
242	void *sent;
243
244	BT_DBG("%s status 0x%2.2x", hdev->name, status);
245
246	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
247	if (!sent)
248		return;
249
250	if (!status) {
251		__u8 param = *((__u8 *) sent);
252
253		if (param == AUTH_ENABLED)
254			set_bit(HCI_AUTH, &hdev->flags);
255		else
256			clear_bit(HCI_AUTH, &hdev->flags);
257	}
258
259	if (test_bit(HCI_MGMT, &hdev->dev_flags))
260		mgmt_auth_enable_complete(hdev, status);
261}
262
263static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
264{
265	__u8 status = *((__u8 *) skb->data);
266	void *sent;
267
268	BT_DBG("%s status 0x%2.2x", hdev->name, status);
269
270	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
271	if (!sent)
272		return;
273
274	if (!status) {
275		__u8 param = *((__u8 *) sent);
276
277		if (param)
278			set_bit(HCI_ENCRYPT, &hdev->flags);
279		else
280			clear_bit(HCI_ENCRYPT, &hdev->flags);
281	}
282}
283
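/* Keep the HCI_ISCAN and HCI_PSCAN flags in sync with the scan mode
 * written to the controller and notify the management interface about
 * discoverable/connectable changes.
 */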
284static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
285{
286	__u8 param, status = *((__u8 *) skb->data);
287	int old_pscan, old_iscan;
288	void *sent;
289
290	BT_DBG("%s status 0x%2.2x", hdev->name, status);
291
292	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
293	if (!sent)
294		return;
295
296	param = *((__u8 *) sent);
297
298	hci_dev_lock(hdev);
299
300	if (status) {
301		mgmt_write_scan_failed(hdev, param, status);
302		hdev->discov_timeout = 0;
303		goto done;
304	}
305
306	/* We need to ensure that we set this back on if someone changed
307	 * the scan mode through a raw HCI socket.
308	 */
309	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
310
311	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
312	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
313
314	if (param & SCAN_INQUIRY) {
315		set_bit(HCI_ISCAN, &hdev->flags);
316		if (!old_iscan)
317			mgmt_discoverable(hdev, 1);
318	} else if (old_iscan)
319		mgmt_discoverable(hdev, 0);
320
321	if (param & SCAN_PAGE) {
322		set_bit(HCI_PSCAN, &hdev->flags);
323		if (!old_pscan)
324			mgmt_connectable(hdev, 1);
325	} else if (old_pscan)
326		mgmt_connectable(hdev, 0);
327
328done:
329	hci_dev_unlock(hdev);
330}
331
332static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
333{
334	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
335
336	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
337
338	if (rp->status)
339		return;
340
341	memcpy(hdev->dev_class, rp->dev_class, 3);
342
343	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
344	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
345}
346
347static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
348{
349	__u8 status = *((__u8 *) skb->data);
350	void *sent;
351
352	BT_DBG("%s status 0x%2.2x", hdev->name, status);
353
354	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
355	if (!sent)
356		return;
357
358	hci_dev_lock(hdev);
359
360	if (status == 0)
361		memcpy(hdev->dev_class, sent, 3);
362
363	if (test_bit(HCI_MGMT, &hdev->dev_flags))
364		mgmt_set_class_of_dev_complete(hdev, sent, status);
365
366	hci_dev_unlock(hdev);
367}
368
369static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
370{
371	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
372	__u16 setting;
373
374	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
375
376	if (rp->status)
377		return;
378
379	setting = __le16_to_cpu(rp->voice_setting);
380
381	if (hdev->voice_setting == setting)
382		return;
383
384	hdev->voice_setting = setting;
385
386	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
387
388	if (hdev->notify)
389		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
390}
391
392static void hci_cc_write_voice_setting(struct hci_dev *hdev,
393				       struct sk_buff *skb)
394{
395	__u8 status = *((__u8 *) skb->data);
396	__u16 setting;
397	void *sent;
398
399	BT_DBG("%s status 0x%2.2x", hdev->name, status);
400
401	if (status)
402		return;
403
404	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
405	if (!sent)
406		return;
407
408	setting = get_unaligned_le16(sent);
409
410	if (hdev->voice_setting == setting)
411		return;
412
413	hdev->voice_setting = setting;
414
415	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
416
417	if (hdev->notify)
418		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
419}
420
421static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
422					  struct sk_buff *skb)
423{
424	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
425
426	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
427
428	if (rp->status)
429		return;
430
431	hdev->num_iac = rp->num_iac;
432
433	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
434}
435
436static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
437{
438	__u8 status = *((__u8 *) skb->data);
439	struct hci_cp_write_ssp_mode *sent;
440
441	BT_DBG("%s status 0x%2.2x", hdev->name, status);
442
443	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
444	if (!sent)
445		return;
446
447	if (!status) {
448		if (sent->mode)
449			hdev->features[1][0] |= LMP_HOST_SSP;
450		else
451			hdev->features[1][0] &= ~LMP_HOST_SSP;
452	}
453
454	if (test_bit(HCI_MGMT, &hdev->dev_flags))
455		mgmt_ssp_enable_complete(hdev, sent->mode, status);
456	else if (!status) {
457		if (sent->mode)
458			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
459		else
460			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
461	}
462}
463
464static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
465{
466	u8 status = *((u8 *) skb->data);
467	struct hci_cp_write_sc_support *sent;
468
469	BT_DBG("%s status 0x%2.2x", hdev->name, status);
470
471	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
472	if (!sent)
473		return;
474
475	if (!status) {
476		if (sent->support)
477			hdev->features[1][0] |= LMP_HOST_SC;
478		else
479			hdev->features[1][0] &= ~LMP_HOST_SC;
480	}
481
482	if (test_bit(HCI_MGMT, &hdev->dev_flags))
483		mgmt_sc_enable_complete(hdev, sent->support, status);
484	else if (!status) {
485		if (sent->support)
486			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
487		else
488			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
489	}
490}
491
492static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
493{
494	struct hci_rp_read_local_version *rp = (void *) skb->data;
495
496	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
497
498	if (rp->status)
499		return;
500
501	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
502		hdev->hci_ver = rp->hci_ver;
503		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
504		hdev->lmp_ver = rp->lmp_ver;
505		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
506		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
507	}
508}
509
510static void hci_cc_read_local_commands(struct hci_dev *hdev,
511				       struct sk_buff *skb)
512{
513	struct hci_rp_read_local_commands *rp = (void *) skb->data;
514
515	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
516
517	if (rp->status)
518		return;
519
520	if (test_bit(HCI_SETUP, &hdev->dev_flags))
521		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
522}
523
524static void hci_cc_read_local_features(struct hci_dev *hdev,
525				       struct sk_buff *skb)
526{
527	struct hci_rp_read_local_features *rp = (void *) skb->data;
528
529	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
530
531	if (rp->status)
532		return;
533
534	memcpy(hdev->features, rp->features, 8);
535
536	/* Adjust the default settings according to the features
537	 * supported by the device. */
538
539	if (hdev->features[0][0] & LMP_3SLOT)
540		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
541
542	if (hdev->features[0][0] & LMP_5SLOT)
543		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
544
545	if (hdev->features[0][1] & LMP_HV2) {
546		hdev->pkt_type  |= (HCI_HV2);
547		hdev->esco_type |= (ESCO_HV2);
548	}
549
550	if (hdev->features[0][1] & LMP_HV3) {
551		hdev->pkt_type  |= (HCI_HV3);
552		hdev->esco_type |= (ESCO_HV3);
553	}
554
555	if (lmp_esco_capable(hdev))
556		hdev->esco_type |= (ESCO_EV3);
557
558	if (hdev->features[0][4] & LMP_EV4)
559		hdev->esco_type |= (ESCO_EV4);
560
561	if (hdev->features[0][4] & LMP_EV5)
562		hdev->esco_type |= (ESCO_EV5);
563
564	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
565		hdev->esco_type |= (ESCO_2EV3);
566
567	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
568		hdev->esco_type |= (ESCO_3EV3);
569
570	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
571		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
572}
573
574static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
575					   struct sk_buff *skb)
576{
577	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
578
579	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
580
581	if (rp->status)
582		return;
583
584	if (hdev->max_page < rp->max_page)
585		hdev->max_page = rp->max_page;
586
587	if (rp->page < HCI_MAX_PAGES)
588		memcpy(hdev->features[rp->page], rp->features, 8);
589}
590
591static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
592					  struct sk_buff *skb)
593{
594	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
595
596	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
597
598	if (!rp->status)
599		hdev->flow_ctl_mode = rp->mode;
600}
601
602static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
603{
604	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
605
606	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
607
608	if (rp->status)
609		return;
610
611	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
612	hdev->sco_mtu  = rp->sco_mtu;
613	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
614	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
615
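	/* Some controllers report SCO buffer values that are not usable;
	 * the fixup quirk replaces them with known-good defaults.
	 */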
616	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
617		hdev->sco_mtu  = 64;
618		hdev->sco_pkts = 8;
619	}
620
621	hdev->acl_cnt = hdev->acl_pkts;
622	hdev->sco_cnt = hdev->sco_pkts;
623
624	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
625	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
626}
627
628static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
629{
630	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
631
632	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
633
634	if (!rp->status)
635		bacpy(&hdev->bdaddr, &rp->bdaddr);
636}
637
638static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
639					   struct sk_buff *skb)
640{
641	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
642
643	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
644
645	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
646		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
647		hdev->page_scan_window = __le16_to_cpu(rp->window);
648	}
649}
650
651static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
652					    struct sk_buff *skb)
653{
654	u8 status = *((u8 *) skb->data);
655	struct hci_cp_write_page_scan_activity *sent;
656
657	BT_DBG("%s status 0x%2.2x", hdev->name, status);
658
659	if (status)
660		return;
661
662	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
663	if (!sent)
664		return;
665
666	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
667	hdev->page_scan_window = __le16_to_cpu(sent->window);
668}
669
670static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
671					   struct sk_buff *skb)
672{
673	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
674
675	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
676
677	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
678		hdev->page_scan_type = rp->type;
679}
680
681static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
682					struct sk_buff *skb)
683{
684	u8 status = *((u8 *) skb->data);
685	u8 *type;
686
687	BT_DBG("%s status 0x%2.2x", hdev->name, status);
688
689	if (status)
690		return;
691
692	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
693	if (type)
694		hdev->page_scan_type = *type;
695}
696
697static void hci_cc_read_data_block_size(struct hci_dev *hdev,
698					struct sk_buff *skb)
699{
700	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
701
702	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
703
704	if (rp->status)
705		return;
706
707	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
708	hdev->block_len = __le16_to_cpu(rp->block_len);
709	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
710
711	hdev->block_cnt = hdev->num_blocks;
712
713	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
714	       hdev->block_cnt, hdev->block_len);
715}
716
717static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
718				       struct sk_buff *skb)
719{
720	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
721
722	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
723
724	if (rp->status)
725		goto a2mp_rsp;
726
727	hdev->amp_status = rp->amp_status;
728	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
729	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
730	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
731	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
732	hdev->amp_type = rp->amp_type;
733	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
734	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
735	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
736	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
737
738a2mp_rsp:
739	a2mp_send_getinfo_rsp(hdev);
740}
741
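/* The local AMP assoc data may arrive in fragments: accumulate them in
 * hdev->loc_assoc and keep requesting further fragments until the
 * remaining length fits into a single response.
 */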
742static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
743					struct sk_buff *skb)
744{
745	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
746	struct amp_assoc *assoc = &hdev->loc_assoc;
747	size_t rem_len, frag_len;
748
749	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
750
751	if (rp->status)
752		goto a2mp_rsp;
753
754	frag_len = skb->len - sizeof(*rp);
755	rem_len = __le16_to_cpu(rp->rem_len);
756
757	if (rem_len > frag_len) {
758		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
759
760		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
761		assoc->offset += frag_len;
762
763		/* Read other fragments */
764		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
765
766		return;
767	}
768
769	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
770	assoc->len = assoc->offset + rem_len;
771	assoc->offset = 0;
772
773a2mp_rsp:
774	/* Send A2MP Rsp when all fragments are received */
775	a2mp_send_getampassoc_rsp(hdev, rp->status);
776	a2mp_send_create_phy_link_req(hdev, rp->status);
777}
778
779static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
780					 struct sk_buff *skb)
781{
782	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
783
784	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
785
786	if (!rp->status)
787		hdev->inq_tx_power = rp->tx_power;
788}
789
790static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
791{
792	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
793	struct hci_cp_pin_code_reply *cp;
794	struct hci_conn *conn;
795
796	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
797
798	hci_dev_lock(hdev);
799
800	if (test_bit(HCI_MGMT, &hdev->dev_flags))
801		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
802
803	if (rp->status)
804		goto unlock;
805
806	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
807	if (!cp)
808		goto unlock;
809
810	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
811	if (conn)
812		conn->pin_length = cp->pin_len;
813
814unlock:
815	hci_dev_unlock(hdev);
816}
817
818static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
819{
820	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
821
822	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
823
824	hci_dev_lock(hdev);
825
826	if (test_bit(HCI_MGMT, &hdev->dev_flags))
827		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
828						 rp->status);
829
830	hci_dev_unlock(hdev);
831}
832
833static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
834				       struct sk_buff *skb)
835{
836	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
837
838	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
839
840	if (rp->status)
841		return;
842
843	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
844	hdev->le_pkts = rp->le_max_pkt;
845
846	hdev->le_cnt = hdev->le_pkts;
847
848	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
849}
850
851static void hci_cc_le_read_local_features(struct hci_dev *hdev,
852					  struct sk_buff *skb)
853{
854	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
855
856	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
857
858	if (!rp->status)
859		memcpy(hdev->le_features, rp->features, 8);
860}
861
862static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
863					struct sk_buff *skb)
864{
865	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
866
867	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
868
869	if (!rp->status)
870		hdev->adv_tx_power = rp->tx_power;
871}
872
873static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
874{
875	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
876
877	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
878
879	hci_dev_lock(hdev);
880
881	if (test_bit(HCI_MGMT, &hdev->dev_flags))
882		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
883						 rp->status);
884
885	hci_dev_unlock(hdev);
886}
887
888static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
889					  struct sk_buff *skb)
890{
891	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
892
893	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
894
895	hci_dev_lock(hdev);
896
897	if (test_bit(HCI_MGMT, &hdev->dev_flags))
898		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
899						     ACL_LINK, 0, rp->status);
900
901	hci_dev_unlock(hdev);
902}
903
904static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
905{
906	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
907
908	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
909
910	hci_dev_lock(hdev);
911
912	if (test_bit(HCI_MGMT, &hdev->dev_flags))
913		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
914						 0, rp->status);
915
916	hci_dev_unlock(hdev);
917}
918
919static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
920					  struct sk_buff *skb)
921{
922	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
923
924	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
925
926	hci_dev_lock(hdev);
927
928	if (test_bit(HCI_MGMT, &hdev->dev_flags))
929		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
930						     ACL_LINK, 0, rp->status);
931
932	hci_dev_unlock(hdev);
933}
934
935static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
936				       struct sk_buff *skb)
937{
938	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
939
940	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
941
942	hci_dev_lock(hdev);
943	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
944					  NULL, NULL, rp->status);
945	hci_dev_unlock(hdev);
946}
947
948static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
949					   struct sk_buff *skb)
950{
951	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
952
953	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
954
955	hci_dev_lock(hdev);
956	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
957					  rp->hash256, rp->randomizer256,
958					  rp->status);
959	hci_dev_unlock(hdev);
960}
961
963static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
964{
965	__u8 status = *((__u8 *) skb->data);
966	bdaddr_t *sent;
967
968	BT_DBG("%s status 0x%2.2x", hdev->name, status);
969
970	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
971	if (!sent)
972		return;
973
974	hci_dev_lock(hdev);
975
976	if (!status)
977		bacpy(&hdev->random_addr, sent);
978
979	hci_dev_unlock(hdev);
980}
981
982static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
983{
984	__u8 *sent, status = *((__u8 *) skb->data);
985
986	BT_DBG("%s status 0x%2.2x", hdev->name, status);
987
988	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
989	if (!sent)
990		return;
991
992	hci_dev_lock(hdev);
993
994	if (!status)
995		mgmt_advertising(hdev, *sent);
996
997	hci_dev_unlock(hdev);
998}
999
1000static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1001				      struct sk_buff *skb)
1002{
1003	struct hci_cp_le_set_scan_enable *cp;
1004	__u8 status = *((__u8 *) skb->data);
1005
1006	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1007
1008	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1009	if (!cp)
1010		return;
1011
1012	if (status)
1013		return;
1014
1015	switch (cp->enable) {
1016	case LE_SCAN_ENABLE:
1017		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1018		break;
1019
1020	case LE_SCAN_DISABLE:
1021		/* Cancel this timer so that we don't try to disable scanning
1022		 * when it's already disabled.
1023		 */
1024		cancel_delayed_work(&hdev->le_scan_disable);
1025
1026		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1027		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1028	 * interrupted scanning due to a connect request. Therefore,
1029	 * mark discovery as stopped.
1030		 */
1031		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1032				       &hdev->dev_flags))
1033			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1034		break;
1035
1036	default:
1037		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1038		break;
1039	}
1040}
1041
1042static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1043					   struct sk_buff *skb)
1044{
1045	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1046
1047	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1048
1049	if (!rp->status)
1050		hdev->le_white_list_size = rp->size;
1051}
1052
1053static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1054				       struct sk_buff *skb)
1055{
1056	__u8 status = *((__u8 *) skb->data);
1057
1058	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1059
1060	if (!status)
1061		hci_white_list_clear(hdev);
1062}
1063
1064static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1065					struct sk_buff *skb)
1066{
1067	struct hci_cp_le_add_to_white_list *sent;
1068	__u8 status = *((__u8 *) skb->data);
1069
1070	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1071
1072	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1073	if (!sent)
1074		return;
1075
1076	if (!status)
1077		hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
1078}
1079
1080static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1081					  struct sk_buff *skb)
1082{
1083	struct hci_cp_le_del_from_white_list *sent;
1084	__u8 status = *((__u8 *) skb->data);
1085
1086	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1087
1088	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1089	if (!sent)
1090		return;
1091
1092	if (!status)
1093		hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
1094}
1095
1096static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1097					    struct sk_buff *skb)
1098{
1099	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1100
1101	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1102
1103	if (!rp->status)
1104		memcpy(hdev->le_states, rp->le_states, 8);
1105}
1106
1107static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1108					   struct sk_buff *skb)
1109{
1110	struct hci_cp_write_le_host_supported *sent;
1111	__u8 status = *((__u8 *) skb->data);
1112
1113	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1114
1115	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1116	if (!sent)
1117		return;
1118
1119	if (!status) {
1120		if (sent->le) {
1121			hdev->features[1][0] |= LMP_HOST_LE;
1122			set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1123		} else {
1124			hdev->features[1][0] &= ~LMP_HOST_LE;
1125			clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1126			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1127		}
1128
1129		if (sent->simul)
1130			hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1131		else
1132			hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1133	}
1134}
1135
1136static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1137{
1138	struct hci_cp_le_set_adv_param *cp;
1139	u8 status = *((u8 *) skb->data);
1140
1141	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1142
1143	if (status)
1144		return;
1145
1146	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1147	if (!cp)
1148		return;
1149
1150	hci_dev_lock(hdev);
1151	hdev->adv_addr_type = cp->own_address_type;
1152	hci_dev_unlock(hdev);
1153}
1154
1155static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1156					  struct sk_buff *skb)
1157{
1158	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1159
1160	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1161	       hdev->name, rp->status, rp->phy_handle);
1162
1163	if (rp->status)
1164		return;
1165
1166	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1167}
1168
1169static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
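/* Command Status handlers: these commands complete asynchronously and
 * report their final result in a later event, so only the status of
 * the command submission is available here.
 */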
1170{
1171	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1172
1173	if (status) {
1174		hci_conn_check_pending(hdev);
1175		return;
1176	}
1177
1178	set_bit(HCI_INQUIRY, &hdev->flags);
1179}
1180
1181static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1182{
1183	struct hci_cp_create_conn *cp;
1184	struct hci_conn *conn;
1185
1186	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1187
1188	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1189	if (!cp)
1190		return;
1191
1192	hci_dev_lock(hdev);
1193
1194	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1195
1196	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1197
1198	if (status) {
1199		if (conn && conn->state == BT_CONNECT) {
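			/* 0x0c is the HCI "Command Disallowed" error; in that
			 * case keep the connection in BT_CONNECT2 so the
			 * attempt can be retried (limited by conn->attempt).
			 */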
1200			if (status != 0x0c || conn->attempt > 2) {
1201				conn->state = BT_CLOSED;
1202				hci_proto_connect_cfm(conn, status);
1203				hci_conn_del(conn);
1204			} else
1205				conn->state = BT_CONNECT2;
1206		}
1207	} else {
1208		if (!conn) {
1209			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1210			if (conn) {
1211				conn->out = true;
1212				conn->link_mode |= HCI_LM_MASTER;
1213			} else
1214				BT_ERR("No memory for new connection");
1215		}
1216	}
1217
1218	hci_dev_unlock(hdev);
1219}
1220
1221static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1222{
1223	struct hci_cp_add_sco *cp;
1224	struct hci_conn *acl, *sco;
1225	__u16 handle;
1226
1227	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1228
1229	if (!status)
1230		return;
1231
1232	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1233	if (!cp)
1234		return;
1235
1236	handle = __le16_to_cpu(cp->handle);
1237
1238	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1239
1240	hci_dev_lock(hdev);
1241
1242	acl = hci_conn_hash_lookup_handle(hdev, handle);
1243	if (acl) {
1244		sco = acl->link;
1245		if (sco) {
1246			sco->state = BT_CLOSED;
1247
1248			hci_proto_connect_cfm(sco, status);
1249			hci_conn_del(sco);
1250		}
1251	}
1252
1253	hci_dev_unlock(hdev);
1254}
1255
1256static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1257{
1258	struct hci_cp_auth_requested *cp;
1259	struct hci_conn *conn;
1260
1261	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1262
1263	if (!status)
1264		return;
1265
1266	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1267	if (!cp)
1268		return;
1269
1270	hci_dev_lock(hdev);
1271
1272	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1273	if (conn) {
1274		if (conn->state == BT_CONFIG) {
1275			hci_proto_connect_cfm(conn, status);
1276			hci_conn_drop(conn);
1277		}
1278	}
1279
1280	hci_dev_unlock(hdev);
1281}
1282
1283static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1284{
1285	struct hci_cp_set_conn_encrypt *cp;
1286	struct hci_conn *conn;
1287
1288	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1289
1290	if (!status)
1291		return;
1292
1293	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1294	if (!cp)
1295		return;
1296
1297	hci_dev_lock(hdev);
1298
1299	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1300	if (conn) {
1301		if (conn->state == BT_CONFIG) {
1302			hci_proto_connect_cfm(conn, status);
1303			hci_conn_drop(conn);
1304		}
1305	}
1306
1307	hci_dev_unlock(hdev);
1308}
1309
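/* Decide whether an outgoing connection that is still in BT_CONFIG
 * needs authentication before it is reported as connected.
 */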
1310static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1311				    struct hci_conn *conn)
1312{
1313	if (conn->state != BT_CONFIG || !conn->out)
1314		return 0;
1315
1316	if (conn->pending_sec_level == BT_SECURITY_SDP)
1317		return 0;
1318
1319	/* Only request authentication for SSP connections or non-SSP
1320	 * devices with sec_level MEDIUM or HIGH, or if MITM protection
1321	 * is requested.
1322	 */
1323	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1324	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1325	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1326		return 0;
1327
1328	return 1;
1329}
1330
1331static int hci_resolve_name(struct hci_dev *hdev,
1332				   struct inquiry_entry *e)
1333{
1334	struct hci_cp_remote_name_req cp;
1335
1336	memset(&cp, 0, sizeof(cp));
1337
1338	bacpy(&cp.bdaddr, &e->data.bdaddr);
1339	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1340	cp.pscan_mode = e->data.pscan_mode;
1341	cp.clock_offset = e->data.clock_offset;
1342
1343	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1344}
1345
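/* Pick the next inquiry cache entry that still needs its name resolved
 * and issue a remote name request for it. Returns true if a request
 * was sent.
 */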
1346static bool hci_resolve_next_name(struct hci_dev *hdev)
1347{
1348	struct discovery_state *discov = &hdev->discovery;
1349	struct inquiry_entry *e;
1350
1351	if (list_empty(&discov->resolve))
1352		return false;
1353
1354	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1355	if (!e)
1356		return false;
1357
1358	if (hci_resolve_name(hdev, e) == 0) {
1359		e->name_state = NAME_PENDING;
1360		return true;
1361	}
1362
1363	return false;
1364}
1365
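/* Called when a remote name request completes or fails: report the
 * result to the management interface and advance the name resolving
 * phase of an ongoing discovery.
 */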
1366static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1367				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
1368{
1369	struct discovery_state *discov = &hdev->discovery;
1370	struct inquiry_entry *e;
1371
1372	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1373		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1374				      name_len, conn->dev_class);
1375
1376	if (discov->state == DISCOVERY_STOPPED)
1377		return;
1378
1379	if (discov->state == DISCOVERY_STOPPING)
1380		goto discov_complete;
1381
1382	if (discov->state != DISCOVERY_RESOLVING)
1383		return;
1384
1385	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1386	/* If the device was not found in the list of devices whose names are
1387	 * pending, there is no need to continue resolving the next name, as
1388	 * that will be done upon receiving another Remote Name Request
1389	 * Complete event. */
1390	if (!e)
1391		return;
1392
1393	list_del(&e->list);
1394	if (name) {
1395		e->name_state = NAME_KNOWN;
1396		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1397				 e->data.rssi, name, name_len);
1398	} else {
1399		e->name_state = NAME_NOT_KNOWN;
1400	}
1401
1402	if (hci_resolve_next_name(hdev))
1403		return;
1404
1405discov_complete:
1406	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1407}
1408
1409static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1410{
1411	struct hci_cp_remote_name_req *cp;
1412	struct hci_conn *conn;
1413
1414	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1415
1416	/* If successful, wait for the remote name request complete event
1417	 * before checking whether authentication is needed. */
1418	if (!status)
1419		return;
1420
1421	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1422	if (!cp)
1423		return;
1424
1425	hci_dev_lock(hdev);
1426
1427	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1428
1429	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1430		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1431
1432	if (!conn)
1433		goto unlock;
1434
1435	if (!hci_outgoing_auth_needed(hdev, conn))
1436		goto unlock;
1437
1438	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1439		struct hci_cp_auth_requested auth_cp;
1440
1441		auth_cp.handle = __cpu_to_le16(conn->handle);
1442		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1443			     sizeof(auth_cp), &auth_cp);
1444	}
1445
1446unlock:
1447	hci_dev_unlock(hdev);
1448}
1449
1450static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1451{
1452	struct hci_cp_read_remote_features *cp;
1453	struct hci_conn *conn;
1454
1455	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1456
1457	if (!status)
1458		return;
1459
1460	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1461	if (!cp)
1462		return;
1463
1464	hci_dev_lock(hdev);
1465
1466	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1467	if (conn) {
1468		if (conn->state == BT_CONFIG) {
1469			hci_proto_connect_cfm(conn, status);
1470			hci_conn_drop(conn);
1471		}
1472	}
1473
1474	hci_dev_unlock(hdev);
1475}
1476
1477static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1478{
1479	struct hci_cp_read_remote_ext_features *cp;
1480	struct hci_conn *conn;
1481
1482	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1483
1484	if (!status)
1485		return;
1486
1487	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1488	if (!cp)
1489		return;
1490
1491	hci_dev_lock(hdev);
1492
1493	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1494	if (conn) {
1495		if (conn->state == BT_CONFIG) {
1496			hci_proto_connect_cfm(conn, status);
1497			hci_conn_drop(conn);
1498		}
1499	}
1500
1501	hci_dev_unlock(hdev);
1502}
1503
1504static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1505{
1506	struct hci_cp_setup_sync_conn *cp;
1507	struct hci_conn *acl, *sco;
1508	__u16 handle;
1509
1510	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1511
1512	if (!status)
1513		return;
1514
1515	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1516	if (!cp)
1517		return;
1518
1519	handle = __le16_to_cpu(cp->handle);
1520
1521	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1522
1523	hci_dev_lock(hdev);
1524
1525	acl = hci_conn_hash_lookup_handle(hdev, handle);
1526	if (acl) {
1527		sco = acl->link;
1528		if (sco) {
1529			sco->state = BT_CLOSED;
1530
1531			hci_proto_connect_cfm(sco, status);
1532			hci_conn_del(sco);
1533		}
1534	}
1535
1536	hci_dev_unlock(hdev);
1537}
1538
1539static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1540{
1541	struct hci_cp_sniff_mode *cp;
1542	struct hci_conn *conn;
1543
1544	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1545
1546	if (!status)
1547		return;
1548
1549	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1550	if (!cp)
1551		return;
1552
1553	hci_dev_lock(hdev);
1554
1555	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1556	if (conn) {
1557		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1558
1559		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1560			hci_sco_setup(conn, status);
1561	}
1562
1563	hci_dev_unlock(hdev);
1564}
1565
1566static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1567{
1568	struct hci_cp_exit_sniff_mode *cp;
1569	struct hci_conn *conn;
1570
1571	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1572
1573	if (!status)
1574		return;
1575
1576	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1577	if (!cp)
1578		return;
1579
1580	hci_dev_lock(hdev);
1581
1582	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1583	if (conn) {
1584		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1585
1586		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1587			hci_sco_setup(conn, status);
1588	}
1589
1590	hci_dev_unlock(hdev);
1591}
1592
1593static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1594{
1595	struct hci_cp_disconnect *cp;
1596	struct hci_conn *conn;
1597
1598	if (!status)
1599		return;
1600
1601	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1602	if (!cp)
1603		return;
1604
1605	hci_dev_lock(hdev);
1606
1607	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1608	if (conn)
1609		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1610				       conn->dst_type, status);
1611
1612	hci_dev_unlock(hdev);
1613}
1614
1615static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1616{
1617	struct hci_cp_create_phy_link *cp;
1618
1619	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1620
1621	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1622	if (!cp)
1623		return;
1624
1625	hci_dev_lock(hdev);
1626
1627	if (status) {
1628		struct hci_conn *hcon;
1629
1630		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1631		if (hcon)
1632			hci_conn_del(hcon);
1633	} else {
1634		amp_write_remote_assoc(hdev, cp->phy_handle);
1635	}
1636
1637	hci_dev_unlock(hdev);
1638}
1639
1640static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1641{
1642	struct hci_cp_accept_phy_link *cp;
1643
1644	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1645
1646	if (status)
1647		return;
1648
1649	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1650	if (!cp)
1651		return;
1652
1653	amp_write_remote_assoc(hdev, cp->phy_handle);
1654}
1655
1656static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1657{
1658	struct hci_cp_le_create_conn *cp;
1659	struct hci_conn *conn;
1660
1661	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1662
1663	/* All connection failure handling is taken care of by the
1664	 * hci_le_conn_failed function, which is triggered by the HCI
1665	 * request completion callbacks used for connecting.
1666	 */
1667	if (status)
1668		return;
1669
1670	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1671	if (!cp)
1672		return;
1673
1674	hci_dev_lock(hdev);
1675
1676	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1677	if (!conn)
1678		goto unlock;
1679
1680	/* Store the initiator and responder address information which
1681	 * is needed for SMP. These values will not change during the
1682	 * lifetime of the connection.
1683	 */
1684	conn->init_addr_type = cp->own_address_type;
1685	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
1686		bacpy(&conn->init_addr, &hdev->random_addr);
1687	else
1688		bacpy(&conn->init_addr, &hdev->bdaddr);
1689
1690	conn->resp_addr_type = cp->peer_addr_type;
1691	bacpy(&conn->resp_addr, &cp->peer_addr);
1692
1693	/* We don't want the connection attempt to stick around
1694	 * indefinitely since LE doesn't have a page timeout concept
1695	 * like BR/EDR. Set a timer for any connection that doesn't use
1696	 * the white list for connecting.
1697	 */
1698	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
1699		queue_delayed_work(conn->hdev->workqueue,
1700				   &conn->le_conn_timeout,
1701				   HCI_LE_CONN_TIMEOUT);
1702
1703unlock:
1704	hci_dev_unlock(hdev);
1705}
1706
1707static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1708{
1709	__u8 status = *((__u8 *) skb->data);
1710	struct discovery_state *discov = &hdev->discovery;
1711	struct inquiry_entry *e;
1712
1713	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1714
1715	hci_conn_check_pending(hdev);
1716
1717	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1718		return;
1719
1720	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
1721	wake_up_bit(&hdev->flags, HCI_INQUIRY);
1722
1723	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1724		return;
1725
1726	hci_dev_lock(hdev);
1727
1728	if (discov->state != DISCOVERY_FINDING)
1729		goto unlock;
1730
1731	if (list_empty(&discov->resolve)) {
1732		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1733		goto unlock;
1734	}
1735
1736	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1737	if (e && hci_resolve_name(hdev, e) == 0) {
1738		e->name_state = NAME_PENDING;
1739		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1740	} else {
1741		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1742	}
1743
1744unlock:
1745	hci_dev_unlock(hdev);
1746}
1747
1748static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1749{
1750	struct inquiry_data data;
1751	struct inquiry_info *info = (void *) (skb->data + 1);
1752	int num_rsp = *((__u8 *) skb->data);
1753
1754	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1755
1756	if (!num_rsp)
1757		return;
1758
1759	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1760		return;
1761
1762	hci_dev_lock(hdev);
1763
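	/* Plain Inquiry Result events carry neither RSSI nor extended
	 * inquiry data, so default values are reported for those fields.
	 */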
1764	for (; num_rsp; num_rsp--, info++) {
1765		bool name_known, ssp;
1766
1767		bacpy(&data.bdaddr, &info->bdaddr);
1768		data.pscan_rep_mode	= info->pscan_rep_mode;
1769		data.pscan_period_mode	= info->pscan_period_mode;
1770		data.pscan_mode		= info->pscan_mode;
1771		memcpy(data.dev_class, info->dev_class, 3);
1772		data.clock_offset	= info->clock_offset;
1773		data.rssi		= 0x00;
1774		data.ssp_mode		= 0x00;
1775
1776		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1777		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1778				  info->dev_class, 0, !name_known, ssp, NULL,
1779				  0);
1780	}
1781
1782	hci_dev_unlock(hdev);
1783}
1784
1785static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1786{
1787	struct hci_ev_conn_complete *ev = (void *) skb->data;
1788	struct hci_conn *conn;
1789
1790	BT_DBG("%s", hdev->name);
1791
1792	hci_dev_lock(hdev);
1793
1794	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1795	if (!conn) {
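		/* When the controller falls back from eSCO to SCO, the event
		 * reports link type SCO for a connection that was created as
		 * eSCO; look it up as eSCO and fix up its type.
		 */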
1796		if (ev->link_type != SCO_LINK)
1797			goto unlock;
1798
1799		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1800		if (!conn)
1801			goto unlock;
1802
1803		conn->type = SCO_LINK;
1804	}
1805
1806	if (!ev->status) {
1807		conn->handle = __le16_to_cpu(ev->handle);
1808
1809		if (conn->type == ACL_LINK) {
1810			conn->state = BT_CONFIG;
1811			hci_conn_hold(conn);
1812
1813			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1814			    !hci_find_link_key(hdev, &ev->bdaddr))
1815				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1816			else
1817				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1818		} else
1819			conn->state = BT_CONNECTED;
1820
1821		hci_conn_add_sysfs(conn);
1822
1823		if (test_bit(HCI_AUTH, &hdev->flags))
1824			conn->link_mode |= HCI_LM_AUTH;
1825
1826		if (test_bit(HCI_ENCRYPT, &hdev->flags))
1827			conn->link_mode |= HCI_LM_ENCRYPT;
1828
1829		/* Get remote features */
1830		if (conn->type == ACL_LINK) {
1831			struct hci_cp_read_remote_features cp;
1832			cp.handle = ev->handle;
1833			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1834				     sizeof(cp), &cp);
1835		}
1836
1837		/* Set packet type for incoming connection */
1838		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1839			struct hci_cp_change_conn_ptype cp;
1840			cp.handle = ev->handle;
1841			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1842			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1843				     &cp);
1844		}
1845	} else {
1846		conn->state = BT_CLOSED;
1847		if (conn->type == ACL_LINK)
1848			mgmt_connect_failed(hdev, &conn->dst, conn->type,
1849					    conn->dst_type, ev->status);
1850	}
1851
1852	if (conn->type == ACL_LINK)
1853		hci_sco_setup(conn, ev->status);
1854
1855	if (ev->status) {
1856		hci_proto_connect_cfm(conn, ev->status);
1857		hci_conn_del(conn);
1858	} else if (ev->link_type != ACL_LINK)
1859		hci_proto_connect_cfm(conn, ev->status);
1860
1861unlock:
1862	hci_dev_unlock(hdev);
1863
1864	hci_conn_check_pending(hdev);
1865}
1866
1867static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1868{
1869	struct hci_ev_conn_request *ev = (void *) skb->data;
1870	int mask = hdev->link_mode;
1871	__u8 flags = 0;
1872
1873	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
1874	       ev->link_type);
1875
1876	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
1877				      &flags);
1878
1879	if ((mask & HCI_LM_ACCEPT) &&
1880	    !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
1881		/* Connection accepted */
1882		struct inquiry_entry *ie;
1883		struct hci_conn *conn;
1884
1885		hci_dev_lock(hdev);
1886
1887		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1888		if (ie)
1889			memcpy(ie->data.dev_class, ev->dev_class, 3);
1890
1891		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1892					       &ev->bdaddr);
1893		if (!conn) {
1894			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1895			if (!conn) {
1896				BT_ERR("No memory for new connection");
1897				hci_dev_unlock(hdev);
1898				return;
1899			}
1900		}
1901
1902		memcpy(conn->dev_class, ev->dev_class, 3);
1903
1904		hci_dev_unlock(hdev);
1905
1906		if (ev->link_type == ACL_LINK ||
1907		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
1908			struct hci_cp_accept_conn_req cp;
1909			conn->state = BT_CONNECT;
1910
1911			bacpy(&cp.bdaddr, &ev->bdaddr);
1912
1913			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1914				cp.role = 0x00; /* Become master */
1915			else
1916				cp.role = 0x01; /* Remain slave */
1917
1918			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1919				     &cp);
1920		} else if (!(flags & HCI_PROTO_DEFER)) {
1921			struct hci_cp_accept_sync_conn_req cp;
1922			conn->state = BT_CONNECT;
1923
1924			bacpy(&cp.bdaddr, &ev->bdaddr);
1925			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1926
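			/* Default voice parameters: 8000 octets/s (64 kbit/s)
			 * in each direction; max_latency 0xffff and
			 * retrans_effort 0xff mean "don't care".
			 */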
1927			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
1928			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
1929			cp.max_latency    = cpu_to_le16(0xffff);
1930			cp.content_format = cpu_to_le16(hdev->voice_setting);
1931			cp.retrans_effort = 0xff;
1932
1933			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1934				     sizeof(cp), &cp);
1935		} else {
1936			conn->state = BT_CONNECT2;
1937			hci_proto_connect_cfm(conn, 0);
1938		}
1939	} else {
1940		/* Connection rejected */
1941		struct hci_cp_reject_conn_req cp;
1942
1943		bacpy(&cp.bdaddr, &ev->bdaddr);
1944		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1945		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1946	}
1947}
1948
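/* Map HCI disconnect reason codes onto the reason codes used by the
 * management interface.
 */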
1949static u8 hci_to_mgmt_reason(u8 err)
1950{
1951	switch (err) {
1952	case HCI_ERROR_CONNECTION_TIMEOUT:
1953		return MGMT_DEV_DISCONN_TIMEOUT;
1954	case HCI_ERROR_REMOTE_USER_TERM:
1955	case HCI_ERROR_REMOTE_LOW_RESOURCES:
1956	case HCI_ERROR_REMOTE_POWER_OFF:
1957		return MGMT_DEV_DISCONN_REMOTE;
1958	case HCI_ERROR_LOCAL_HOST_TERM:
1959		return MGMT_DEV_DISCONN_LOCAL_HOST;
1960	default:
1961		return MGMT_DEV_DISCONN_UNKNOWN;
1962	}
1963}
1964
1965static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1966{
1967	struct hci_ev_disconn_complete *ev = (void *) skb->data;
1968	u8 reason = hci_to_mgmt_reason(ev->reason);
1969	struct hci_conn_params *params;
1970	struct hci_conn *conn;
1971	bool mgmt_connected;
1972	u8 type;
1973
1974	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1975
1976	hci_dev_lock(hdev);
1977
1978	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1979	if (!conn)
1980		goto unlock;
1981
1982	if (ev->status) {
1983		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1984				       conn->dst_type, ev->status);
1985		goto unlock;
1986	}
1987
1988	conn->state = BT_CLOSED;
1989
1990	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
1991	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
1992				reason, mgmt_connected);
1993
1994	if (conn->type == ACL_LINK && conn->flush_key)
1995		hci_remove_link_key(hdev, &conn->dst);
1996
1997	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
1998	if (params) {
1999		switch (params->auto_connect) {
2000		case HCI_AUTO_CONN_LINK_LOSS:
2001			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2002				break;
2003			/* Fall through */
2004
2005		case HCI_AUTO_CONN_ALWAYS:
2006			hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type);
2007			break;
2008
2009		default:
2010			break;
2011		}
2012	}
2013
2014	type = conn->type;
2015
2016	hci_proto_disconn_cfm(conn, ev->reason);
2017	hci_conn_del(conn);
2018
2019	/* Re-enable advertising if necessary, since it might
2020	 * have been disabled by the connection. From the
2021	 * HCI_LE_Set_Advertise_Enable command description in
2022	 * the core specification (v4.0):
2023	 * "The Controller shall continue advertising until the Host
2024	 * issues an LE_Set_Advertise_Enable command with
2025	 * Advertising_Enable set to 0x00 (Advertising is disabled)
2026	 * or until a connection is created or until the Advertising
2027	 * is timed out due to Directed Advertising."
2028	 */
2029	if (type == LE_LINK)
2030		mgmt_reenable_advertising(hdev);
2031
2032unlock:
2033	hci_dev_unlock(hdev);
2034}
2035
2036static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2037{
2038	struct hci_ev_auth_complete *ev = (void *) skb->data;
2039	struct hci_conn *conn;
2040
2041	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2042
2043	hci_dev_lock(hdev);
2044
2045	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2046	if (!conn)
2047		goto unlock;
2048
2049	if (!ev->status) {
2050		if (!hci_conn_ssp_enabled(conn) &&
2051		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2052			BT_INFO("re-auth of legacy device is not possible.");
2053		} else {
2054			conn->link_mode |= HCI_LM_AUTH;
2055			conn->sec_level = conn->pending_sec_level;
2056		}
2057	} else {
2058		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
2059				 ev->status);
2060	}
2061
2062	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2063	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2064
2065	if (conn->state == BT_CONFIG) {
2066		if (!ev->status && hci_conn_ssp_enabled(conn)) {
2067			struct hci_cp_set_conn_encrypt cp;
2068			cp.handle  = ev->handle;
2069			cp.encrypt = 0x01;
2070			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2071				     &cp);
2072		} else {
2073			conn->state = BT_CONNECTED;
2074			hci_proto_connect_cfm(conn, ev->status);
2075			hci_conn_drop(conn);
2076		}
2077	} else {
2078		hci_auth_cfm(conn, ev->status);
2079
2080		hci_conn_hold(conn);
2081		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2082		hci_conn_drop(conn);
2083	}
2084
2085	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2086		if (!ev->status) {
2087			struct hci_cp_set_conn_encrypt cp;
2088			cp.handle  = ev->handle;
2089			cp.encrypt = 0x01;
2090			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2091				     &cp);
2092		} else {
2093			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2094			hci_encrypt_cfm(conn, ev->status, 0x00);
2095		}
2096	}
2097
2098unlock:
2099	hci_dev_unlock(hdev);
2100}
2101
2102static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2103{
2104	struct hci_ev_remote_name *ev = (void *) skb->data;
2105	struct hci_conn *conn;
2106
2107	BT_DBG("%s", hdev->name);
2108
2109	hci_conn_check_pending(hdev);
2110
2111	hci_dev_lock(hdev);
2112
2113	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2114
2115	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2116		goto check_auth;
2117
2118	if (ev->status == 0)
2119		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2120				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2121	else
2122		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2123
2124check_auth:
2125	if (!conn)
2126		goto unlock;
2127
2128	if (!hci_outgoing_auth_needed(hdev, conn))
2129		goto unlock;
2130
2131	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2132		struct hci_cp_auth_requested cp;
2133		cp.handle = __cpu_to_le16(conn->handle);
2134		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2135	}
2136
2137unlock:
2138	hci_dev_unlock(hdev);
2139}
2140
2141static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2142{
2143	struct hci_ev_encrypt_change *ev = (void *) skb->data;
2144	struct hci_conn *conn;
2145
2146	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2147
2148	hci_dev_lock(hdev);
2149
2150	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2151	if (!conn)
2152		goto unlock;
2153
2154	if (!ev->status) {
2155		if (ev->encrypt) {
2156			/* Encryption implies authentication */
2157			conn->link_mode |= HCI_LM_AUTH;
2158			conn->link_mode |= HCI_LM_ENCRYPT;
2159			conn->sec_level = conn->pending_sec_level;
2160
2161			/* P-256 authentication key implies FIPS */
2162			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2163				conn->link_mode |= HCI_LM_FIPS;
2164
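			/* An encrypt value of 0x02 means the BR/EDR link
			 * uses AES-CCM encryption; encrypted LE links
			 * always use AES-CCM.
			 */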
2165			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2166			    conn->type == LE_LINK)
2167				set_bit(HCI_CONN_AES_CCM, &conn->flags);
2168		} else {
2169			conn->link_mode &= ~HCI_LM_ENCRYPT;
2170			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2171		}
2172	}
2173
2174	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2175
2176	if (ev->status && conn->state == BT_CONNECTED) {
2177		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2178		hci_conn_drop(conn);
2179		goto unlock;
2180	}
2181
2182	if (conn->state == BT_CONFIG) {
2183		if (!ev->status)
2184			conn->state = BT_CONNECTED;
2185
2186		/* In Secure Connections Only mode, do not allow any
2187		 * connections that are not encrypted with AES-CCM
2188		 * using a P-256 authenticated combination key.
2189		 */
2190		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
2191		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2192		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2193			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2194			hci_conn_drop(conn);
2195			goto unlock;
2196		}
2197
2198		hci_proto_connect_cfm(conn, ev->status);
2199		hci_conn_drop(conn);
2200	} else
2201		hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2202
2203unlock:
2204	hci_dev_unlock(hdev);
2205}
2206
2207static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2208					     struct sk_buff *skb)
2209{
2210	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2211	struct hci_conn *conn;
2212
2213	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2214
2215	hci_dev_lock(hdev);
2216
2217	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2218	if (conn) {
2219		if (!ev->status)
2220			conn->link_mode |= HCI_LM_SECURE;
2221
2222		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2223
2224		hci_key_change_cfm(conn, ev->status);
2225	}
2226
2227	hci_dev_unlock(hdev);
2228}
2229
2230static void hci_remote_features_evt(struct hci_dev *hdev,
2231				    struct sk_buff *skb)
2232{
2233	struct hci_ev_remote_features *ev = (void *) skb->data;
2234	struct hci_conn *conn;
2235
2236	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2237
2238	hci_dev_lock(hdev);
2239
2240	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2241	if (!conn)
2242		goto unlock;
2243
2244	if (!ev->status)
2245		memcpy(conn->features[0], ev->features, 8);
2246
2247	if (conn->state != BT_CONFIG)
2248		goto unlock;
2249
2250	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2251		struct hci_cp_read_remote_ext_features cp;
2252		cp.handle = ev->handle;
2253		cp.page = 0x01;
2254		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2255			     sizeof(cp), &cp);
2256		goto unlock;
2257	}
2258
2259	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2260		struct hci_cp_remote_name_req cp;
2261		memset(&cp, 0, sizeof(cp));
2262		bacpy(&cp.bdaddr, &conn->dst);
2263		cp.pscan_rep_mode = 0x02;
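		/* 0x02 = page scan repetition mode R2 */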
2264		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2265	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2266		mgmt_device_connected(hdev, &conn->dst, conn->type,
2267				      conn->dst_type, 0, NULL, 0,
2268				      conn->dev_class);
2269
2270	if (!hci_outgoing_auth_needed(hdev, conn)) {
2271		conn->state = BT_CONNECTED;
2272		hci_proto_connect_cfm(conn, ev->status);
2273		hci_conn_drop(conn);
2274	}
2275
2276unlock:
2277	hci_dev_unlock(hdev);
2278}
2279
2280static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2281{
2282	struct hci_ev_cmd_complete *ev = (void *) skb->data;
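	/* The command's return parameters follow the Command Complete
	 * header; for virtually all commands the first byte is the
	 * status code.
	 */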
2283	u8 status = skb->data[sizeof(*ev)];
2284	__u16 opcode;
2285
2286	skb_pull(skb, sizeof(*ev));
2287
2288	opcode = __le16_to_cpu(ev->opcode);
2289
2290	switch (opcode) {
2291	case HCI_OP_INQUIRY_CANCEL:
2292		hci_cc_inquiry_cancel(hdev, skb);
2293		break;
2294
2295	case HCI_OP_PERIODIC_INQ:
2296		hci_cc_periodic_inq(hdev, skb);
2297		break;
2298
2299	case HCI_OP_EXIT_PERIODIC_INQ:
2300		hci_cc_exit_periodic_inq(hdev, skb);
2301		break;
2302
2303	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2304		hci_cc_remote_name_req_cancel(hdev, skb);
2305		break;
2306
2307	case HCI_OP_ROLE_DISCOVERY:
2308		hci_cc_role_discovery(hdev, skb);
2309		break;
2310
2311	case HCI_OP_READ_LINK_POLICY:
2312		hci_cc_read_link_policy(hdev, skb);
2313		break;
2314
2315	case HCI_OP_WRITE_LINK_POLICY:
2316		hci_cc_write_link_policy(hdev, skb);
2317		break;
2318
2319	case HCI_OP_READ_DEF_LINK_POLICY:
2320		hci_cc_read_def_link_policy(hdev, skb);
2321		break;
2322
2323	case HCI_OP_WRITE_DEF_LINK_POLICY:
2324		hci_cc_write_def_link_policy(hdev, skb);
2325		break;
2326
2327	case HCI_OP_RESET:
2328		hci_cc_reset(hdev, skb);
2329		break;
2330
2331	case HCI_OP_WRITE_LOCAL_NAME:
2332		hci_cc_write_local_name(hdev, skb);
2333		break;
2334
2335	case HCI_OP_READ_LOCAL_NAME:
2336		hci_cc_read_local_name(hdev, skb);
2337		break;
2338
2339	case HCI_OP_WRITE_AUTH_ENABLE:
2340		hci_cc_write_auth_enable(hdev, skb);
2341		break;
2342
2343	case HCI_OP_WRITE_ENCRYPT_MODE:
2344		hci_cc_write_encrypt_mode(hdev, skb);
2345		break;
2346
2347	case HCI_OP_WRITE_SCAN_ENABLE:
2348		hci_cc_write_scan_enable(hdev, skb);
2349		break;
2350
2351	case HCI_OP_READ_CLASS_OF_DEV:
2352		hci_cc_read_class_of_dev(hdev, skb);
2353		break;
2354
2355	case HCI_OP_WRITE_CLASS_OF_DEV:
2356		hci_cc_write_class_of_dev(hdev, skb);
2357		break;
2358
2359	case HCI_OP_READ_VOICE_SETTING:
2360		hci_cc_read_voice_setting(hdev, skb);
2361		break;
2362
2363	case HCI_OP_WRITE_VOICE_SETTING:
2364		hci_cc_write_voice_setting(hdev, skb);
2365		break;
2366
2367	case HCI_OP_READ_NUM_SUPPORTED_IAC:
2368		hci_cc_read_num_supported_iac(hdev, skb);
2369		break;
2370
2371	case HCI_OP_WRITE_SSP_MODE:
2372		hci_cc_write_ssp_mode(hdev, skb);
2373		break;
2374
2375	case HCI_OP_WRITE_SC_SUPPORT:
2376		hci_cc_write_sc_support(hdev, skb);
2377		break;
2378
2379	case HCI_OP_READ_LOCAL_VERSION:
2380		hci_cc_read_local_version(hdev, skb);
2381		break;
2382
2383	case HCI_OP_READ_LOCAL_COMMANDS:
2384		hci_cc_read_local_commands(hdev, skb);
2385		break;
2386
2387	case HCI_OP_READ_LOCAL_FEATURES:
2388		hci_cc_read_local_features(hdev, skb);
2389		break;
2390
2391	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2392		hci_cc_read_local_ext_features(hdev, skb);
2393		break;
2394
2395	case HCI_OP_READ_BUFFER_SIZE:
2396		hci_cc_read_buffer_size(hdev, skb);
2397		break;
2398
2399	case HCI_OP_READ_BD_ADDR:
2400		hci_cc_read_bd_addr(hdev, skb);
2401		break;
2402
2403	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2404		hci_cc_read_page_scan_activity(hdev, skb);
2405		break;
2406
2407	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2408		hci_cc_write_page_scan_activity(hdev, skb);
2409		break;
2410
2411	case HCI_OP_READ_PAGE_SCAN_TYPE:
2412		hci_cc_read_page_scan_type(hdev, skb);
2413		break;
2414
2415	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2416		hci_cc_write_page_scan_type(hdev, skb);
2417		break;
2418
2419	case HCI_OP_READ_DATA_BLOCK_SIZE:
2420		hci_cc_read_data_block_size(hdev, skb);
2421		break;
2422
2423	case HCI_OP_READ_FLOW_CONTROL_MODE:
2424		hci_cc_read_flow_control_mode(hdev, skb);
2425		break;
2426
2427	case HCI_OP_READ_LOCAL_AMP_INFO:
2428		hci_cc_read_local_amp_info(hdev, skb);
2429		break;
2430
2431	case HCI_OP_READ_LOCAL_AMP_ASSOC:
2432		hci_cc_read_local_amp_assoc(hdev, skb);
2433		break;
2434
2435	case HCI_OP_READ_INQ_RSP_TX_POWER:
2436		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2437		break;
2438
2439	case HCI_OP_PIN_CODE_REPLY:
2440		hci_cc_pin_code_reply(hdev, skb);
2441		break;
2442
2443	case HCI_OP_PIN_CODE_NEG_REPLY:
2444		hci_cc_pin_code_neg_reply(hdev, skb);
2445		break;
2446
2447	case HCI_OP_READ_LOCAL_OOB_DATA:
2448		hci_cc_read_local_oob_data(hdev, skb);
2449		break;
2450
2451	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2452		hci_cc_read_local_oob_ext_data(hdev, skb);
2453		break;
2454
2455	case HCI_OP_LE_READ_BUFFER_SIZE:
2456		hci_cc_le_read_buffer_size(hdev, skb);
2457		break;
2458
2459	case HCI_OP_LE_READ_LOCAL_FEATURES:
2460		hci_cc_le_read_local_features(hdev, skb);
2461		break;
2462
2463	case HCI_OP_LE_READ_ADV_TX_POWER:
2464		hci_cc_le_read_adv_tx_power(hdev, skb);
2465		break;
2466
2467	case HCI_OP_USER_CONFIRM_REPLY:
2468		hci_cc_user_confirm_reply(hdev, skb);
2469		break;
2470
2471	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2472		hci_cc_user_confirm_neg_reply(hdev, skb);
2473		break;
2474
2475	case HCI_OP_USER_PASSKEY_REPLY:
2476		hci_cc_user_passkey_reply(hdev, skb);
2477		break;
2478
2479	case HCI_OP_USER_PASSKEY_NEG_REPLY:
2480		hci_cc_user_passkey_neg_reply(hdev, skb);
2481		break;
2482
2483	case HCI_OP_LE_SET_RANDOM_ADDR:
2484		hci_cc_le_set_random_addr(hdev, skb);
2485		break;
2486
2487	case HCI_OP_LE_SET_ADV_ENABLE:
2488		hci_cc_le_set_adv_enable(hdev, skb);
2489		break;
2490
2491	case HCI_OP_LE_SET_SCAN_ENABLE:
2492		hci_cc_le_set_scan_enable(hdev, skb);
2493		break;
2494
2495	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2496		hci_cc_le_read_white_list_size(hdev, skb);
2497		break;
2498
2499	case HCI_OP_LE_CLEAR_WHITE_LIST:
2500		hci_cc_le_clear_white_list(hdev, skb);
2501		break;
2502
2503	case HCI_OP_LE_ADD_TO_WHITE_LIST:
2504		hci_cc_le_add_to_white_list(hdev, skb);
2505		break;
2506
2507	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2508		hci_cc_le_del_from_white_list(hdev, skb);
2509		break;
2510
2511	case HCI_OP_LE_READ_SUPPORTED_STATES:
2512		hci_cc_le_read_supported_states(hdev, skb);
2513		break;
2514
2515	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2516		hci_cc_write_le_host_supported(hdev, skb);
2517		break;
2518
2519	case HCI_OP_LE_SET_ADV_PARAM:
2520		hci_cc_set_adv_param(hdev, skb);
2521		break;
2522
2523	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2524		hci_cc_write_remote_amp_assoc(hdev, skb);
2525		break;
2526
2527	default:
2528		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2529		break;
2530	}
2531
2532	if (opcode != HCI_OP_NOP)
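	/* A completed command means the controller is responsive, so
	 * stop the command timeout timer. HCI_OP_NOP does not
	 * correspond to any command we sent; controllers use it purely
	 * to report available command slots.
	 */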
2533		del_timer(&hdev->cmd_timer);
2534
2535	hci_req_cmd_complete(hdev, opcode, status);
2536
2537	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
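	/* A non-zero ncmd means the controller can accept commands
	 * again. The stack keeps at most one command in flight, so the
	 * counter is simply reset to 1 and the command queue is kicked.
	 */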
2538		atomic_set(&hdev->cmd_cnt, 1);
2539		if (!skb_queue_empty(&hdev->cmd_q))
2540			queue_work(hdev->workqueue, &hdev->cmd_work);
2541	}
2542}
2543
2544static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2545{
2546	struct hci_ev_cmd_status *ev = (void *) skb->data;
2547	__u16 opcode;
2548
2549	skb_pull(skb, sizeof(*ev));
2550
2551	opcode = __le16_to_cpu(ev->opcode);
2552
2553	switch (opcode) {
2554	case HCI_OP_INQUIRY:
2555		hci_cs_inquiry(hdev, ev->status);
2556		break;
2557
2558	case HCI_OP_CREATE_CONN:
2559		hci_cs_create_conn(hdev, ev->status);
2560		break;
2561
2562	case HCI_OP_ADD_SCO:
2563		hci_cs_add_sco(hdev, ev->status);
2564		break;
2565
2566	case HCI_OP_AUTH_REQUESTED:
2567		hci_cs_auth_requested(hdev, ev->status);
2568		break;
2569
2570	case HCI_OP_SET_CONN_ENCRYPT:
2571		hci_cs_set_conn_encrypt(hdev, ev->status);
2572		break;
2573
2574	case HCI_OP_REMOTE_NAME_REQ:
2575		hci_cs_remote_name_req(hdev, ev->status);
2576		break;
2577
2578	case HCI_OP_READ_REMOTE_FEATURES:
2579		hci_cs_read_remote_features(hdev, ev->status);
2580		break;
2581
2582	case HCI_OP_READ_REMOTE_EXT_FEATURES:
2583		hci_cs_read_remote_ext_features(hdev, ev->status);
2584		break;
2585
2586	case HCI_OP_SETUP_SYNC_CONN:
2587		hci_cs_setup_sync_conn(hdev, ev->status);
2588		break;
2589
2590	case HCI_OP_SNIFF_MODE:
2591		hci_cs_sniff_mode(hdev, ev->status);
2592		break;
2593
2594	case HCI_OP_EXIT_SNIFF_MODE:
2595		hci_cs_exit_sniff_mode(hdev, ev->status);
2596		break;
2597
2598	case HCI_OP_DISCONNECT:
2599		hci_cs_disconnect(hdev, ev->status);
2600		break;
2601
2602	case HCI_OP_CREATE_PHY_LINK:
2603		hci_cs_create_phylink(hdev, ev->status);
2604		break;
2605
2606	case HCI_OP_ACCEPT_PHY_LINK:
2607		hci_cs_accept_phylink(hdev, ev->status);
2608		break;
2609
2610	case HCI_OP_LE_CREATE_CONN:
2611		hci_cs_le_create_conn(hdev, ev->status);
2612		break;
2613
2614	default:
2615		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2616		break;
2617	}
2618
2619	if (opcode != HCI_OP_NOP)
2620		del_timer(&hdev->cmd_timer);
2621
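	/* A Command Status only completes the current request if the
	 * command failed, or if the request is not waiting for a
	 * specific follow-up event for this command.
	 */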
2622	if (ev->status ||
2623	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2624		hci_req_cmd_complete(hdev, opcode, ev->status);
2625
2626	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2627		atomic_set(&hdev->cmd_cnt, 1);
2628		if (!skb_queue_empty(&hdev->cmd_q))
2629			queue_work(hdev->workqueue, &hdev->cmd_work);
2630	}
2631}
2632
2633static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2634{
2635	struct hci_ev_role_change *ev = (void *) skb->data;
2636	struct hci_conn *conn;
2637
2638	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2639
2640	hci_dev_lock(hdev);
2641
2642	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2643	if (conn) {
2644		if (!ev->status) {
2645			if (ev->role)
2646				conn->link_mode &= ~HCI_LM_MASTER;
2647			else
2648				conn->link_mode |= HCI_LM_MASTER;
2649		}
2650
2651		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2652
2653		hci_role_switch_cfm(conn, ev->status, ev->role);
2654	}
2655
2656	hci_dev_unlock(hdev);
2657}
2658
2659static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2660{
2661	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2662	int i;
2663
2664	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2665		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2666		return;
2667	}
2668
2669	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2670	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2671		BT_DBG("%s bad parameters", hdev->name);
2672		return;
2673	}
2674
2675	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2676
2677	for (i = 0; i < ev->num_hndl; i++) {
2678		struct hci_comp_pkts_info *info = &ev->handles[i];
2679		struct hci_conn *conn;
2680		__u16  handle, count;
2681
2682		handle = __le16_to_cpu(info->handle);
2683		count  = __le16_to_cpu(info->count);
2684
2685		conn = hci_conn_hash_lookup_handle(hdev, handle);
2686		if (!conn)
2687			continue;
2688
2689		conn->sent -= count;
2690
2691		switch (conn->type) {
2692		case ACL_LINK:
2693			hdev->acl_cnt += count;
2694			if (hdev->acl_cnt > hdev->acl_pkts)
2695				hdev->acl_cnt = hdev->acl_pkts;
2696			break;
2697
2698		case LE_LINK:
2699			if (hdev->le_pkts) {
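			/* Controllers without a dedicated LE buffer pool
			 * report le_pkts as 0 and share the ACL buffers
			 * for LE traffic.
			 */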
2700				hdev->le_cnt += count;
2701				if (hdev->le_cnt > hdev->le_pkts)
2702					hdev->le_cnt = hdev->le_pkts;
2703			} else {
2704				hdev->acl_cnt += count;
2705				if (hdev->acl_cnt > hdev->acl_pkts)
2706					hdev->acl_cnt = hdev->acl_pkts;
2707			}
2708			break;
2709
2710		case SCO_LINK:
2711			hdev->sco_cnt += count;
2712			if (hdev->sco_cnt > hdev->sco_pkts)
2713				hdev->sco_cnt = hdev->sco_pkts;
2714			break;
2715
2716		default:
2717			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2718			break;
2719		}
2720	}
2721
2722	queue_work(hdev->workqueue, &hdev->tx_work);
2723}
2724
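/* For BR/EDR controllers the handle in a Number Of Completed Data
 * Blocks event identifies a connection directly. For AMP controllers
 * it identifies a logical channel, which has to be looked up first
 * and mapped back to its connection.
 */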
2725static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2726						 __u16 handle)
2727{
2728	struct hci_chan *chan;
2729
2730	switch (hdev->dev_type) {
2731	case HCI_BREDR:
2732		return hci_conn_hash_lookup_handle(hdev, handle);
2733	case HCI_AMP:
2734		chan = hci_chan_lookup_handle(hdev, handle);
2735		if (chan)
2736			return chan->conn;
2737		break;
2738	default:
2739		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2740		break;
2741	}
2742
2743	return NULL;
2744}
2745
2746static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2747{
2748	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2749	int i;
2750
2751	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2752		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2753		return;
2754	}
2755
2756	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2757	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2758		BT_DBG("%s bad parameters", hdev->name);
2759		return;
2760	}
2761
2762	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2763	       ev->num_hndl);
2764
2765	for (i = 0; i < ev->num_hndl; i++) {
2766		struct hci_comp_blocks_info *info = &ev->handles[i];
2767		struct hci_conn *conn = NULL;
2768		__u16  handle, block_count;
2769
2770		handle = __le16_to_cpu(info->handle);
2771		block_count = __le16_to_cpu(info->blocks);
2772
2773		conn = __hci_conn_lookup_handle(hdev, handle);
2774		if (!conn)
2775			continue;
2776
2777		conn->sent -= block_count;
2778
2779		switch (conn->type) {
2780		case ACL_LINK:
2781		case AMP_LINK:
2782			hdev->block_cnt += block_count;
2783			if (hdev->block_cnt > hdev->num_blocks)
2784				hdev->block_cnt = hdev->num_blocks;
2785			break;
2786
2787		default:
2788			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2789			break;
2790		}
2791	}
2792
2793	queue_work(hdev->workqueue, &hdev->tx_work);
2794}
2795
2796static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2797{
2798	struct hci_ev_mode_change *ev = (void *) skb->data;
2799	struct hci_conn *conn;
2800
2801	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2802
2803	hci_dev_lock(hdev);
2804
2805	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2806	if (conn) {
2807		conn->mode = ev->mode;
2808
2809		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2810					&conn->flags)) {
2811			if (conn->mode == HCI_CM_ACTIVE)
2812				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2813			else
2814				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2815		}
2816
2817		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2818			hci_sco_setup(conn, ev->status);
2819	}
2820
2821	hci_dev_unlock(hdev);
2822}
2823
2824static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2825{
2826	struct hci_ev_pin_code_req *ev = (void *) skb->data;
2827	struct hci_conn *conn;
2828
2829	BT_DBG("%s", hdev->name);
2830
2831	hci_dev_lock(hdev);
2832
2833	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2834	if (!conn)
2835		goto unlock;
2836
2837	if (conn->state == BT_CONNECTED) {
2838		hci_conn_hold(conn);
2839		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2840		hci_conn_drop(conn);
2841	}
2842
2843	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2844		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2845			     sizeof(ev->bdaddr), &ev->bdaddr);
2846	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2847		u8 secure;
2848
2849		if (conn->pending_sec_level == BT_SECURITY_HIGH)
2850			secure = 1;
2851		else
2852			secure = 0;
2853
2854		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2855	}
2856
2857unlock:
2858	hci_dev_unlock(hdev);
2859}
2860
2861static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2862{
2863	struct hci_ev_link_key_req *ev = (void *) skb->data;
2864	struct hci_cp_link_key_reply cp;
2865	struct hci_conn *conn;
2866	struct link_key *key;
2867
2868	BT_DBG("%s", hdev->name);
2869
2870	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2871		return;
2872
2873	hci_dev_lock(hdev);
2874
2875	key = hci_find_link_key(hdev, &ev->bdaddr);
2876	if (!key) {
2877		BT_DBG("%s link key not found for %pMR", hdev->name,
2878		       &ev->bdaddr);
2879		goto not_found;
2880	}
2881
2882	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
2883	       &ev->bdaddr);
2884
2885	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2886	    key->type == HCI_LK_DEBUG_COMBINATION) {
2887		BT_DBG("%s ignoring debug key", hdev->name);
2888		goto not_found;
2889	}
2890
2891	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2892	if (conn) {
2893		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
2894		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
2895		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2896			BT_DBG("%s ignoring unauthenticated key", hdev->name);
2897			goto not_found;
2898		}
2899
2900		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2901		    conn->pending_sec_level == BT_SECURITY_HIGH) {
2902			BT_DBG("%s ignoring key unauthenticated for high security",
2903			       hdev->name);
2904			goto not_found;
2905		}
2906
2907		conn->key_type = key->type;
2908		conn->pin_length = key->pin_len;
2909	}
2910
2911	bacpy(&cp.bdaddr, &ev->bdaddr);
2912	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2913
2914	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2915
2916	hci_dev_unlock(hdev);
2917
2918	return;
2919
2920not_found:
2921	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2922	hci_dev_unlock(hdev);
2923}
2924
2925static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2926{
2927	struct hci_ev_link_key_notify *ev = (void *) skb->data;
2928	struct hci_conn *conn;
2929	u8 pin_len = 0;
2930
2931	BT_DBG("%s", hdev->name);
2932
2933	hci_dev_lock(hdev);
2934
2935	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2936	if (conn) {
2937		hci_conn_hold(conn);
2938		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2939		pin_len = conn->pin_length;
2940
2941		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2942			conn->key_type = ev->key_type;
2943
2944		hci_conn_drop(conn);
2945	}
2946
2947	if (test_bit(HCI_MGMT, &hdev->dev_flags))
2948		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2949				 ev->key_type, pin_len);
2950
2951	hci_dev_unlock(hdev);
2952}
2953
2954static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2955{
2956	struct hci_ev_clock_offset *ev = (void *) skb->data;
2957	struct hci_conn *conn;
2958
2959	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2960
2961	hci_dev_lock(hdev);
2962
2963	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2964	if (conn && !ev->status) {
2965		struct inquiry_entry *ie;
2966
2967		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2968		if (ie) {
2969			ie->data.clock_offset = ev->clock_offset;
2970			ie->timestamp = jiffies;
2971		}
2972	}
2973
2974	hci_dev_unlock(hdev);
2975}
2976
2977static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2978{
2979	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2980	struct hci_conn *conn;
2981
2982	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2983
2984	hci_dev_lock(hdev);
2985
2986	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2987	if (conn && !ev->status)
2988		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2989
2990	hci_dev_unlock(hdev);
2991}
2992
2993static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2994{
2995	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2996	struct inquiry_entry *ie;
2997
2998	BT_DBG("%s", hdev->name);
2999
3000	hci_dev_lock(hdev);
3001
3002	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3003	if (ie) {
3004		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3005		ie->timestamp = jiffies;
3006	}
3007
3008	hci_dev_unlock(hdev);
3009}
3010
3011static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3012					     struct sk_buff *skb)
3013{
3014	struct inquiry_data data;
3015	int num_rsp = *((__u8 *) skb->data);
3016	bool name_known, ssp;
3017
3018	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3019
3020	if (!num_rsp)
3021		return;
3022
3023	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3024		return;
3025
3026	hci_dev_lock(hdev);
3027
3028	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3029		struct inquiry_info_with_rssi_and_pscan_mode *info;
3030		info = (void *) (skb->data + 1);
3031
3032		for (; num_rsp; num_rsp--, info++) {
3033			bacpy(&data.bdaddr, &info->bdaddr);
3034			data.pscan_rep_mode	= info->pscan_rep_mode;
3035			data.pscan_period_mode	= info->pscan_period_mode;
3036			data.pscan_mode		= info->pscan_mode;
3037			memcpy(data.dev_class, info->dev_class, 3);
3038			data.clock_offset	= info->clock_offset;
3039			data.rssi		= info->rssi;
3040			data.ssp_mode		= 0x00;
3041
3042			name_known = hci_inquiry_cache_update(hdev, &data,
3043							      false, &ssp);
3044			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3045					  info->dev_class, info->rssi,
3046					  !name_known, ssp, NULL, 0);
3047		}
3048	} else {
3049		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3050
3051		for (; num_rsp; num_rsp--, info++) {
3052			bacpy(&data.bdaddr, &info->bdaddr);
3053			data.pscan_rep_mode	= info->pscan_rep_mode;
3054			data.pscan_period_mode	= info->pscan_period_mode;
3055			data.pscan_mode		= 0x00;
3056			memcpy(data.dev_class, info->dev_class, 3);
3057			data.clock_offset	= info->clock_offset;
3058			data.rssi		= info->rssi;
3059			data.ssp_mode		= 0x00;
3060			name_known = hci_inquiry_cache_update(hdev, &data,
3061							      false, &ssp);
3062			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3063					  info->dev_class, info->rssi,
3064					  !name_known, ssp, NULL, 0);
3065		}
3066	}
3067
3068	hci_dev_unlock(hdev);
3069}
3070
3071static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3072					struct sk_buff *skb)
3073{
3074	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3075	struct hci_conn *conn;
3076
3077	BT_DBG("%s", hdev->name);
3078
3079	hci_dev_lock(hdev);
3080
3081	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3082	if (!conn)
3083		goto unlock;
3084
3085	if (ev->page < HCI_MAX_PAGES)
3086		memcpy(conn->features[ev->page], ev->features, 8);
3087
3088	if (!ev->status && ev->page == 0x01) {
3089		struct inquiry_entry *ie;
3090
3091		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3092		if (ie)
3093			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3094
3095		if (ev->features[0] & LMP_HOST_SSP) {
3096			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3097		} else {
3098			/* The Bluetooth specification mandates that Extended
3099			 * Inquiry Results are only used when Secure
3100			 * Simple Pairing is enabled, but some devices violate
3101			 * this.
3102			 *
3103			 * To make these devices work, the internal SSP
3104			 * enabled flag needs to be cleared if the remote host
3105			 * features do not indicate SSP support */
3106			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3107		}
3108
3109		if (ev->features[0] & LMP_HOST_SC)
3110			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
3111	}
3112
3113	if (conn->state != BT_CONFIG)
3114		goto unlock;
3115
3116	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3117		struct hci_cp_remote_name_req cp;
3118		memset(&cp, 0, sizeof(cp));
3119		bacpy(&cp.bdaddr, &conn->dst);
3120		cp.pscan_rep_mode = 0x02;
3121		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3122	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3123		mgmt_device_connected(hdev, &conn->dst, conn->type,
3124				      conn->dst_type, 0, NULL, 0,
3125				      conn->dev_class);
3126
3127	if (!hci_outgoing_auth_needed(hdev, conn)) {
3128		conn->state = BT_CONNECTED;
3129		hci_proto_connect_cfm(conn, ev->status);
3130		hci_conn_drop(conn);
3131	}
3132
3133unlock:
3134	hci_dev_unlock(hdev);
3135}
3136
3137static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3138				       struct sk_buff *skb)
3139{
3140	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3141	struct hci_conn *conn;
3142
3143	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3144
3145	hci_dev_lock(hdev);
3146
3147	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3148	if (!conn) {
3149		if (ev->link_type == ESCO_LINK)
3150			goto unlock;
3151
3152		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3153		if (!conn)
3154			goto unlock;
3155
3156		conn->type = SCO_LINK;
3157	}
3158
3159	switch (ev->status) {
3160	case 0x00:
3161		conn->handle = __le16_to_cpu(ev->handle);
3162		conn->state  = BT_CONNECTED;
3163
3164		hci_conn_add_sysfs(conn);
3165		break;
3166
3167	case 0x0d:	/* Connection Rejected due to Limited Resources */
3168	case 0x11:	/* Unsupported Feature or Parameter Value */
3169	case 0x1c:	/* SCO interval rejected */
3170	case 0x1a:	/* Unsupported Remote Feature */
3171	case 0x1f:	/* Unspecified error */
3172	case 0x20:	/* Unsupported LMP Parameter value */
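		/* For these failure codes, retry an outgoing (e)SCO
		 * setup with a packet type mask rebuilt from what the
		 * local controller supports.
		 */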
3173		if (conn->out) {
3174			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3175					(hdev->esco_type & EDR_ESCO_MASK);
3176			if (hci_setup_sync(conn, conn->link->handle))
3177				goto unlock;
3178		}
3179		/* fall through */
3180
3181	default:
3182		conn->state = BT_CLOSED;
3183		break;
3184	}
3185
3186	hci_proto_connect_cfm(conn, ev->status);
3187	if (ev->status)
3188		hci_conn_del(conn);
3189
3190unlock:
3191	hci_dev_unlock(hdev);
3192}
3193
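/* Walk the EIR data and return the number of bytes occupied by
 * well-formed length/type/value structures, stopping at the first
 * zero-length (padding) field.
 */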
3194static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3195{
3196	size_t parsed = 0;
3197
3198	while (parsed < eir_len) {
3199		u8 field_len = eir[0];
3200
3201		if (field_len == 0)
3202			return parsed;
3203
3204		parsed += field_len + 1;
3205		eir += field_len + 1;
3206	}
3207
3208	return eir_len;
3209}
3210
3211static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3212					    struct sk_buff *skb)
3213{
3214	struct inquiry_data data;
3215	struct extended_inquiry_info *info = (void *) (skb->data + 1);
3216	int num_rsp = *((__u8 *) skb->data);
3217	size_t eir_len;
3218
3219	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3220
3221	if (!num_rsp)
3222		return;
3223
3224	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3225		return;
3226
3227	hci_dev_lock(hdev);
3228
3229	for (; num_rsp; num_rsp--, info++) {
3230		bool name_known, ssp;
3231
3232		bacpy(&data.bdaddr, &info->bdaddr);
3233		data.pscan_rep_mode	= info->pscan_rep_mode;
3234		data.pscan_period_mode	= info->pscan_period_mode;
3235		data.pscan_mode		= 0x00;
3236		memcpy(data.dev_class, info->dev_class, 3);
3237		data.clock_offset	= info->clock_offset;
3238		data.rssi		= info->rssi;
3239		data.ssp_mode		= 0x01;
3240
3241		if (test_bit(HCI_MGMT, &hdev->dev_flags))
3242			name_known = eir_has_data_type(info->data,
3243						       sizeof(info->data),
3244						       EIR_NAME_COMPLETE);
3245		else
3246			name_known = true;
3247
3248		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3249						      &ssp);
3250		eir_len = eir_get_length(info->data, sizeof(info->data));
3251		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3252				  info->dev_class, info->rssi, !name_known,
3253				  ssp, info->data, eir_len);
3254	}
3255
3256	hci_dev_unlock(hdev);
3257}
3258
3259static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3260					 struct sk_buff *skb)
3261{
3262	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3263	struct hci_conn *conn;
3264
3265	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3266	       __le16_to_cpu(ev->handle));
3267
3268	hci_dev_lock(hdev);
3269
3270	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3271	if (!conn)
3272		goto unlock;
3273
3274	if (!ev->status)
3275		conn->sec_level = conn->pending_sec_level;
3276
3277	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3278
3279	if (ev->status && conn->state == BT_CONNECTED) {
3280		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3281		hci_conn_drop(conn);
3282		goto unlock;
3283	}
3284
3285	if (conn->state == BT_CONFIG) {
3286		if (!ev->status)
3287			conn->state = BT_CONNECTED;
3288
3289		hci_proto_connect_cfm(conn, ev->status);
3290		hci_conn_drop(conn);
3291	} else {
3292		hci_auth_cfm(conn, ev->status);
3293
3294		hci_conn_hold(conn);
3295		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3296		hci_conn_drop(conn);
3297	}
3298
3299unlock:
3300	hci_dev_unlock(hdev);
3301}
3302
3303static u8 hci_get_auth_req(struct hci_conn *conn)
3304{
3305	/* If the remote requests dedicated bonding, follow that lead */
3306	if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
3307	    conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
3308		/* If both the remote and local IO capabilities allow MITM
3309		 * protection, require it; otherwise don't */
3310		if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
3311		    conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
3312			return HCI_AT_DEDICATED_BONDING;
3313		else
3314			return HCI_AT_DEDICATED_BONDING_MITM;
3315	}
3316
3317	/* If the remote requests no-bonding, follow that lead */
3318	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3319	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3320		return conn->remote_auth | (conn->auth_type & 0x01);
3321
3322	return conn->auth_type;
3323}
3324
3325static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3326{
3327	struct hci_ev_io_capa_request *ev = (void *) skb->data;
3328	struct hci_conn *conn;
3329
3330	BT_DBG("%s", hdev->name);
3331
3332	hci_dev_lock(hdev);
3333
3334	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3335	if (!conn)
3336		goto unlock;
3337
3338	hci_conn_hold(conn);
3339
3340	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3341		goto unlock;
3342
3343	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3344	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3345		struct hci_cp_io_capability_reply cp;
3346
3347		bacpy(&cp.bdaddr, &ev->bdaddr);
3348		/* Change the IO capability from KeyboardDisplay to
3349		 * DisplayYesNo, as KeyboardDisplay is not a valid HCI value. */
3350		cp.capability = (conn->io_capability == 0x04) ?
3351				HCI_IO_DISPLAY_YESNO : conn->io_capability;
3352		conn->auth_type = hci_get_auth_req(conn);
3353		cp.authentication = conn->auth_type;
3354
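		/* Advertise local OOB data only if some is stored for
		 * this address and we are either the initiator or the
		 * remote indicated that it has OOB data as well.
		 */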
3355		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3356		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3357			cp.oob_data = 0x01;
3358		else
3359			cp.oob_data = 0x00;
3360
3361		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3362			     sizeof(cp), &cp);
3363	} else {
3364		struct hci_cp_io_capability_neg_reply cp;
3365
3366		bacpy(&cp.bdaddr, &ev->bdaddr);
3367		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3368
3369		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3370			     sizeof(cp), &cp);
3371	}
3372
3373unlock:
3374	hci_dev_unlock(hdev);
3375}
3376
3377static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3378{
3379	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3380	struct hci_conn *conn;
3381
3382	BT_DBG("%s", hdev->name);
3383
3384	hci_dev_lock(hdev);
3385
3386	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3387	if (!conn)
3388		goto unlock;
3389
3390	conn->remote_cap = ev->capability;
3391	conn->remote_auth = ev->authentication;
3392	if (ev->oob_data)
3393		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3394
3395unlock:
3396	hci_dev_unlock(hdev);
3397}
3398
3399static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3400					 struct sk_buff *skb)
3401{
3402	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3403	int loc_mitm, rem_mitm, confirm_hint = 0;
3404	struct hci_conn *conn;
3405
3406	BT_DBG("%s", hdev->name);
3407
3408	hci_dev_lock(hdev);
3409
3410	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3411		goto unlock;
3412
3413	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3414	if (!conn)
3415		goto unlock;
3416
3417	loc_mitm = (conn->auth_type & 0x01);
3418	rem_mitm = (conn->remote_auth & 0x01);
3419
3420	/* If we require MITM but the remote device can't provide that
3421	 * (it has NoInputNoOutput) then reject the confirmation
3422	 * request. The only exception is when we're the dedicated bonding
3423	 * initiator (connect_cfm_cb set), since then we always have the MITM
3424	 * bit set. */
3425	if (!conn->connect_cfm_cb && loc_mitm &&
3426	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3427		BT_DBG("Rejecting request: remote device can't provide MITM");
3428		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3429			     sizeof(ev->bdaddr), &ev->bdaddr);
3430		goto unlock;
3431	}
3432
3433	/* If neither side requires MITM protection, auto-accept */
3434	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3435	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3436
3437		/* If we're not the initiator, request authorization to
3438		 * proceed from user space (mgmt_user_confirm with
3439		 * confirm_hint set to 1). */
3440		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3441			BT_DBG("Confirming auto-accept as acceptor");
3442			confirm_hint = 1;
3443			goto confirm;
3444		}
3445
3446		BT_DBG("Auto-accept of user confirmation with %ums delay",
3447		       hdev->auto_accept_delay);
3448
3449		if (hdev->auto_accept_delay > 0) {
3450			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3451			queue_delayed_work(conn->hdev->workqueue,
3452					   &conn->auto_accept_work, delay);
3453			goto unlock;
3454		}
3455
3456		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3457			     sizeof(ev->bdaddr), &ev->bdaddr);
3458		goto unlock;
3459	}
3460
3461confirm:
3462	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3463				  confirm_hint);
3464
3465unlock:
3466	hci_dev_unlock(hdev);
3467}
3468
3469static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3470					 struct sk_buff *skb)
3471{
3472	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3473
3474	BT_DBG("%s", hdev->name);
3475
3476	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3477		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3478}
3479
3480static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3481					struct sk_buff *skb)
3482{
3483	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3484	struct hci_conn *conn;
3485
3486	BT_DBG("%s", hdev->name);
3487
3488	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3489	if (!conn)
3490		return;
3491
3492	conn->passkey_notify = __le32_to_cpu(ev->passkey);
3493	conn->passkey_entered = 0;
3494
3495	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3496		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3497					 conn->dst_type, conn->passkey_notify,
3498					 conn->passkey_entered);
3499}
3500
3501static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3502{
3503	struct hci_ev_keypress_notify *ev = (void *) skb->data;
3504	struct hci_conn *conn;
3505
3506	BT_DBG("%s", hdev->name);
3507
3508	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3509	if (!conn)
3510		return;
3511
3512	switch (ev->type) {
3513	case HCI_KEYPRESS_STARTED:
3514		conn->passkey_entered = 0;
3515		return;
3516
3517	case HCI_KEYPRESS_ENTERED:
3518		conn->passkey_entered++;
3519		break;
3520
3521	case HCI_KEYPRESS_ERASED:
3522		conn->passkey_entered--;
3523		break;
3524
3525	case HCI_KEYPRESS_CLEARED:
3526		conn->passkey_entered = 0;
3527		break;
3528
3529	case HCI_KEYPRESS_COMPLETED:
3530		return;
3531	}
3532
3533	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3534		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3535					 conn->dst_type, conn->passkey_notify,
3536					 conn->passkey_entered);
3537}
3538
3539static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3540					 struct sk_buff *skb)
3541{
3542	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3543	struct hci_conn *conn;
3544
3545	BT_DBG("%s", hdev->name);
3546
3547	hci_dev_lock(hdev);
3548
3549	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3550	if (!conn)
3551		goto unlock;
3552
3553	/* To avoid duplicate auth_failed events to user space we check
3554	 * the HCI_CONN_AUTH_PEND flag which will be set if we
3555	 * initiated the authentication. A traditional auth_complete
3556	 * event is always produced when we are the initiator and is also
3557	 * mapped to the mgmt_auth_failed event */
3558	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3559		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3560				 ev->status);
3561
3562	hci_conn_drop(conn);
3563
3564unlock:
3565	hci_dev_unlock(hdev);
3566}
3567
3568static void hci_remote_host_features_evt(struct hci_dev *hdev,
3569					 struct sk_buff *skb)
3570{
3571	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3572	struct inquiry_entry *ie;
3573	struct hci_conn *conn;
3574
3575	BT_DBG("%s", hdev->name);
3576
3577	hci_dev_lock(hdev);
3578
3579	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3580	if (conn)
3581		memcpy(conn->features[1], ev->features, 8);
3582
3583	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3584	if (ie)
3585		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3586
3587	hci_dev_unlock(hdev);
3588}
3589
3590static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3591					    struct sk_buff *skb)
3592{
3593	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3594	struct oob_data *data;
3595
3596	BT_DBG("%s", hdev->name);
3597
3598	hci_dev_lock(hdev);
3599
3600	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3601		goto unlock;
3602
3603	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3604	if (data) {
3605		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
3606			struct hci_cp_remote_oob_ext_data_reply cp;
3607
3608			bacpy(&cp.bdaddr, &ev->bdaddr);
3609			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
3610			memcpy(cp.randomizer192, data->randomizer192,
3611			       sizeof(cp.randomizer192));
3612			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
3613			memcpy(cp.randomizer256, data->randomizer256,
3614			       sizeof(cp.randomizer256));
3615
3616			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
3617				     sizeof(cp), &cp);
3618		} else {
3619			struct hci_cp_remote_oob_data_reply cp;
3620
3621			bacpy(&cp.bdaddr, &ev->bdaddr);
3622			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
3623			memcpy(cp.randomizer, data->randomizer192,
3624			       sizeof(cp.randomizer));
3625
3626			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
3627				     sizeof(cp), &cp);
3628		}
3629	} else {
3630		struct hci_cp_remote_oob_data_neg_reply cp;
3631
3632		bacpy(&cp.bdaddr, &ev->bdaddr);
3633		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
3634			     sizeof(cp), &cp);
3635	}
3636
3637unlock:
3638	hci_dev_unlock(hdev);
3639}
3640
3641static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3642				      struct sk_buff *skb)
3643{
3644	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3645	struct hci_conn *hcon, *bredr_hcon;
3646
3647	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3648	       ev->status);
3649
3650	hci_dev_lock(hdev);
3651
3652	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3653	if (!hcon) {
3654		hci_dev_unlock(hdev);
3655		return;
3656	}
3657
3658	if (ev->status) {
3659		hci_conn_del(hcon);
3660		hci_dev_unlock(hdev);
3661		return;
3662	}
3663
3664	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3665
3666	hcon->state = BT_CONNECTED;
3667	bacpy(&hcon->dst, &bredr_hcon->dst);
3668
3669	hci_conn_hold(hcon);
3670	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3671	hci_conn_drop(hcon);
3672
3673	hci_conn_add_sysfs(hcon);
3674
3675	amp_physical_cfm(bredr_hcon, hcon);
3676
3677	hci_dev_unlock(hdev);
3678}
3679
3680static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3681{
3682	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3683	struct hci_conn *hcon;
3684	struct hci_chan *hchan;
3685	struct amp_mgr *mgr;
3686
3687	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3688	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3689	       ev->status);
3690
3691	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3692	if (!hcon)
3693		return;
3694
3695	/* Create AMP hchan */
3696	hchan = hci_chan_create(hcon);
3697	if (!hchan)
3698		return;
3699
3700	hchan->handle = le16_to_cpu(ev->handle);
3701
3702	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3703
3704	mgr = hcon->amp_mgr;
3705	if (mgr && mgr->bredr_chan) {
3706		struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3707
3708		l2cap_chan_lock(bredr_chan);
3709
3710		bredr_chan->conn->mtu = hdev->block_mtu;
3711		l2cap_logical_cfm(bredr_chan, hchan, 0);
3712		hci_conn_hold(hcon);
3713
3714		l2cap_chan_unlock(bredr_chan);
3715	}
3716}
3717
3718static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3719					     struct sk_buff *skb)
3720{
3721	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3722	struct hci_chan *hchan;
3723
3724	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3725	       le16_to_cpu(ev->handle), ev->status);
3726
3727	if (ev->status)
3728		return;
3729
3730	hci_dev_lock(hdev);
3731
3732	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3733	if (!hchan)
3734		goto unlock;
3735
3736	amp_destroy_logical_link(hchan, ev->reason);
3737
3738unlock:
3739	hci_dev_unlock(hdev);
3740}
3741
3742static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3743					     struct sk_buff *skb)
3744{
3745	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3746	struct hci_conn *hcon;
3747
3748	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3749
3750	if (ev->status)
3751		return;
3752
3753	hci_dev_lock(hdev);
3754
3755	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3756	if (hcon) {
3757		hcon->state = BT_CLOSED;
3758		hci_conn_del(hcon);
3759	}
3760
3761	hci_dev_unlock(hdev);
3762}
3763
3764static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3765{
3766	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3767	struct hci_conn *conn;
3768	struct smp_irk *irk;
3769
3770	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3771
3772	hci_dev_lock(hdev);
3773
3774	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3775	if (!conn) {
3776		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3777		if (!conn) {
3778			BT_ERR("No memory for new connection");
3779			goto unlock;
3780		}
3781
3782		conn->dst_type = ev->bdaddr_type;
3783
3784		/* The advertising parameters for own address type
3785		 * define which source address and source address
3786		 * type this connection has.
3787		 */
3788		if (bacmp(&conn->src, BDADDR_ANY)) {
3789			conn->src_type = ADDR_LE_DEV_PUBLIC;
3790		} else {
3791			bacpy(&conn->src, &hdev->static_addr);
3792			conn->src_type = ADDR_LE_DEV_RANDOM;
3793		}
3794
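		/* Role 0x00 in the LE Connection Complete event means
		 * the local device is the master (initiator).
		 */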
3795		if (ev->role == LE_CONN_ROLE_MASTER) {
3796			conn->out = true;
3797			conn->link_mode |= HCI_LM_MASTER;
3798		}
3799
3800		/* If we didn't have a hci_conn object previously
3801		 * but we're in master role this must be something
3802		 * initiated using a white list. Since white list based
3803		 * connections are not "first class citizens" we don't
3804		 * have full tracking of them. Therefore, we go ahead
3805		 * with a "best effort" approach of determining the
3806		 * initiator address based on the HCI_PRIVACY flag.
3807		 */
3808		if (conn->out) {
3809			conn->resp_addr_type = ev->bdaddr_type;
3810			bacpy(&conn->resp_addr, &ev->bdaddr);
3811			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3812				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
3813				bacpy(&conn->init_addr, &hdev->rpa);
3814			} else {
3815				hci_copy_identity_address(hdev,
3816							  &conn->init_addr,
3817							  &conn->init_addr_type);
3818			}
3819		} else {
3820			/* Set the responder (our side) address type based on
3821			 * the advertising address type.
3822			 */
3823			conn->resp_addr_type = hdev->adv_addr_type;
3824			if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
3825				bacpy(&conn->resp_addr, &hdev->random_addr);
3826			else
3827				bacpy(&conn->resp_addr, &hdev->bdaddr);
3828
3829			conn->init_addr_type = ev->bdaddr_type;
3830			bacpy(&conn->init_addr, &ev->bdaddr);
3831		}
3832	} else {
3833		cancel_delayed_work(&conn->le_conn_timeout);
3834	}
3835
3836	/* Ensure that the hci_conn contains the identity address type
3837	 * regardless of which address the connection was made with.
3838	 */
3839	hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
3840
3841	/* Lookup the identity address from the stored connection
3842	 * address and address type.
3843	 *
3844	 * When establishing connections to an identity address, the
3845	 * connection procedure will store the resolvable random
3846	 * address first. Now if it can be converted back into the
3847	 * identity address, start using the identity address from
3848	 * now on.
3849	 */
3850	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
3851	if (irk) {
3852		bacpy(&conn->dst, &irk->bdaddr);
3853		conn->dst_type = irk->addr_type;
3854	}
3855
3856	if (ev->status) {
3857		hci_le_conn_failed(conn, ev->status);
3858		goto unlock;
3859	}
3860
3861	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3862		mgmt_device_connected(hdev, &conn->dst, conn->type,
3863				      conn->dst_type, 0, NULL, 0, NULL);
3864
3865	conn->sec_level = BT_SECURITY_LOW;
3866	conn->handle = __le16_to_cpu(ev->handle);
3867	conn->state = BT_CONNECTED;
3868
3869	if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
3870		set_bit(HCI_CONN_6LOWPAN, &conn->flags);
3871
3872	hci_conn_add_sysfs(conn);
3873
3874	hci_proto_connect_cfm(conn, ev->status);
3875
3876	hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type);
3877
3878unlock:
3879	hci_dev_unlock(hdev);
3880}
3881
3882/* This function requires the caller holds hdev->lock */
3883static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
3884				  u8 addr_type)
3885{
3886	struct hci_conn *conn;
3887	struct smp_irk *irk;
3888
3889	/* If this is a resolvable address, we should resolve it and then
3890	 * update address and address type variables.
3891	 */
3892	irk = hci_get_irk(hdev, addr, addr_type);
3893	if (irk) {
3894		addr = &irk->bdaddr;
3895		addr_type = irk->addr_type;
3896	}
3897
3898	if (!hci_pend_le_conn_lookup(hdev, addr, addr_type))
3899		return;
3900
3901	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
3902			      HCI_AT_NO_BONDING);
3903	if (!IS_ERR(conn))
3904		return;
3905
3906	switch (PTR_ERR(conn)) {
3907	case -EBUSY:
3908		/* If hci_connect_le() returns -EBUSY it means there is already
3909		 * an LE connection attempt going on. Since controllers don't
3910		 * support more than one connection attempt at a time, we
3911		 * don't consider this an error case.
3912		 */
3913		break;
3914	default:
3915		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
3916	}
3917}
3918
3919static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3920{
3921	u8 num_reports = skb->data[0];
3922	void *ptr = &skb->data[1];
3923	s8 rssi;
3924
3925	hci_dev_lock(hdev);
3926
3927	while (num_reports--) {
3928		struct hci_ev_le_advertising_info *ev = ptr;
3929
3930		if (ev->evt_type == LE_ADV_IND ||
3931		    ev->evt_type == LE_ADV_DIRECT_IND)
3932			check_pending_le_conn(hdev, &ev->bdaddr,
3933					      ev->bdaddr_type);
3934
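		/* The RSSI is the single byte that follows the
		 * advertising data of each report.
		 */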
3935		rssi = ev->data[ev->length];
3936		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3937				  NULL, rssi, 0, 1, ev->data, ev->length);
3938
3939		ptr += sizeof(*ev) + ev->length + 1;
3940	}
3941
3942	hci_dev_unlock(hdev);
3943}
3944
3945static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3946{
3947	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3948	struct hci_cp_le_ltk_reply cp;
3949	struct hci_cp_le_ltk_neg_reply neg;
3950	struct hci_conn *conn;
3951	struct smp_ltk *ltk;
3952
3953	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3954
3955	hci_dev_lock(hdev);
3956
3957	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3958	if (conn == NULL)
3959		goto not_found;
3960
3961	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
3962	if (ltk == NULL)
3963		goto not_found;
3964
3965	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3966	cp.handle = cpu_to_le16(conn->handle);
3967
3968	if (ltk->authenticated)
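	/* An authenticated (MITM protected) LTK gives high security;
	 * an unauthenticated one only medium security.
	 */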
3969		conn->pending_sec_level = BT_SECURITY_HIGH;
3970	else
3971		conn->pending_sec_level = BT_SECURITY_MEDIUM;
3972
3973	conn->enc_key_size = ltk->enc_size;
3974
3975	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3976
3977	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
3978	 * temporary key used to encrypt a connection following
3979	 * pairing. It is used during the Encrypted Session Setup to
3980	 * distribute the keys. Later, security can be re-established
3981	 * using a distributed LTK.
3982	 */
3983	if (ltk->type == HCI_SMP_STK_SLAVE) {
3984		list_del(&ltk->list);
3985		kfree(ltk);
3986	}
3987
3988	hci_dev_unlock(hdev);
3989
3990	return;
3991
3992not_found:
3993	neg.handle = ev->handle;
3994	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3995	hci_dev_unlock(hdev);
3996}
3997
3998static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3999{
4000	struct hci_ev_le_meta *le_ev = (void *) skb->data;
4001
4002	skb_pull(skb, sizeof(*le_ev));
4003
4004	switch (le_ev->subevent) {
4005	case HCI_EV_LE_CONN_COMPLETE:
4006		hci_le_conn_complete_evt(hdev, skb);
4007		break;
4008
4009	case HCI_EV_LE_ADVERTISING_REPORT:
4010		hci_le_adv_report_evt(hdev, skb);
4011		break;
4012
4013	case HCI_EV_LE_LTK_REQ:
4014		hci_le_ltk_request_evt(hdev, skb);
4015		break;
4016
4017	default:
4018		break;
4019	}
4020}
4021
4022static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4023{
4024	struct hci_ev_channel_selected *ev = (void *) skb->data;
4025	struct hci_conn *hcon;
4026
4027	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4028
4029	skb_pull(skb, sizeof(*ev));
4030
4031	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4032	if (!hcon)
4033		return;
4034
4035	amp_read_loc_assoc_final_data(hdev, hcon);
4036}
4037
4038void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4039{
4040	struct hci_event_hdr *hdr = (void *) skb->data;
4041	__u8 event = hdr->evt;
4042
4043	hci_dev_lock(hdev);
4044
4045	/* Received events are (currently) only needed when a request is
4046	 * ongoing so avoid unnecessary memory allocation.
4047	 */
4048	if (hdev->req_status == HCI_REQ_PEND) {
4049		kfree_skb(hdev->recv_evt);
4050		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4051	}
4052
4053	hci_dev_unlock(hdev);
4054
4055	skb_pull(skb, HCI_EVENT_HDR_SIZE);
4056
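	/* If the command we last sent was explicitly waiting for this
	 * event, treat the event as completing that command request.
	 */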
4057	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
4058		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
4059		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
4060
4061		hci_req_cmd_complete(hdev, opcode, 0);
4062	}
4063
4064	switch (event) {
4065	case HCI_EV_INQUIRY_COMPLETE:
4066		hci_inquiry_complete_evt(hdev, skb);
4067		break;
4068
4069	case HCI_EV_INQUIRY_RESULT:
4070		hci_inquiry_result_evt(hdev, skb);
4071		break;
4072
4073	case HCI_EV_CONN_COMPLETE:
4074		hci_conn_complete_evt(hdev, skb);
4075		break;
4076
4077	case HCI_EV_CONN_REQUEST:
4078		hci_conn_request_evt(hdev, skb);
4079		break;
4080
4081	case HCI_EV_DISCONN_COMPLETE:
4082		hci_disconn_complete_evt(hdev, skb);
4083		break;
4084
4085	case HCI_EV_AUTH_COMPLETE:
4086		hci_auth_complete_evt(hdev, skb);
4087		break;
4088
4089	case HCI_EV_REMOTE_NAME:
4090		hci_remote_name_evt(hdev, skb);
4091		break;
4092
4093	case HCI_EV_ENCRYPT_CHANGE:
4094		hci_encrypt_change_evt(hdev, skb);
4095		break;
4096
4097	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4098		hci_change_link_key_complete_evt(hdev, skb);
4099		break;
4100
4101	case HCI_EV_REMOTE_FEATURES:
4102		hci_remote_features_evt(hdev, skb);
4103		break;
4104
4105	case HCI_EV_CMD_COMPLETE:
4106		hci_cmd_complete_evt(hdev, skb);
4107		break;
4108
4109	case HCI_EV_CMD_STATUS:
4110		hci_cmd_status_evt(hdev, skb);
4111		break;
4112
4113	case HCI_EV_ROLE_CHANGE:
4114		hci_role_change_evt(hdev, skb);
4115		break;
4116
4117	case HCI_EV_NUM_COMP_PKTS:
4118		hci_num_comp_pkts_evt(hdev, skb);
4119		break;
4120
4121	case HCI_EV_MODE_CHANGE:
4122		hci_mode_change_evt(hdev, skb);
4123		break;
4124
4125	case HCI_EV_PIN_CODE_REQ:
4126		hci_pin_code_request_evt(hdev, skb);
4127		break;
4128
4129	case HCI_EV_LINK_KEY_REQ:
4130		hci_link_key_request_evt(hdev, skb);
4131		break;
4132
4133	case HCI_EV_LINK_KEY_NOTIFY:
4134		hci_link_key_notify_evt(hdev, skb);
4135		break;
4136
4137	case HCI_EV_CLOCK_OFFSET:
4138		hci_clock_offset_evt(hdev, skb);
4139		break;
4140
4141	case HCI_EV_PKT_TYPE_CHANGE:
4142		hci_pkt_type_change_evt(hdev, skb);
4143		break;
4144
4145	case HCI_EV_PSCAN_REP_MODE:
4146		hci_pscan_rep_mode_evt(hdev, skb);
4147		break;
4148
4149	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4150		hci_inquiry_result_with_rssi_evt(hdev, skb);
4151		break;
4152
4153	case HCI_EV_REMOTE_EXT_FEATURES:
4154		hci_remote_ext_features_evt(hdev, skb);
4155		break;
4156
4157	case HCI_EV_SYNC_CONN_COMPLETE:
4158		hci_sync_conn_complete_evt(hdev, skb);
4159		break;
4160
4161	case HCI_EV_EXTENDED_INQUIRY_RESULT:
4162		hci_extended_inquiry_result_evt(hdev, skb);
4163		break;
4164
4165	case HCI_EV_KEY_REFRESH_COMPLETE:
4166		hci_key_refresh_complete_evt(hdev, skb);
4167		break;
4168
4169	case HCI_EV_IO_CAPA_REQUEST:
4170		hci_io_capa_request_evt(hdev, skb);
4171		break;
4172
4173	case HCI_EV_IO_CAPA_REPLY:
4174		hci_io_capa_reply_evt(hdev, skb);
4175		break;
4176
4177	case HCI_EV_USER_CONFIRM_REQUEST:
4178		hci_user_confirm_request_evt(hdev, skb);
4179		break;
4180
4181	case HCI_EV_USER_PASSKEY_REQUEST:
4182		hci_user_passkey_request_evt(hdev, skb);
4183		break;
4184
4185	case HCI_EV_USER_PASSKEY_NOTIFY:
4186		hci_user_passkey_notify_evt(hdev, skb);
4187		break;
4188
4189	case HCI_EV_KEYPRESS_NOTIFY:
4190		hci_keypress_notify_evt(hdev, skb);
4191		break;
4192
4193	case HCI_EV_SIMPLE_PAIR_COMPLETE:
4194		hci_simple_pair_complete_evt(hdev, skb);
4195		break;
4196
4197	case HCI_EV_REMOTE_HOST_FEATURES:
4198		hci_remote_host_features_evt(hdev, skb);
4199		break;
4200
4201	case HCI_EV_LE_META:
4202		hci_le_meta_evt(hdev, skb);
4203		break;
4204
4205	case HCI_EV_CHANNEL_SELECTED:
4206		hci_chan_selected_evt(hdev, skb);
4207		break;
4208
4209	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4210		hci_remote_oob_data_request_evt(hdev, skb);
4211		break;
4212
4213	case HCI_EV_PHY_LINK_COMPLETE:
4214		hci_phy_link_complete_evt(hdev, skb);
4215		break;
4216
4217	case HCI_EV_LOGICAL_LINK_COMPLETE:
4218		hci_loglink_complete_evt(hdev, skb);
4219		break;
4220
4221	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4222		hci_disconn_loglink_complete_evt(hdev, skb);
4223		break;
4224
4225	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4226		hci_disconn_phylink_complete_evt(hdev, skb);
4227		break;
4228
4229	case HCI_EV_NUM_COMP_BLOCKS:
4230		hci_num_comp_blocks_evt(hdev, skb);
4231		break;
4232
4233	default:
4234		BT_DBG("%s event 0x%2.2x", hdev->name, event);
4235		break;
4236	}
4237
4238	kfree_skb(skb);
4239	hdev->stat.evt_rx++;
4240}
4241