hci_event.c revision 40bef302f6323d1ee6fb3dc0e62edb0f446d0339
1/*
2   BlueZ - Bluetooth protocol stack for Linux
3   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License version 2 as
9   published by the Free Software Foundation;
10
11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22   SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI event handling. */
26
27#include <asm/unaligned.h>
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/mgmt.h>
32
33#include "a2mp.h"
34#include "amp.h"
35#include "smp.h"
36
37/* Handle HCI Event packets */
38
39static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
40{
41	__u8 status = *((__u8 *) skb->data);
42
43	BT_DBG("%s status 0x%2.2x", hdev->name, status);
44
45	if (status)
46		return;
47
48	clear_bit(HCI_INQUIRY, &hdev->flags);
49	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
50	wake_up_bit(&hdev->flags, HCI_INQUIRY);
51
52	hci_dev_lock(hdev);
53	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
54	hci_dev_unlock(hdev);
55
56	hci_conn_check_pending(hdev);
57}
58
59static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60{
61	__u8 status = *((__u8 *) skb->data);
62
63	BT_DBG("%s status 0x%2.2x", hdev->name, status);
64
65	if (status)
66		return;
67
68	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69}
70
71static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72{
73	__u8 status = *((__u8 *) skb->data);
74
75	BT_DBG("%s status 0x%2.2x", hdev->name, status);
76
77	if (status)
78		return;
79
80	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81
82	hci_conn_check_pending(hdev);
83}
84
85static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
86					  struct sk_buff *skb)
87{
88	BT_DBG("%s", hdev->name);
89}
90
91static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
92{
93	struct hci_rp_role_discovery *rp = (void *) skb->data;
94	struct hci_conn *conn;
95
96	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
97
98	if (rp->status)
99		return;
100
101	hci_dev_lock(hdev);
102
103	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104	if (conn)
105		conn->role = rp->role;
106
107	hci_dev_unlock(hdev);
108}
109
110static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
111{
112	struct hci_rp_read_link_policy *rp = (void *) skb->data;
113	struct hci_conn *conn;
114
115	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
116
117	if (rp->status)
118		return;
119
120	hci_dev_lock(hdev);
121
122	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
123	if (conn)
124		conn->link_policy = __le16_to_cpu(rp->policy);
125
126	hci_dev_unlock(hdev);
127}
128
129static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
130{
131	struct hci_rp_write_link_policy *rp = (void *) skb->data;
132	struct hci_conn *conn;
133	void *sent;
134
135	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
136
137	if (rp->status)
138		return;
139
140	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
141	if (!sent)
142		return;
143
144	hci_dev_lock(hdev);
145
146	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
147	if (conn)
148		conn->link_policy = get_unaligned_le16(sent + 2);
149
150	hci_dev_unlock(hdev);
151}
152
153static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
154					struct sk_buff *skb)
155{
156	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
157
158	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
159
160	if (rp->status)
161		return;
162
163	hdev->link_policy = __le16_to_cpu(rp->policy);
164}
165
166static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
167					 struct sk_buff *skb)
168{
169	__u8 status = *((__u8 *) skb->data);
170	void *sent;
171
172	BT_DBG("%s status 0x%2.2x", hdev->name, status);
173
174	if (status)
175		return;
176
177	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
178	if (!sent)
179		return;
180
181	hdev->link_policy = get_unaligned_le16(sent);
182}
183
184static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
185{
186	__u8 status = *((__u8 *) skb->data);
187
188	BT_DBG("%s status 0x%2.2x", hdev->name, status);
189
190	clear_bit(HCI_RESET, &hdev->flags);
191
192	/* Reset all non-persistent flags */
193	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
194
195	hdev->discovery.state = DISCOVERY_STOPPED;
196	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
197	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
198
199	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
200	hdev->adv_data_len = 0;
201
202	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
203	hdev->scan_rsp_data_len = 0;
204
205	hdev->le_scan_type = LE_SCAN_PASSIVE;
206
207	hdev->ssp_debug_mode = 0;
208}
209
210static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
211{
212	__u8 status = *((__u8 *) skb->data);
213	void *sent;
214
215	BT_DBG("%s status 0x%2.2x", hdev->name, status);
216
217	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
218	if (!sent)
219		return;
220
221	hci_dev_lock(hdev);
222
223	if (test_bit(HCI_MGMT, &hdev->dev_flags))
224		mgmt_set_local_name_complete(hdev, sent, status);
225	else if (!status)
226		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
227
228	hci_dev_unlock(hdev);
229}
230
231static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
232{
233	struct hci_rp_read_local_name *rp = (void *) skb->data;
234
235	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
236
237	if (rp->status)
238		return;
239
240	if (test_bit(HCI_SETUP, &hdev->dev_flags))
241		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
242}
243
244static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
245{
246	__u8 status = *((__u8 *) skb->data);
247	void *sent;
248
249	BT_DBG("%s status 0x%2.2x", hdev->name, status);
250
251	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
252	if (!sent)
253		return;
254
255	if (!status) {
256		__u8 param = *((__u8 *) sent);
257
258		if (param == AUTH_ENABLED)
259			set_bit(HCI_AUTH, &hdev->flags);
260		else
261			clear_bit(HCI_AUTH, &hdev->flags);
262	}
263
264	if (test_bit(HCI_MGMT, &hdev->dev_flags))
265		mgmt_auth_enable_complete(hdev, status);
266}
267
268static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
269{
270	__u8 status = *((__u8 *) skb->data);
271	__u8 param;
272	void *sent;
273
274	BT_DBG("%s status 0x%2.2x", hdev->name, status);
275
276	if (status)
277		return;
278
279	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
280	if (!sent)
281		return;
282
283	param = *((__u8 *) sent);
284
285	if (param)
286		set_bit(HCI_ENCRYPT, &hdev->flags);
287	else
288		clear_bit(HCI_ENCRYPT, &hdev->flags);
289}
290
291static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
292{
293	__u8 status = *((__u8 *) skb->data);
294	__u8 param;
295	void *sent;
296
297	BT_DBG("%s status 0x%2.2x", hdev->name, status);
298
299	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
300	if (!sent)
301		return;
302
303	param = *((__u8 *) sent);
304
305	hci_dev_lock(hdev);
306
307	if (status) {
308		hdev->discov_timeout = 0;
309		goto done;
310	}
311
312	if (param & SCAN_INQUIRY)
313		set_bit(HCI_ISCAN, &hdev->flags);
314	else
315		clear_bit(HCI_ISCAN, &hdev->flags);
316
317	if (param & SCAN_PAGE)
318		set_bit(HCI_PSCAN, &hdev->flags);
319	else
320		clear_bit(HCI_ISCAN, &hdev->flags);
321
322done:
323	hci_dev_unlock(hdev);
324}
325
326static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
327{
328	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
329
330	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
331
332	if (rp->status)
333		return;
334
335	memcpy(hdev->dev_class, rp->dev_class, 3);
336
337	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
338	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
339}
340
341static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
342{
343	__u8 status = *((__u8 *) skb->data);
344	void *sent;
345
346	BT_DBG("%s status 0x%2.2x", hdev->name, status);
347
348	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
349	if (!sent)
350		return;
351
352	hci_dev_lock(hdev);
353
354	if (status == 0)
355		memcpy(hdev->dev_class, sent, 3);
356
357	if (test_bit(HCI_MGMT, &hdev->dev_flags))
358		mgmt_set_class_of_dev_complete(hdev, sent, status);
359
360	hci_dev_unlock(hdev);
361}
362
363static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
364{
365	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
366	__u16 setting;
367
368	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
369
370	if (rp->status)
371		return;
372
373	setting = __le16_to_cpu(rp->voice_setting);
374
375	if (hdev->voice_setting == setting)
376		return;
377
378	hdev->voice_setting = setting;
379
380	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
381
382	if (hdev->notify)
383		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
384}
385
386static void hci_cc_write_voice_setting(struct hci_dev *hdev,
387				       struct sk_buff *skb)
388{
389	__u8 status = *((__u8 *) skb->data);
390	__u16 setting;
391	void *sent;
392
393	BT_DBG("%s status 0x%2.2x", hdev->name, status);
394
395	if (status)
396		return;
397
398	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
399	if (!sent)
400		return;
401
402	setting = get_unaligned_le16(sent);
403
404	if (hdev->voice_setting == setting)
405		return;
406
407	hdev->voice_setting = setting;
408
409	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
410
411	if (hdev->notify)
412		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
413}
414
415static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
416					  struct sk_buff *skb)
417{
418	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
419
420	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
421
422	if (rp->status)
423		return;
424
425	hdev->num_iac = rp->num_iac;
426
427	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
428}
429
430static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
431{
432	__u8 status = *((__u8 *) skb->data);
433	struct hci_cp_write_ssp_mode *sent;
434
435	BT_DBG("%s status 0x%2.2x", hdev->name, status);
436
437	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
438	if (!sent)
439		return;
440
441	if (!status) {
442		if (sent->mode)
443			hdev->features[1][0] |= LMP_HOST_SSP;
444		else
445			hdev->features[1][0] &= ~LMP_HOST_SSP;
446	}
447
448	if (test_bit(HCI_MGMT, &hdev->dev_flags))
449		mgmt_ssp_enable_complete(hdev, sent->mode, status);
450	else if (!status) {
451		if (sent->mode)
452			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
453		else
454			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
455	}
456}
457
458static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
459{
460	u8 status = *((u8 *) skb->data);
461	struct hci_cp_write_sc_support *sent;
462
463	BT_DBG("%s status 0x%2.2x", hdev->name, status);
464
465	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
466	if (!sent)
467		return;
468
469	if (!status) {
470		if (sent->support)
471			hdev->features[1][0] |= LMP_HOST_SC;
472		else
473			hdev->features[1][0] &= ~LMP_HOST_SC;
474	}
475
476	if (test_bit(HCI_MGMT, &hdev->dev_flags))
477		mgmt_sc_enable_complete(hdev, sent->support, status);
478	else if (!status) {
479		if (sent->support)
480			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
481		else
482			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
483	}
484}
485
486static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
487{
488	struct hci_rp_read_local_version *rp = (void *) skb->data;
489
490	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
491
492	if (rp->status)
493		return;
494
495	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
496		hdev->hci_ver = rp->hci_ver;
497		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
498		hdev->lmp_ver = rp->lmp_ver;
499		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
500		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
501	}
502}
503
504static void hci_cc_read_local_commands(struct hci_dev *hdev,
505				       struct sk_buff *skb)
506{
507	struct hci_rp_read_local_commands *rp = (void *) skb->data;
508
509	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
510
511	if (rp->status)
512		return;
513
514	if (test_bit(HCI_SETUP, &hdev->dev_flags))
515		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
516}
517
518static void hci_cc_read_local_features(struct hci_dev *hdev,
519				       struct sk_buff *skb)
520{
521	struct hci_rp_read_local_features *rp = (void *) skb->data;
522
523	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
524
525	if (rp->status)
526		return;
527
528	memcpy(hdev->features, rp->features, 8);
529
530	/* Adjust default settings according to features
531	 * supported by device. */
532
533	if (hdev->features[0][0] & LMP_3SLOT)
534		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
535
536	if (hdev->features[0][0] & LMP_5SLOT)
537		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
538
539	if (hdev->features[0][1] & LMP_HV2) {
540		hdev->pkt_type  |= (HCI_HV2);
541		hdev->esco_type |= (ESCO_HV2);
542	}
543
544	if (hdev->features[0][1] & LMP_HV3) {
545		hdev->pkt_type  |= (HCI_HV3);
546		hdev->esco_type |= (ESCO_HV3);
547	}
548
549	if (lmp_esco_capable(hdev))
550		hdev->esco_type |= (ESCO_EV3);
551
552	if (hdev->features[0][4] & LMP_EV4)
553		hdev->esco_type |= (ESCO_EV4);
554
555	if (hdev->features[0][4] & LMP_EV5)
556		hdev->esco_type |= (ESCO_EV5);
557
558	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
559		hdev->esco_type |= (ESCO_2EV3);
560
561	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
562		hdev->esco_type |= (ESCO_3EV3);
563
564	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
565		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
566}
567
568static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
569					   struct sk_buff *skb)
570{
571	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
572
573	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
574
575	if (rp->status)
576		return;
577
578	if (hdev->max_page < rp->max_page)
579		hdev->max_page = rp->max_page;
580
581	if (rp->page < HCI_MAX_PAGES)
582		memcpy(hdev->features[rp->page], rp->features, 8);
583}
584
585static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
586					  struct sk_buff *skb)
587{
588	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
589
590	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
591
592	if (rp->status)
593		return;
594
595	hdev->flow_ctl_mode = rp->mode;
596}
597
598static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
599{
600	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
601
602	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
603
604	if (rp->status)
605		return;
606
607	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
608	hdev->sco_mtu  = rp->sco_mtu;
609	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
610	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
611
612	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
613		hdev->sco_mtu  = 64;
614		hdev->sco_pkts = 8;
615	}
616
617	hdev->acl_cnt = hdev->acl_pkts;
618	hdev->sco_cnt = hdev->sco_pkts;
619
620	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
621	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
622}
623
624static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
625{
626	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
627
628	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
629
630	if (rp->status)
631		return;
632
633	if (test_bit(HCI_INIT, &hdev->flags))
634		bacpy(&hdev->bdaddr, &rp->bdaddr);
635
636	if (test_bit(HCI_SETUP, &hdev->dev_flags))
637		bacpy(&hdev->setup_addr, &rp->bdaddr);
638}
639
640static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
641					   struct sk_buff *skb)
642{
643	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
644
645	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
646
647	if (rp->status)
648		return;
649
650	if (test_bit(HCI_INIT, &hdev->flags)) {
651		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
652		hdev->page_scan_window = __le16_to_cpu(rp->window);
653	}
654}
655
656static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
657					    struct sk_buff *skb)
658{
659	u8 status = *((u8 *) skb->data);
660	struct hci_cp_write_page_scan_activity *sent;
661
662	BT_DBG("%s status 0x%2.2x", hdev->name, status);
663
664	if (status)
665		return;
666
667	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
668	if (!sent)
669		return;
670
671	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
672	hdev->page_scan_window = __le16_to_cpu(sent->window);
673}
674
675static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
676					   struct sk_buff *skb)
677{
678	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
679
680	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
681
682	if (rp->status)
683		return;
684
685	if (test_bit(HCI_INIT, &hdev->flags))
686		hdev->page_scan_type = rp->type;
687}
688
689static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
690					struct sk_buff *skb)
691{
692	u8 status = *((u8 *) skb->data);
693	u8 *type;
694
695	BT_DBG("%s status 0x%2.2x", hdev->name, status);
696
697	if (status)
698		return;
699
700	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
701	if (type)
702		hdev->page_scan_type = *type;
703}
704
705static void hci_cc_read_data_block_size(struct hci_dev *hdev,
706					struct sk_buff *skb)
707{
708	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
709
710	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
711
712	if (rp->status)
713		return;
714
715	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
716	hdev->block_len = __le16_to_cpu(rp->block_len);
717	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
718
719	hdev->block_cnt = hdev->num_blocks;
720
721	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
722	       hdev->block_cnt, hdev->block_len);
723}
724
725static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
726{
727	struct hci_rp_read_clock *rp = (void *) skb->data;
728	struct hci_cp_read_clock *cp;
729	struct hci_conn *conn;
730
731	BT_DBG("%s", hdev->name);
732
733	if (skb->len < sizeof(*rp))
734		return;
735
736	if (rp->status)
737		return;
738
739	hci_dev_lock(hdev);
740
741	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
742	if (!cp)
743		goto unlock;
744
745	if (cp->which == 0x00) {
746		hdev->clock = le32_to_cpu(rp->clock);
747		goto unlock;
748	}
749
750	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
751	if (conn) {
752		conn->clock = le32_to_cpu(rp->clock);
753		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
754	}
755
756unlock:
757	hci_dev_unlock(hdev);
758}
759
760static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
761				       struct sk_buff *skb)
762{
763	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
764
765	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
766
767	if (rp->status)
768		goto a2mp_rsp;
769
770	hdev->amp_status = rp->amp_status;
771	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
772	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
773	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
774	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
775	hdev->amp_type = rp->amp_type;
776	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
777	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
778	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
779	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
780
781a2mp_rsp:
782	a2mp_send_getinfo_rsp(hdev);
783}
784
785static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
786					struct sk_buff *skb)
787{
788	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
789	struct amp_assoc *assoc = &hdev->loc_assoc;
790	size_t rem_len, frag_len;
791
792	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
793
794	if (rp->status)
795		goto a2mp_rsp;
796
797	frag_len = skb->len - sizeof(*rp);
798	rem_len = __le16_to_cpu(rp->rem_len);
799
800	if (rem_len > frag_len) {
801		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
802
803		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
804		assoc->offset += frag_len;
805
806		/* Read other fragments */
807		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
808
809		return;
810	}
811
812	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
813	assoc->len = assoc->offset + rem_len;
814	assoc->offset = 0;
815
816a2mp_rsp:
817	/* Send A2MP Rsp when all fragments are received */
818	a2mp_send_getampassoc_rsp(hdev, rp->status);
819	a2mp_send_create_phy_link_req(hdev, rp->status);
820}
821
822static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
823					 struct sk_buff *skb)
824{
825	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
826
827	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
828
829	if (rp->status)
830		return;
831
832	hdev->inq_tx_power = rp->tx_power;
833}
834
835static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
836{
837	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
838	struct hci_cp_pin_code_reply *cp;
839	struct hci_conn *conn;
840
841	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
842
843	hci_dev_lock(hdev);
844
845	if (test_bit(HCI_MGMT, &hdev->dev_flags))
846		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
847
848	if (rp->status)
849		goto unlock;
850
851	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
852	if (!cp)
853		goto unlock;
854
855	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
856	if (conn)
857		conn->pin_length = cp->pin_len;
858
859unlock:
860	hci_dev_unlock(hdev);
861}
862
863static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
864{
865	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
866
867	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
868
869	hci_dev_lock(hdev);
870
871	if (test_bit(HCI_MGMT, &hdev->dev_flags))
872		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
873						 rp->status);
874
875	hci_dev_unlock(hdev);
876}
877
878static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
879				       struct sk_buff *skb)
880{
881	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
882
883	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
884
885	if (rp->status)
886		return;
887
888	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
889	hdev->le_pkts = rp->le_max_pkt;
890
891	hdev->le_cnt = hdev->le_pkts;
892
893	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
894}
895
896static void hci_cc_le_read_local_features(struct hci_dev *hdev,
897					  struct sk_buff *skb)
898{
899	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
900
901	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
902
903	if (rp->status)
904		return;
905
906	memcpy(hdev->le_features, rp->features, 8);
907}
908
909static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
910					struct sk_buff *skb)
911{
912	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
913
914	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
915
916	if (rp->status)
917		return;
918
919	hdev->adv_tx_power = rp->tx_power;
920}
921
922static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
923{
924	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
925
926	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
927
928	hci_dev_lock(hdev);
929
930	if (test_bit(HCI_MGMT, &hdev->dev_flags))
931		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
932						 rp->status);
933
934	hci_dev_unlock(hdev);
935}
936
937static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
938					  struct sk_buff *skb)
939{
940	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
941
942	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
943
944	hci_dev_lock(hdev);
945
946	if (test_bit(HCI_MGMT, &hdev->dev_flags))
947		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
948						     ACL_LINK, 0, rp->status);
949
950	hci_dev_unlock(hdev);
951}
952
953static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
954{
955	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
956
957	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
958
959	hci_dev_lock(hdev);
960
961	if (test_bit(HCI_MGMT, &hdev->dev_flags))
962		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
963						 0, rp->status);
964
965	hci_dev_unlock(hdev);
966}
967
968static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
969					  struct sk_buff *skb)
970{
971	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
972
973	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
974
975	hci_dev_lock(hdev);
976
977	if (test_bit(HCI_MGMT, &hdev->dev_flags))
978		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
979						     ACL_LINK, 0, rp->status);
980
981	hci_dev_unlock(hdev);
982}
983
984static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
985				       struct sk_buff *skb)
986{
987	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
988
989	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
990
991	hci_dev_lock(hdev);
992	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
993					  NULL, NULL, rp->status);
994	hci_dev_unlock(hdev);
995}
996
997static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
998					   struct sk_buff *skb)
999{
1000	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1001
1002	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1003
1004	hci_dev_lock(hdev);
1005	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
1006					  rp->hash256, rp->randomizer256,
1007					  rp->status);
1008	hci_dev_unlock(hdev);
1009}
1010
1011
1012static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1013{
1014	__u8 status = *((__u8 *) skb->data);
1015	bdaddr_t *sent;
1016
1017	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1018
1019	if (status)
1020		return;
1021
1022	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1023	if (!sent)
1024		return;
1025
1026	hci_dev_lock(hdev);
1027
1028	bacpy(&hdev->random_addr, sent);
1029
1030	hci_dev_unlock(hdev);
1031}
1032
1033static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1034{
1035	__u8 *sent, status = *((__u8 *) skb->data);
1036
1037	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1038
1039	if (status)
1040		return;
1041
1042	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1043	if (!sent)
1044		return;
1045
1046	hci_dev_lock(hdev);
1047
1048	/* If we're doing connection initation as peripheral. Set a
1049	 * timeout in case something goes wrong.
1050	 */
1051	if (*sent) {
1052		struct hci_conn *conn;
1053
1054		set_bit(HCI_LE_ADV, &hdev->dev_flags);
1055
1056		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1057		if (conn)
1058			queue_delayed_work(hdev->workqueue,
1059					   &conn->le_conn_timeout,
1060					   conn->conn_timeout);
1061	} else {
1062		clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1063	}
1064
1065	hci_dev_unlock(hdev);
1066}
1067
1068static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1069{
1070	struct hci_cp_le_set_scan_param *cp;
1071	__u8 status = *((__u8 *) skb->data);
1072
1073	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1074
1075	if (status)
1076		return;
1077
1078	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1079	if (!cp)
1080		return;
1081
1082	hci_dev_lock(hdev);
1083
1084	hdev->le_scan_type = cp->type;
1085
1086	hci_dev_unlock(hdev);
1087}
1088
1089static bool has_pending_adv_report(struct hci_dev *hdev)
1090{
1091	struct discovery_state *d = &hdev->discovery;
1092
1093	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1094}
1095
1096static void clear_pending_adv_report(struct hci_dev *hdev)
1097{
1098	struct discovery_state *d = &hdev->discovery;
1099
1100	bacpy(&d->last_adv_addr, BDADDR_ANY);
1101	d->last_adv_data_len = 0;
1102}
1103
1104static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1105				     u8 bdaddr_type, s8 rssi, u32 flags,
1106				     u8 *data, u8 len)
1107{
1108	struct discovery_state *d = &hdev->discovery;
1109
1110	bacpy(&d->last_adv_addr, bdaddr);
1111	d->last_adv_addr_type = bdaddr_type;
1112	d->last_adv_rssi = rssi;
1113	d->last_adv_flags = flags;
1114	memcpy(d->last_adv_data, data, len);
1115	d->last_adv_data_len = len;
1116}
1117
1118static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1119				      struct sk_buff *skb)
1120{
1121	struct hci_cp_le_set_scan_enable *cp;
1122	__u8 status = *((__u8 *) skb->data);
1123
1124	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1125
1126	if (status)
1127		return;
1128
1129	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1130	if (!cp)
1131		return;
1132
1133	switch (cp->enable) {
1134	case LE_SCAN_ENABLE:
1135		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1136		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1137			clear_pending_adv_report(hdev);
1138		break;
1139
1140	case LE_SCAN_DISABLE:
1141		/* We do this here instead of when setting DISCOVERY_STOPPED
1142		 * since the latter would potentially require waiting for
1143		 * inquiry to stop too.
1144		 */
1145		if (has_pending_adv_report(hdev)) {
1146			struct discovery_state *d = &hdev->discovery;
1147
1148			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1149					  d->last_adv_addr_type, NULL,
1150					  d->last_adv_rssi, d->last_adv_flags,
1151					  d->last_adv_data,
1152					  d->last_adv_data_len, NULL, 0);
1153		}
1154
1155		/* Cancel this timer so that we don't try to disable scanning
1156		 * when it's already disabled.
1157		 */
1158		cancel_delayed_work(&hdev->le_scan_disable);
1159
1160		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1161
1162		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1163		 * interrupted scanning due to a connect request. Mark
1164		 * therefore discovery as stopped. If this was not
1165		 * because of a connect request advertising might have
1166		 * been disabled because of active scanning, so
1167		 * re-enable it again if necessary.
1168		 */
1169		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1170				       &hdev->dev_flags))
1171			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1172		else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
1173			 hdev->discovery.state == DISCOVERY_FINDING)
1174			mgmt_reenable_advertising(hdev);
1175
1176		break;
1177
1178	default:
1179		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1180		break;
1181	}
1182}
1183
1184static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1185					   struct sk_buff *skb)
1186{
1187	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1188
1189	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1190
1191	if (rp->status)
1192		return;
1193
1194	hdev->le_white_list_size = rp->size;
1195}
1196
1197static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1198				       struct sk_buff *skb)
1199{
1200	__u8 status = *((__u8 *) skb->data);
1201
1202	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1203
1204	if (status)
1205		return;
1206
1207	hci_bdaddr_list_clear(&hdev->le_white_list);
1208}
1209
1210static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1211					struct sk_buff *skb)
1212{
1213	struct hci_cp_le_add_to_white_list *sent;
1214	__u8 status = *((__u8 *) skb->data);
1215
1216	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1217
1218	if (status)
1219		return;
1220
1221	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1222	if (!sent)
1223		return;
1224
1225	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1226			   sent->bdaddr_type);
1227}
1228
1229static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1230					  struct sk_buff *skb)
1231{
1232	struct hci_cp_le_del_from_white_list *sent;
1233	__u8 status = *((__u8 *) skb->data);
1234
1235	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1236
1237	if (status)
1238		return;
1239
1240	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1241	if (!sent)
1242		return;
1243
1244	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1245			    sent->bdaddr_type);
1246}
1247
1248static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1249					    struct sk_buff *skb)
1250{
1251	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1252
1253	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1254
1255	if (rp->status)
1256		return;
1257
1258	memcpy(hdev->le_states, rp->le_states, 8);
1259}
1260
1261static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1262					   struct sk_buff *skb)
1263{
1264	struct hci_cp_write_le_host_supported *sent;
1265	__u8 status = *((__u8 *) skb->data);
1266
1267	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1268
1269	if (status)
1270		return;
1271
1272	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1273	if (!sent)
1274		return;
1275
1276	if (sent->le) {
1277		hdev->features[1][0] |= LMP_HOST_LE;
1278		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1279	} else {
1280		hdev->features[1][0] &= ~LMP_HOST_LE;
1281		clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1282		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1283	}
1284
1285	if (sent->simul)
1286		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1287	else
1288		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1289}
1290
1291static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1292{
1293	struct hci_cp_le_set_adv_param *cp;
1294	u8 status = *((u8 *) skb->data);
1295
1296	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1297
1298	if (status)
1299		return;
1300
1301	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1302	if (!cp)
1303		return;
1304
1305	hci_dev_lock(hdev);
1306	hdev->adv_addr_type = cp->own_address_type;
1307	hci_dev_unlock(hdev);
1308}
1309
1310static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1311					  struct sk_buff *skb)
1312{
1313	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1314
1315	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1316	       hdev->name, rp->status, rp->phy_handle);
1317
1318	if (rp->status)
1319		return;
1320
1321	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1322}
1323
1324static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1325{
1326	struct hci_rp_read_rssi *rp = (void *) skb->data;
1327	struct hci_conn *conn;
1328
1329	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1330
1331	if (rp->status)
1332		return;
1333
1334	hci_dev_lock(hdev);
1335
1336	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1337	if (conn)
1338		conn->rssi = rp->rssi;
1339
1340	hci_dev_unlock(hdev);
1341}
1342
1343static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1344{
1345	struct hci_cp_read_tx_power *sent;
1346	struct hci_rp_read_tx_power *rp = (void *) skb->data;
1347	struct hci_conn *conn;
1348
1349	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1350
1351	if (rp->status)
1352		return;
1353
1354	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1355	if (!sent)
1356		return;
1357
1358	hci_dev_lock(hdev);
1359
1360	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1361	if (!conn)
1362		goto unlock;
1363
1364	switch (sent->type) {
1365	case 0x00:
1366		conn->tx_power = rp->tx_power;
1367		break;
1368	case 0x01:
1369		conn->max_tx_power = rp->tx_power;
1370		break;
1371	}
1372
1373unlock:
1374	hci_dev_unlock(hdev);
1375}
1376
1377static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1378{
1379	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1380
1381	if (status) {
1382		hci_conn_check_pending(hdev);
1383		return;
1384	}
1385
1386	set_bit(HCI_INQUIRY, &hdev->flags);
1387}
1388
/* Command status handler for HCI_Create_Connection.
 *
 * On failure: error 0x0c (Command Disallowed) is tolerated for up to
 * two attempts by parking the connection in BT_CONNECT2; any other
 * error (or exhausted attempts) closes and deletes the connection.
 * On success: make sure a connection object exists so the upcoming
 * Connection Complete event has something to attach to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c may be transient; retry up to two attempts */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			/* Controller-initiated path: create the object now */
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out  = true;
				conn->role = HCI_ROLE_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1428
1429static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1430{
1431	struct hci_cp_add_sco *cp;
1432	struct hci_conn *acl, *sco;
1433	__u16 handle;
1434
1435	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1436
1437	if (!status)
1438		return;
1439
1440	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1441	if (!cp)
1442		return;
1443
1444	handle = __le16_to_cpu(cp->handle);
1445
1446	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1447
1448	hci_dev_lock(hdev);
1449
1450	acl = hci_conn_hash_lookup_handle(hdev, handle);
1451	if (acl) {
1452		sco = acl->link;
1453		if (sco) {
1454			sco->state = BT_CLOSED;
1455
1456			hci_proto_connect_cfm(sco, status);
1457			hci_conn_del(sco);
1458		}
1459	}
1460
1461	hci_dev_unlock(hdev);
1462}
1463
1464static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1465{
1466	struct hci_cp_auth_requested *cp;
1467	struct hci_conn *conn;
1468
1469	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1470
1471	if (!status)
1472		return;
1473
1474	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1475	if (!cp)
1476		return;
1477
1478	hci_dev_lock(hdev);
1479
1480	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1481	if (conn) {
1482		if (conn->state == BT_CONFIG) {
1483			hci_proto_connect_cfm(conn, status);
1484			hci_conn_drop(conn);
1485		}
1486	}
1487
1488	hci_dev_unlock(hdev);
1489}
1490
1491static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1492{
1493	struct hci_cp_set_conn_encrypt *cp;
1494	struct hci_conn *conn;
1495
1496	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1497
1498	if (!status)
1499		return;
1500
1501	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1502	if (!cp)
1503		return;
1504
1505	hci_dev_lock(hdev);
1506
1507	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1508	if (conn) {
1509		if (conn->state == BT_CONFIG) {
1510			hci_proto_connect_cfm(conn, status);
1511			hci_conn_drop(conn);
1512		}
1513	}
1514
1515	hci_dev_unlock(hdev);
1516}
1517
1518static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1519				    struct hci_conn *conn)
1520{
1521	if (conn->state != BT_CONFIG || !conn->out)
1522		return 0;
1523
1524	if (conn->pending_sec_level == BT_SECURITY_SDP)
1525		return 0;
1526
1527	/* Only request authentication for SSP connections or non-SSP
1528	 * devices with sec_level MEDIUM or HIGH or if MITM protection
1529	 * is requested.
1530	 */
1531	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1532	    conn->pending_sec_level != BT_SECURITY_FIPS &&
1533	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1534	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1535		return 0;
1536
1537	return 1;
1538}
1539
/* Send a Remote Name Request for the given inquiry cache entry, reusing
 * the page scan parameters learned during inquiry so paging succeeds
 * quickly. Returns the result of hci_send_cmd() (0 on success).
 */
static int hci_resolve_name(struct hci_dev *hdev,
				   struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
1554
1555static bool hci_resolve_next_name(struct hci_dev *hdev)
1556{
1557	struct discovery_state *discov = &hdev->discovery;
1558	struct inquiry_entry *e;
1559
1560	if (list_empty(&discov->resolve))
1561		return false;
1562
1563	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1564	if (!e)
1565		return false;
1566
1567	if (hci_resolve_name(hdev, e) == 0) {
1568		e->name_state = NAME_PENDING;
1569		return true;
1570	}
1571
1572	return false;
1573}
1574
/* Handle the result of a remote name lookup for bdaddr (name is NULL on
 * failure) and advance the discovery name-resolution state machine:
 * report the name via mgmt, start resolving the next pending entry, and
 * mark discovery stopped when nothing remains (or it was stopping).
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* First mgmt notification for this connection carries the name */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are pending, there is no need to continue resolving a next name
	 * as it will be done upon receiving another Remote Name Request
	 * Complete event.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names pending: stay in DISCOVERY_RESOLVING */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1617
/* Command status handler for HCI_Remote_Name_Request. Failures are
 * handled immediately: the pending-name state machine is notified (with
 * no name) and, if the connection needs authentication, an auth request
 * is issued right away instead of waiting for the name.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* mgmt discovery bookkeeping only applies when mgmt is in use */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Only send one authentication request per connection */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1658
1659static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1660{
1661	struct hci_cp_read_remote_features *cp;
1662	struct hci_conn *conn;
1663
1664	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1665
1666	if (!status)
1667		return;
1668
1669	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1670	if (!cp)
1671		return;
1672
1673	hci_dev_lock(hdev);
1674
1675	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1676	if (conn) {
1677		if (conn->state == BT_CONFIG) {
1678			hci_proto_connect_cfm(conn, status);
1679			hci_conn_drop(conn);
1680		}
1681	}
1682
1683	hci_dev_unlock(hdev);
1684}
1685
1686static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1687{
1688	struct hci_cp_read_remote_ext_features *cp;
1689	struct hci_conn *conn;
1690
1691	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1692
1693	if (!status)
1694		return;
1695
1696	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1697	if (!cp)
1698		return;
1699
1700	hci_dev_lock(hdev);
1701
1702	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1703	if (conn) {
1704		if (conn->state == BT_CONFIG) {
1705			hci_proto_connect_cfm(conn, status);
1706			hci_conn_drop(conn);
1707		}
1708	}
1709
1710	hci_dev_unlock(hdev);
1711}
1712
1713static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1714{
1715	struct hci_cp_setup_sync_conn *cp;
1716	struct hci_conn *acl, *sco;
1717	__u16 handle;
1718
1719	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1720
1721	if (!status)
1722		return;
1723
1724	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1725	if (!cp)
1726		return;
1727
1728	handle = __le16_to_cpu(cp->handle);
1729
1730	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1731
1732	hci_dev_lock(hdev);
1733
1734	acl = hci_conn_hash_lookup_handle(hdev, handle);
1735	if (acl) {
1736		sco = acl->link;
1737		if (sco) {
1738			sco->state = BT_CLOSED;
1739
1740			hci_proto_connect_cfm(sco, status);
1741			hci_conn_del(sco);
1742		}
1743	}
1744
1745	hci_dev_unlock(hdev);
1746}
1747
/* Command status handler for HCI_Sniff_Mode. Only failures are handled:
 * clear the pending mode-change flag and, if SCO setup was deferred
 * until after the mode change, run it now with the error status.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		/* SCO setup that waited on the mode change proceeds now */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1774
/* Command status handler for HCI_Exit_Sniff_Mode. Only failures are
 * handled: clear the pending mode-change flag and, if SCO setup was
 * deferred until after the mode change, run it now with the error
 * status.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		/* SCO setup that waited on the mode change proceeds now */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1801
1802static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1803{
1804	struct hci_cp_disconnect *cp;
1805	struct hci_conn *conn;
1806
1807	if (!status)
1808		return;
1809
1810	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1811	if (!cp)
1812		return;
1813
1814	hci_dev_lock(hdev);
1815
1816	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1817	if (conn)
1818		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1819				       conn->dst_type, status);
1820
1821	hci_dev_unlock(hdev);
1822}
1823
1824static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1825{
1826	struct hci_cp_create_phy_link *cp;
1827
1828	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1829
1830	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1831	if (!cp)
1832		return;
1833
1834	hci_dev_lock(hdev);
1835
1836	if (status) {
1837		struct hci_conn *hcon;
1838
1839		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1840		if (hcon)
1841			hci_conn_del(hcon);
1842	} else {
1843		amp_write_remote_assoc(hdev, cp->phy_handle);
1844	}
1845
1846	hci_dev_unlock(hdev);
1847}
1848
1849static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1850{
1851	struct hci_cp_accept_phy_link *cp;
1852
1853	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1854
1855	if (status)
1856		return;
1857
1858	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1859	if (!cp)
1860		return;
1861
1862	amp_write_remote_assoc(hdev, cp->phy_handle);
1863}
1864
/* Command status handler for HCI_LE_Create_Connection. On success,
 * record the initiator/responder address pair on the connection (needed
 * by SMP) and arm a connection timeout for direct (non white list)
 * connection attempts.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1915
/* Command status handler for HCI_LE_Start_Encryption. If the command
 * failed on an established connection, the encryption state can no
 * longer be trusted, so disconnect with an authentication failure.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Connections not yet established are handled elsewhere */
	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
1945
/* Handle the Inquiry Complete event: clear the inquiry flag (waking any
 * waiters), then advance mgmt discovery either to name resolution (when
 * discovered devices still need their names) or to stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Pair the clear above with waiters sleeping on the bit */
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state is only tracked when mgmt is in use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving names of discovered devices, if any remain */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1986
1987static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1988{
1989	struct inquiry_data data;
1990	struct inquiry_info *info = (void *) (skb->data + 1);
1991	int num_rsp = *((__u8 *) skb->data);
1992
1993	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1994
1995	if (!num_rsp)
1996		return;
1997
1998	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1999		return;
2000
2001	hci_dev_lock(hdev);
2002
2003	for (; num_rsp; num_rsp--, info++) {
2004		u32 flags;
2005
2006		bacpy(&data.bdaddr, &info->bdaddr);
2007		data.pscan_rep_mode	= info->pscan_rep_mode;
2008		data.pscan_period_mode	= info->pscan_period_mode;
2009		data.pscan_mode		= info->pscan_mode;
2010		memcpy(data.dev_class, info->dev_class, 3);
2011		data.clock_offset	= info->clock_offset;
2012		data.rssi		= 0x00;
2013		data.ssp_mode		= 0x00;
2014
2015		flags = hci_inquiry_cache_update(hdev, &data, false);
2016
2017		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2018				  info->dev_class, 0, flags, NULL, 0, NULL, 0);
2019	}
2020
2021	hci_dev_unlock(hdev);
2022}
2023
/* Handle the Connection Complete event for BR/EDR ACL and (e)SCO
 * links: on success, move the connection into BT_CONFIG/BT_CONNECTED,
 * register it in sysfs, mirror device-wide auth/encrypt settings, and
 * kick off remote feature discovery; on failure, report to mgmt and
 * delete the connection.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* An eSCO request may have been downgraded to SCO */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy connections without a stored key
			 * are likely to pair; give them a longer timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		/* Mirror the device-wide auth/encrypt policy on the link */
		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* ACL links confirm later, after remote features arrive */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2105
2106static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2107{
2108	struct hci_cp_reject_conn_req cp;
2109
2110	bacpy(&cp.bdaddr, bdaddr);
2111	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2112	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2113}
2114
/* Handle the Connection Request event: consult the registered protocols
 * and the black/white lists to decide whether to accept, then either
 * send the appropriate accept command (ACL or sync) or defer the
 * decision to the protocol layer.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let registered protocols veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connectable: reject blacklisted peers. Non-connectable: only
	 * accept peers explicitly present on the whitelist.
	 */
	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
					   BDADDR_BREDR)) {
			hci_reject_conn(hdev, &ev->bdaddr);
			return;
		}
	} else {
		if (!hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
					    BDADDR_BREDR)) {
			hci_reject_conn(hdev, &ev->bdaddr);
			return;
		}
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class from the event */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		/* Default 8 kB/s bandwidth with no latency/retransmission
		 * constraints for accepted sync connections.
		 */
		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Protocol asked to defer; let it confirm the connection */
		conn->state = BT_CONNECT2;
		hci_proto_connect_cfm(conn, 0);
	}
}
2204
2205static u8 hci_to_mgmt_reason(u8 err)
2206{
2207	switch (err) {
2208	case HCI_ERROR_CONNECTION_TIMEOUT:
2209		return MGMT_DEV_DISCONN_TIMEOUT;
2210	case HCI_ERROR_REMOTE_USER_TERM:
2211	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2212	case HCI_ERROR_REMOTE_POWER_OFF:
2213		return MGMT_DEV_DISCONN_REMOTE;
2214	case HCI_ERROR_LOCAL_HOST_TERM:
2215		return MGMT_DEV_DISCONN_LOCAL_HOST;
2216	default:
2217		return MGMT_DEV_DISCONN_UNKNOWN;
2218	}
2219}
2220
/* Handle the Disconnection Complete event: report to mgmt, clean up
 * link keys for flush-keyed ACL connections, re-queue auto-connect
 * parameters where configured, delete the connection, and re-enable
 * LE advertising if the departed link had suppressed it.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only tell mgmt "disconnected" if it was told "connected" before */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK &&
	    test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	/* Re-arm auto-connection for devices configured for it */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Remember the link type before the connection is freed */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2294
/* Handle the Authentication Complete event: record the achieved
 * security level (rejecting legacy re-auth), then either continue
 * connection setup with encryption (SSP), finish the setup, or notify
 * auth waiters. Pending encryption requests are started or failed here
 * as well.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (pre-SSP) devices cannot be re-authenticated */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* SSP requires encryption before the link is usable */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* A pending encryption request either starts now or fails out */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2360
/* Handle the Remote Name Request Complete event: feed the result into
 * the mgmt name-resolution machinery and, if the connection still needs
 * authentication, issue the auth request that was deferred until the
 * name arrived.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Name bookkeeping is only relevant when mgmt is in use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Only send one authentication request per connection */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2399
/* Encryption Change event: mirror the controller's reported encryption
 * state into the connection flags and notify the upper layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt value 0x02 on BR/EDR indicates AES-CCM;
			 * encrypted LE links always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption change on an established link takes the
	 * link down with an authentication failure.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2465
2466static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2467					     struct sk_buff *skb)
2468{
2469	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2470	struct hci_conn *conn;
2471
2472	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2473
2474	hci_dev_lock(hdev);
2475
2476	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2477	if (conn) {
2478		if (!ev->status)
2479			set_bit(HCI_CONN_SECURE, &conn->flags);
2480
2481		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2482
2483		hci_key_change_cfm(conn, ev->status);
2484	}
2485
2486	hci_dev_unlock(hdev);
2487}
2488
/* Remote Supported Features event: store page 0 of the remote feature
 * mask and continue connection setup (extended features, remote name,
 * or completion) for links still in BT_CONFIG.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* When both sides support SSP, fetch the extended feature page
	 * before finishing setup; the extended-features handler resumes
	 * the sequence below.
	 */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name once per connection, then report the
	 * connection to mgmt exactly once.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2538
/* Command Complete event: dispatch to the per-opcode hci_cc_* handler,
 * complete the matching request and restart command queue processing.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters is the command status.
	 * NOTE(review): skb->len is not validated against sizeof(*ev) + 1
	 * here — presumably guaranteed by the caller/controller; confirm.
	 */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	/* Strip the event header so handlers see only return parameters */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command completed, so the command timeout watchdog can
	 * be cancelled (HCI_OP_NOP completes are controller keep-alives).
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ncmd > 0 means the controller can accept more commands; resume
	 * the command queue unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2818
/* Command Status event: dispatch to the per-opcode hci_cs_* handler and
 * restart command queue processing.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	/* Strip the event header before the per-opcode handlers run */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command was acknowledged, so the command timeout
	 * watchdog can be cancelled (HCI_OP_NOP is a keep-alive).
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete the request now unless the sent command expects a
	 * specific follow-up event to finish it (and did not fail).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* ncmd > 0 means the controller can accept more commands; resume
	 * the command queue unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2911
2912static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2913{
2914	struct hci_ev_role_change *ev = (void *) skb->data;
2915	struct hci_conn *conn;
2916
2917	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2918
2919	hci_dev_lock(hdev);
2920
2921	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2922	if (conn) {
2923		if (!ev->status)
2924			conn->role = ev->role;
2925
2926		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2927
2928		hci_role_switch_cfm(conn, ev->status, ev->role);
2929	}
2930
2931	hci_dev_unlock(hdev);
2932}
2933
/* Number of Completed Packets event: credit back per-link-type packet
 * quotas and restart TX processing. Only valid in packet-based flow
 * control mode.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that num_hndl handle/count pairs really fit in the
	 * payload; the first check guards the read of ev->num_hndl.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return the credits to the right pool, clamped to the
		 * controller-reported maximum for that pool.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL pool with LE traffic.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2999
3000static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3001						 __u16 handle)
3002{
3003	struct hci_chan *chan;
3004
3005	switch (hdev->dev_type) {
3006	case HCI_BREDR:
3007		return hci_conn_hash_lookup_handle(hdev, handle);
3008	case HCI_AMP:
3009		chan = hci_chan_lookup_handle(hdev, handle);
3010		if (chan)
3011			return chan->conn;
3012		break;
3013	default:
3014		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3015		break;
3016	}
3017
3018	return NULL;
3019}
3020
/* Number of Completed Data Blocks event: credit back the shared block
 * pool and restart TX processing. Only valid in block-based flow
 * control mode.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that num_hndl entries really fit in the payload; the
	 * first check guards the read of ev->num_hndl.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Credits go back to the shared block pool,
			 * clamped to the controller-reported maximum.
			 */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3070
3071static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3072{
3073	struct hci_ev_mode_change *ev = (void *) skb->data;
3074	struct hci_conn *conn;
3075
3076	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3077
3078	hci_dev_lock(hdev);
3079
3080	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3081	if (conn) {
3082		conn->mode = ev->mode;
3083
3084		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3085					&conn->flags)) {
3086			if (conn->mode == HCI_CM_ACTIVE)
3087				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3088			else
3089				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3090		}
3091
3092		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3093			hci_sco_setup(conn, ev->status);
3094	}
3095
3096	hci_dev_unlock(hdev);
3097}
3098
3099static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3100{
3101	struct hci_ev_pin_code_req *ev = (void *) skb->data;
3102	struct hci_conn *conn;
3103
3104	BT_DBG("%s", hdev->name);
3105
3106	hci_dev_lock(hdev);
3107
3108	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3109	if (!conn)
3110		goto unlock;
3111
3112	if (conn->state == BT_CONNECTED) {
3113		hci_conn_hold(conn);
3114		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3115		hci_conn_drop(conn);
3116	}
3117
3118	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
3119		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3120			     sizeof(ev->bdaddr), &ev->bdaddr);
3121	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
3122		u8 secure;
3123
3124		if (conn->pending_sec_level == BT_SECURITY_HIGH)
3125			secure = 1;
3126		else
3127			secure = 0;
3128
3129		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3130	}
3131
3132unlock:
3133	hci_dev_unlock(hdev);
3134}
3135
/* Link Key Request event: look up a stored key for the peer and reply
 * with it, unless the key is too weak for the security level the
 * connection requires.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Reject unauthenticated keys when the connection's
		 * auth type demands MITM protection (low bit set).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Combination keys derived from short PINs are too weak
		 * for high or FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	/* Negative reply carries only the 6-byte bdaddr */
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3194
/* Link Key Notification event: store the newly created link key, tell
 * mgmt about it and decide whether it should persist past disconnect.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed combination key keeps the original key type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del(&key->list);
		kfree(key);
	} else if (conn) {
		if (persistent)
			clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
		else
			set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	}

unlock:
	hci_dev_unlock(hdev);
}
3248
3249static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3250{
3251	struct hci_ev_clock_offset *ev = (void *) skb->data;
3252	struct hci_conn *conn;
3253
3254	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3255
3256	hci_dev_lock(hdev);
3257
3258	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3259	if (conn && !ev->status) {
3260		struct inquiry_entry *ie;
3261
3262		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3263		if (ie) {
3264			ie->data.clock_offset = ev->clock_offset;
3265			ie->timestamp = jiffies;
3266		}
3267	}
3268
3269	hci_dev_unlock(hdev);
3270}
3271
3272static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3273{
3274	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3275	struct hci_conn *conn;
3276
3277	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3278
3279	hci_dev_lock(hdev);
3280
3281	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3282	if (conn && !ev->status)
3283		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3284
3285	hci_dev_unlock(hdev);
3286}
3287
3288static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3289{
3290	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3291	struct inquiry_entry *ie;
3292
3293	BT_DBG("%s", hdev->name);
3294
3295	hci_dev_lock(hdev);
3296
3297	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3298	if (ie) {
3299		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3300		ie->timestamp = jiffies;
3301	}
3302
3303	hci_dev_unlock(hdev);
3304}
3305
3306static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3307					     struct sk_buff *skb)
3308{
3309	struct inquiry_data data;
3310	int num_rsp = *((__u8 *) skb->data);
3311
3312	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3313
3314	if (!num_rsp)
3315		return;
3316
3317	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3318		return;
3319
3320	hci_dev_lock(hdev);
3321
3322	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3323		struct inquiry_info_with_rssi_and_pscan_mode *info;
3324		info = (void *) (skb->data + 1);
3325
3326		for (; num_rsp; num_rsp--, info++) {
3327			u32 flags;
3328
3329			bacpy(&data.bdaddr, &info->bdaddr);
3330			data.pscan_rep_mode	= info->pscan_rep_mode;
3331			data.pscan_period_mode	= info->pscan_period_mode;
3332			data.pscan_mode		= info->pscan_mode;
3333			memcpy(data.dev_class, info->dev_class, 3);
3334			data.clock_offset	= info->clock_offset;
3335			data.rssi		= info->rssi;
3336			data.ssp_mode		= 0x00;
3337
3338			flags = hci_inquiry_cache_update(hdev, &data, false);
3339
3340			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3341					  info->dev_class, info->rssi,
3342					  flags, NULL, 0, NULL, 0);
3343		}
3344	} else {
3345		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3346
3347		for (; num_rsp; num_rsp--, info++) {
3348			u32 flags;
3349
3350			bacpy(&data.bdaddr, &info->bdaddr);
3351			data.pscan_rep_mode	= info->pscan_rep_mode;
3352			data.pscan_period_mode	= info->pscan_period_mode;
3353			data.pscan_mode		= 0x00;
3354			memcpy(data.dev_class, info->dev_class, 3);
3355			data.clock_offset	= info->clock_offset;
3356			data.rssi		= info->rssi;
3357			data.ssp_mode		= 0x00;
3358
3359			flags = hci_inquiry_cache_update(hdev, &data, false);
3360
3361			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3362					  info->dev_class, info->rssi,
3363					  flags, NULL, 0, NULL, 0);
3364		}
3365	}
3366
3367	hci_dev_unlock(hdev);
3368}
3369
/* Remote Extended Features event: store the requested feature page,
 * derive the remote host's SSP/SC support from page 1, and continue
 * connection setup for links still in BT_CONFIG.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's feature bits (SSP, SC) */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name once per connection, then report the
	 * connection to mgmt exactly once.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3435
3436static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3437				       struct sk_buff *skb)
3438{
3439	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3440	struct hci_conn *conn;
3441
3442	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3443
3444	hci_dev_lock(hdev);
3445
3446	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3447	if (!conn) {
3448		if (ev->link_type == ESCO_LINK)
3449			goto unlock;
3450
3451		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3452		if (!conn)
3453			goto unlock;
3454
3455		conn->type = SCO_LINK;
3456	}
3457
3458	switch (ev->status) {
3459	case 0x00:
3460		conn->handle = __le16_to_cpu(ev->handle);
3461		conn->state  = BT_CONNECTED;
3462
3463		hci_conn_add_sysfs(conn);
3464		break;
3465
3466	case 0x10:	/* Connection Accept Timeout */
3467	case 0x0d:	/* Connection Rejected due to Limited Resources */
3468	case 0x11:	/* Unsupported Feature or Parameter Value */
3469	case 0x1c:	/* SCO interval rejected */
3470	case 0x1a:	/* Unsupported Remote Feature */
3471	case 0x1f:	/* Unspecified error */
3472	case 0x20:	/* Unsupported LMP Parameter value */
3473		if (conn->out) {
3474			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3475					(hdev->esco_type & EDR_ESCO_MASK);
3476			if (hci_setup_sync(conn, conn->link->handle))
3477				goto unlock;
3478		}
3479		/* fall through */
3480
3481	default:
3482		conn->state = BT_CLOSED;
3483		break;
3484	}
3485
3486	hci_proto_connect_cfm(conn, ev->status);
3487	if (ev->status)
3488		hci_conn_del(conn);
3489
3490unlock:
3491	hci_dev_unlock(hdev);
3492}
3493
3494static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3495{
3496	size_t parsed = 0;
3497
3498	while (parsed < eir_len) {
3499		u8 field_len = eir[0];
3500
3501		if (field_len == 0)
3502			return parsed;
3503
3504		parsed += field_len + 1;
3505		eir += field_len + 1;
3506	}
3507
3508	return eir_len;
3509}
3510
3511static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3512					    struct sk_buff *skb)
3513{
3514	struct inquiry_data data;
3515	struct extended_inquiry_info *info = (void *) (skb->data + 1);
3516	int num_rsp = *((__u8 *) skb->data);
3517	size_t eir_len;
3518
3519	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3520
3521	if (!num_rsp)
3522		return;
3523
3524	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3525		return;
3526
3527	hci_dev_lock(hdev);
3528
3529	for (; num_rsp; num_rsp--, info++) {
3530		u32 flags;
3531		bool name_known;
3532
3533		bacpy(&data.bdaddr, &info->bdaddr);
3534		data.pscan_rep_mode	= info->pscan_rep_mode;
3535		data.pscan_period_mode	= info->pscan_period_mode;
3536		data.pscan_mode		= 0x00;
3537		memcpy(data.dev_class, info->dev_class, 3);
3538		data.clock_offset	= info->clock_offset;
3539		data.rssi		= info->rssi;
3540		data.ssp_mode		= 0x01;
3541
3542		if (test_bit(HCI_MGMT, &hdev->dev_flags))
3543			name_known = eir_has_data_type(info->data,
3544						       sizeof(info->data),
3545						       EIR_NAME_COMPLETE);
3546		else
3547			name_known = true;
3548
3549		flags = hci_inquiry_cache_update(hdev, &data, name_known);
3550
3551		eir_len = eir_get_length(info->data, sizeof(info->data));
3552
3553		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3554				  info->dev_class, info->rssi,
3555				  flags, info->data, eir_len, NULL, 0);
3556	}
3557
3558	hci_dev_unlock(hdev);
3559}
3560
/* Handle HCI Encryption Key Refresh Complete event.
 *
 * For LE links this marks the end of (re-)encryption: on success the
 * pending security level takes effect, on failure an established
 * connection is torn down. BR/EDR links are handled elsewhere.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Encryption failed on an established link: disconnect it */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* NOTE(review): hold/drop pair with a fresh disc_timeout
		 * presumably re-arms the disconnect timer - confirm.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3610
3611static u8 hci_get_auth_req(struct hci_conn *conn)
3612{
3613	/* If remote requests no-bonding follow that lead */
3614	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3615	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3616		return conn->remote_auth | (conn->auth_type & 0x01);
3617
3618	/* If both remote and local have enough IO capabilities, require
3619	 * MITM protection
3620	 */
3621	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3622	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3623		return conn->remote_auth | 0x01;
3624
3625	/* No MITM protection possible so ignore remote requirement */
3626	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3627}
3628
3629static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3630{
3631	struct hci_ev_io_capa_request *ev = (void *) skb->data;
3632	struct hci_conn *conn;
3633
3634	BT_DBG("%s", hdev->name);
3635
3636	hci_dev_lock(hdev);
3637
3638	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3639	if (!conn)
3640		goto unlock;
3641
3642	hci_conn_hold(conn);
3643
3644	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3645		goto unlock;
3646
3647	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3648	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3649		struct hci_cp_io_capability_reply cp;
3650
3651		bacpy(&cp.bdaddr, &ev->bdaddr);
3652		/* Change the IO capability from KeyboardDisplay
3653		 * to DisplayYesNo as it is not supported by BT spec. */
3654		cp.capability = (conn->io_capability == 0x04) ?
3655				HCI_IO_DISPLAY_YESNO : conn->io_capability;
3656
3657		/* If we are initiators, there is no remote information yet */
3658		if (conn->remote_auth == 0xff) {
3659			/* Request MITM protection if our IO caps allow it
3660			 * except for the no-bonding case.
3661			 */
3662			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3663			    cp.authentication != HCI_AT_NO_BONDING)
3664				conn->auth_type |= 0x01;
3665
3666			cp.authentication = conn->auth_type;
3667		} else {
3668			conn->auth_type = hci_get_auth_req(conn);
3669			cp.authentication = conn->auth_type;
3670		}
3671
3672		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3673		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3674			cp.oob_data = 0x01;
3675		else
3676			cp.oob_data = 0x00;
3677
3678		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3679			     sizeof(cp), &cp);
3680	} else {
3681		struct hci_cp_io_capability_neg_reply cp;
3682
3683		bacpy(&cp.bdaddr, &ev->bdaddr);
3684		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3685
3686		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3687			     sizeof(cp), &cp);
3688	}
3689
3690unlock:
3691	hci_dev_unlock(hdev);
3692}
3693
3694static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3695{
3696	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3697	struct hci_conn *conn;
3698
3699	BT_DBG("%s", hdev->name);
3700
3701	hci_dev_lock(hdev);
3702
3703	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3704	if (!conn)
3705		goto unlock;
3706
3707	conn->remote_cap = ev->capability;
3708	conn->remote_auth = ev->authentication;
3709	if (ev->oob_data)
3710		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3711
3712unlock:
3713	hci_dev_unlock(hdev);
3714}
3715
/* Handle HCI User Confirmation Request event.
 *
 * Decides whether the numeric comparison can be rejected, auto-accepted
 * locally (possibly after a configurable delay) or must be forwarded to
 * user space via mgmt_user_confirm_request().
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM in which case we do auto-accept.
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Defer the accept so user space has a chance to cancel */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3788
3789static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3790					 struct sk_buff *skb)
3791{
3792	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3793
3794	BT_DBG("%s", hdev->name);
3795
3796	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3797		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3798}
3799
3800static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3801					struct sk_buff *skb)
3802{
3803	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3804	struct hci_conn *conn;
3805
3806	BT_DBG("%s", hdev->name);
3807
3808	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3809	if (!conn)
3810		return;
3811
3812	conn->passkey_notify = __le32_to_cpu(ev->passkey);
3813	conn->passkey_entered = 0;
3814
3815	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3816		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3817					 conn->dst_type, conn->passkey_notify,
3818					 conn->passkey_entered);
3819}
3820
3821static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3822{
3823	struct hci_ev_keypress_notify *ev = (void *) skb->data;
3824	struct hci_conn *conn;
3825
3826	BT_DBG("%s", hdev->name);
3827
3828	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3829	if (!conn)
3830		return;
3831
3832	switch (ev->type) {
3833	case HCI_KEYPRESS_STARTED:
3834		conn->passkey_entered = 0;
3835		return;
3836
3837	case HCI_KEYPRESS_ENTERED:
3838		conn->passkey_entered++;
3839		break;
3840
3841	case HCI_KEYPRESS_ERASED:
3842		conn->passkey_entered--;
3843		break;
3844
3845	case HCI_KEYPRESS_CLEARED:
3846		conn->passkey_entered = 0;
3847		break;
3848
3849	case HCI_KEYPRESS_COMPLETED:
3850		return;
3851	}
3852
3853	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3854		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3855					 conn->dst_type, conn->passkey_notify,
3856					 conn->passkey_entered);
3857}
3858
/* Handle HCI Simple Pairing Complete event.
 *
 * Reports pairing failures to user space (unless we initiated the
 * authentication, see below) and drops a connection reference -
 * presumably balancing the hci_conn_hold() taken in
 * hci_io_capa_request_evt(); confirm against that handler.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
3887
3888static void hci_remote_host_features_evt(struct hci_dev *hdev,
3889					 struct sk_buff *skb)
3890{
3891	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3892	struct inquiry_entry *ie;
3893	struct hci_conn *conn;
3894
3895	BT_DBG("%s", hdev->name);
3896
3897	hci_dev_lock(hdev);
3898
3899	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3900	if (conn)
3901		memcpy(conn->features[1], ev->features, 8);
3902
3903	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3904	if (ie)
3905		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3906
3907	hci_dev_unlock(hdev);
3908}
3909
3910static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3911					    struct sk_buff *skb)
3912{
3913	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3914	struct oob_data *data;
3915
3916	BT_DBG("%s", hdev->name);
3917
3918	hci_dev_lock(hdev);
3919
3920	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3921		goto unlock;
3922
3923	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3924	if (data) {
3925		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
3926			struct hci_cp_remote_oob_ext_data_reply cp;
3927
3928			bacpy(&cp.bdaddr, &ev->bdaddr);
3929			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
3930			memcpy(cp.randomizer192, data->randomizer192,
3931			       sizeof(cp.randomizer192));
3932			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
3933			memcpy(cp.randomizer256, data->randomizer256,
3934			       sizeof(cp.randomizer256));
3935
3936			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
3937				     sizeof(cp), &cp);
3938		} else {
3939			struct hci_cp_remote_oob_data_reply cp;
3940
3941			bacpy(&cp.bdaddr, &ev->bdaddr);
3942			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
3943			memcpy(cp.randomizer, data->randomizer192,
3944			       sizeof(cp.randomizer));
3945
3946			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
3947				     sizeof(cp), &cp);
3948		}
3949	} else {
3950		struct hci_cp_remote_oob_data_neg_reply cp;
3951
3952		bacpy(&cp.bdaddr, &ev->bdaddr);
3953		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
3954			     sizeof(cp), &cp);
3955	}
3956
3957unlock:
3958	hci_dev_unlock(hdev);
3959}
3960
3961static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3962				      struct sk_buff *skb)
3963{
3964	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3965	struct hci_conn *hcon, *bredr_hcon;
3966
3967	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3968	       ev->status);
3969
3970	hci_dev_lock(hdev);
3971
3972	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3973	if (!hcon) {
3974		hci_dev_unlock(hdev);
3975		return;
3976	}
3977
3978	if (ev->status) {
3979		hci_conn_del(hcon);
3980		hci_dev_unlock(hdev);
3981		return;
3982	}
3983
3984	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3985
3986	hcon->state = BT_CONNECTED;
3987	bacpy(&hcon->dst, &bredr_hcon->dst);
3988
3989	hci_conn_hold(hcon);
3990	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3991	hci_conn_drop(hcon);
3992
3993	hci_conn_add_sysfs(hcon);
3994
3995	amp_physical_cfm(bredr_hcon, hcon);
3996
3997	hci_dev_unlock(hdev);
3998}
3999
/* Handle HCI Logical Link Complete event (AMP).
 *
 * Creates the hci_chan for the new logical link and confirms logical
 * link setup to the L2CAP channel waiting on the AMP manager.
 *
 * NOTE(review): unlike most event handlers this one runs without
 * taking hci_dev_lock - confirm that is intentional.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP links use the block-based MTU of the controller */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4037
4038static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4039					     struct sk_buff *skb)
4040{
4041	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4042	struct hci_chan *hchan;
4043
4044	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4045	       le16_to_cpu(ev->handle), ev->status);
4046
4047	if (ev->status)
4048		return;
4049
4050	hci_dev_lock(hdev);
4051
4052	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4053	if (!hchan)
4054		goto unlock;
4055
4056	amp_destroy_logical_link(hchan, ev->reason);
4057
4058unlock:
4059	hci_dev_unlock(hdev);
4060}
4061
4062static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4063					     struct sk_buff *skb)
4064{
4065	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4066	struct hci_conn *hcon;
4067
4068	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4069
4070	if (ev->status)
4071		return;
4072
4073	hci_dev_lock(hdev);
4074
4075	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4076	if (hcon) {
4077		hcon->state = BT_CLOSED;
4078		hci_conn_del(hcon);
4079	}
4080
4081	hci_dev_unlock(hdev);
4082}
4083
/* Handle HCI LE Connection Complete event.
 *
 * Looks up (or creates) the hci_conn for the new LE link, records the
 * initiator/responder addresses, resolves a resolvable random address
 * back to the identity address where possible, and on success marks the
 * connection established and notifies mgmt and the upper protocols.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		conn->role = ev->role;
		if (conn->role == HCI_ROLE_MASTER)
			conn->out = true;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* A connect attempt was pending; its timeout is obsolete */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The device is now connected; remove it from the pending
	 * auto-connection action list.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params)
		list_del_init(&params->action);

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4216
4217static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4218					    struct sk_buff *skb)
4219{
4220	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4221	struct hci_conn *conn;
4222
4223	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4224
4225	if (ev->status)
4226		return;
4227
4228	hci_dev_lock(hdev);
4229
4230	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4231	if (conn) {
4232		conn->le_conn_interval = le16_to_cpu(ev->interval);
4233		conn->le_conn_latency = le16_to_cpu(ev->latency);
4234		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4235	}
4236
4237	hci_dev_unlock(hdev);
4238}
4239
4240/* This function requires the caller holds hdev->lock */
4241static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
4242				  u8 addr_type, u8 adv_type)
4243{
4244	struct hci_conn *conn;
4245
4246	/* If the event is not connectable don't proceed further */
4247	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4248		return;
4249
4250	/* Ignore if the device is blocked */
4251	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4252		return;
4253
4254	/* If we're connectable, always connect any ADV_DIRECT_IND event */
4255	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
4256	    adv_type == LE_ADV_DIRECT_IND)
4257		goto connect;
4258
4259	/* If we're not connectable only connect devices that we have in
4260	 * our pend_le_conns list.
4261	 */
4262	if (!hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, addr_type))
4263		return;
4264
4265connect:
4266	/* Request connection in master = true role */
4267	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4268			      HCI_LE_AUTOCONN_TIMEOUT, true);
4269	if (!IS_ERR(conn))
4270		return;
4271
4272	switch (PTR_ERR(conn)) {
4273	case -EBUSY:
4274		/* If hci_connect() returns -EBUSY it means there is already
4275		 * an LE connection attempt going on. Since controllers don't
4276		 * support more than one connection attempt at the time, we
4277		 * don't consider this an error case.
4278		 */
4279		break;
4280	default:
4281		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
4282	}
4283}
4284
/* Process a single LE advertising report.
 *
 * Resolves resolvable random addresses to identity addresses, triggers
 * pending auto-connections, and implements the advertising-report /
 * scan-response merging logic behind mgmt device found events.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	bool match;
	u32 flags;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4408
4409static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4410{
4411	u8 num_reports = skb->data[0];
4412	void *ptr = &skb->data[1];
4413
4414	hci_dev_lock(hdev);
4415
4416	while (num_reports--) {
4417		struct hci_ev_le_advertising_info *ev = ptr;
4418		s8 rssi;
4419
4420		rssi = ev->data[ev->length];
4421		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4422				   ev->bdaddr_type, rssi, ev->data, ev->length);
4423
4424		ptr += sizeof(*ev) + ev->length + 1;
4425	}
4426
4427	hci_dev_unlock(hdev);
4428}
4429
/* Handle HCI LE Long Term Key Request event.
 *
 * Looks up a stored LTK (or STK) matching the connection and the
 * ediv/rand values from the event, and either replies with the key or
 * sends a negative reply when no matching key is found.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* The security level that will apply once encryption completes
	 * depends on whether the key is authenticated.
	 */
	if (ltk->authenticated)
		conn->pending_sec_level = BT_SECURITY_HIGH;
	else
		conn->pending_sec_level = BT_SECURITY_MEDIUM;

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		/* One-time key: remove it so it is never reused */
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del(&ltk->list);
		kfree(ltk);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4485
4486static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4487				      u8 reason)
4488{
4489	struct hci_cp_le_conn_param_req_neg_reply cp;
4490
4491	cp.handle = cpu_to_le16(handle);
4492	cp.reason = reason;
4493
4494	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4495		     &cp);
4496}
4497
4498static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4499					     struct sk_buff *skb)
4500{
4501	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4502	struct hci_cp_le_conn_param_req_reply cp;
4503	struct hci_conn *hcon;
4504	u16 handle, min, max, latency, timeout;
4505
4506	handle = le16_to_cpu(ev->handle);
4507	min = le16_to_cpu(ev->interval_min);
4508	max = le16_to_cpu(ev->interval_max);
4509	latency = le16_to_cpu(ev->latency);
4510	timeout = le16_to_cpu(ev->timeout);
4511
4512	hcon = hci_conn_hash_lookup_handle(hdev, handle);
4513	if (!hcon || hcon->state != BT_CONNECTED)
4514		return send_conn_param_neg_reply(hdev, handle,
4515						 HCI_ERROR_UNKNOWN_CONN_ID);
4516
4517	if (hci_check_conn_params(min, max, latency, timeout))
4518		return send_conn_param_neg_reply(hdev, handle,
4519						 HCI_ERROR_INVALID_LL_PARAMS);
4520
4521	if (hcon->role == HCI_ROLE_MASTER) {
4522		struct hci_conn_params *params;
4523		u8 store_hint;
4524
4525		hci_dev_lock(hdev);
4526
4527		params = hci_conn_params_lookup(hdev, &hcon->dst,
4528						hcon->dst_type);
4529		if (params) {
4530			params->conn_min_interval = min;
4531			params->conn_max_interval = max;
4532			params->conn_latency = latency;
4533			params->supervision_timeout = timeout;
4534			store_hint = 0x01;
4535		} else{
4536			store_hint = 0x00;
4537		}
4538
4539		hci_dev_unlock(hdev);
4540
4541		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4542				    store_hint, min, max, latency, timeout);
4543	}
4544
4545	cp.handle = ev->handle;
4546	cp.interval_min = ev->interval_min;
4547	cp.interval_max = ev->interval_max;
4548	cp.latency = ev->latency;
4549	cp.timeout = ev->timeout;
4550	cp.min_ce_len = 0;
4551	cp.max_ce_len = 0;
4552
4553	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4554}
4555
4556static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4557{
4558	struct hci_ev_le_meta *le_ev = (void *) skb->data;
4559
4560	skb_pull(skb, sizeof(*le_ev));
4561
4562	switch (le_ev->subevent) {
4563	case HCI_EV_LE_CONN_COMPLETE:
4564		hci_le_conn_complete_evt(hdev, skb);
4565		break;
4566
4567	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4568		hci_le_conn_update_complete_evt(hdev, skb);
4569		break;
4570
4571	case HCI_EV_LE_ADVERTISING_REPORT:
4572		hci_le_adv_report_evt(hdev, skb);
4573		break;
4574
4575	case HCI_EV_LE_LTK_REQ:
4576		hci_le_ltk_request_evt(hdev, skb);
4577		break;
4578
4579	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4580		hci_le_remote_conn_param_req_evt(hdev, skb);
4581		break;
4582
4583	default:
4584		break;
4585	}
4586}
4587
4588static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4589{
4590	struct hci_ev_channel_selected *ev = (void *) skb->data;
4591	struct hci_conn *hcon;
4592
4593	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4594
4595	skb_pull(skb, sizeof(*ev));
4596
4597	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4598	if (!hcon)
4599		return;
4600
4601	amp_read_loc_assoc_final_data(hdev, hcon);
4602}
4603
4604void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4605{
4606	struct hci_event_hdr *hdr = (void *) skb->data;
4607	__u8 event = hdr->evt;
4608
4609	hci_dev_lock(hdev);
4610
4611	/* Received events are (currently) only needed when a request is
4612	 * ongoing so avoid unnecessary memory allocation.
4613	 */
4614	if (hci_req_pending(hdev)) {
4615		kfree_skb(hdev->recv_evt);
4616		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4617	}
4618
4619	hci_dev_unlock(hdev);
4620
4621	skb_pull(skb, HCI_EVENT_HDR_SIZE);
4622
4623	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
4624		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
4625		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
4626
4627		hci_req_cmd_complete(hdev, opcode, 0);
4628	}
4629
4630	switch (event) {
4631	case HCI_EV_INQUIRY_COMPLETE:
4632		hci_inquiry_complete_evt(hdev, skb);
4633		break;
4634
4635	case HCI_EV_INQUIRY_RESULT:
4636		hci_inquiry_result_evt(hdev, skb);
4637		break;
4638
4639	case HCI_EV_CONN_COMPLETE:
4640		hci_conn_complete_evt(hdev, skb);
4641		break;
4642
4643	case HCI_EV_CONN_REQUEST:
4644		hci_conn_request_evt(hdev, skb);
4645		break;
4646
4647	case HCI_EV_DISCONN_COMPLETE:
4648		hci_disconn_complete_evt(hdev, skb);
4649		break;
4650
4651	case HCI_EV_AUTH_COMPLETE:
4652		hci_auth_complete_evt(hdev, skb);
4653		break;
4654
4655	case HCI_EV_REMOTE_NAME:
4656		hci_remote_name_evt(hdev, skb);
4657		break;
4658
4659	case HCI_EV_ENCRYPT_CHANGE:
4660		hci_encrypt_change_evt(hdev, skb);
4661		break;
4662
4663	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4664		hci_change_link_key_complete_evt(hdev, skb);
4665		break;
4666
4667	case HCI_EV_REMOTE_FEATURES:
4668		hci_remote_features_evt(hdev, skb);
4669		break;
4670
4671	case HCI_EV_CMD_COMPLETE:
4672		hci_cmd_complete_evt(hdev, skb);
4673		break;
4674
4675	case HCI_EV_CMD_STATUS:
4676		hci_cmd_status_evt(hdev, skb);
4677		break;
4678
4679	case HCI_EV_ROLE_CHANGE:
4680		hci_role_change_evt(hdev, skb);
4681		break;
4682
4683	case HCI_EV_NUM_COMP_PKTS:
4684		hci_num_comp_pkts_evt(hdev, skb);
4685		break;
4686
4687	case HCI_EV_MODE_CHANGE:
4688		hci_mode_change_evt(hdev, skb);
4689		break;
4690
4691	case HCI_EV_PIN_CODE_REQ:
4692		hci_pin_code_request_evt(hdev, skb);
4693		break;
4694
4695	case HCI_EV_LINK_KEY_REQ:
4696		hci_link_key_request_evt(hdev, skb);
4697		break;
4698
4699	case HCI_EV_LINK_KEY_NOTIFY:
4700		hci_link_key_notify_evt(hdev, skb);
4701		break;
4702
4703	case HCI_EV_CLOCK_OFFSET:
4704		hci_clock_offset_evt(hdev, skb);
4705		break;
4706
4707	case HCI_EV_PKT_TYPE_CHANGE:
4708		hci_pkt_type_change_evt(hdev, skb);
4709		break;
4710
4711	case HCI_EV_PSCAN_REP_MODE:
4712		hci_pscan_rep_mode_evt(hdev, skb);
4713		break;
4714
4715	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4716		hci_inquiry_result_with_rssi_evt(hdev, skb);
4717		break;
4718
4719	case HCI_EV_REMOTE_EXT_FEATURES:
4720		hci_remote_ext_features_evt(hdev, skb);
4721		break;
4722
4723	case HCI_EV_SYNC_CONN_COMPLETE:
4724		hci_sync_conn_complete_evt(hdev, skb);
4725		break;
4726
4727	case HCI_EV_EXTENDED_INQUIRY_RESULT:
4728		hci_extended_inquiry_result_evt(hdev, skb);
4729		break;
4730
4731	case HCI_EV_KEY_REFRESH_COMPLETE:
4732		hci_key_refresh_complete_evt(hdev, skb);
4733		break;
4734
4735	case HCI_EV_IO_CAPA_REQUEST:
4736		hci_io_capa_request_evt(hdev, skb);
4737		break;
4738
4739	case HCI_EV_IO_CAPA_REPLY:
4740		hci_io_capa_reply_evt(hdev, skb);
4741		break;
4742
4743	case HCI_EV_USER_CONFIRM_REQUEST:
4744		hci_user_confirm_request_evt(hdev, skb);
4745		break;
4746
4747	case HCI_EV_USER_PASSKEY_REQUEST:
4748		hci_user_passkey_request_evt(hdev, skb);
4749		break;
4750
4751	case HCI_EV_USER_PASSKEY_NOTIFY:
4752		hci_user_passkey_notify_evt(hdev, skb);
4753		break;
4754
4755	case HCI_EV_KEYPRESS_NOTIFY:
4756		hci_keypress_notify_evt(hdev, skb);
4757		break;
4758
4759	case HCI_EV_SIMPLE_PAIR_COMPLETE:
4760		hci_simple_pair_complete_evt(hdev, skb);
4761		break;
4762
4763	case HCI_EV_REMOTE_HOST_FEATURES:
4764		hci_remote_host_features_evt(hdev, skb);
4765		break;
4766
4767	case HCI_EV_LE_META:
4768		hci_le_meta_evt(hdev, skb);
4769		break;
4770
4771	case HCI_EV_CHANNEL_SELECTED:
4772		hci_chan_selected_evt(hdev, skb);
4773		break;
4774
4775	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4776		hci_remote_oob_data_request_evt(hdev, skb);
4777		break;
4778
4779	case HCI_EV_PHY_LINK_COMPLETE:
4780		hci_phy_link_complete_evt(hdev, skb);
4781		break;
4782
4783	case HCI_EV_LOGICAL_LINK_COMPLETE:
4784		hci_loglink_complete_evt(hdev, skb);
4785		break;
4786
4787	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4788		hci_disconn_loglink_complete_evt(hdev, skb);
4789		break;
4790
4791	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4792		hci_disconn_phylink_complete_evt(hdev, skb);
4793		break;
4794
4795	case HCI_EV_NUM_COMP_BLOCKS:
4796		hci_num_comp_blocks_evt(hdev, skb);
4797		break;
4798
4799	default:
4800		BT_DBG("%s event 0x%2.2x", hdev->name, event);
4801		break;
4802	}
4803
4804	kfree_skb(skb);
4805	hdev->stat.evt_rx++;
4806}
4807