hci_event.c revision bc6d2d04182877b198c1a945b7c401decbbb8c02
1/*
2   BlueZ - Bluetooth protocol stack for Linux
3   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License version 2 as
9   published by the Free Software Foundation;
10
11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22   SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI event handling. */
26
27#include <asm/unaligned.h>
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/mgmt.h>
32
33#include "a2mp.h"
34#include "amp.h"
35#include "smp.h"
36
37/* Handle HCI Event packets */
38
/* Command Complete handler for HCI_Inquiry_Cancel.
 *
 * On success, drop the HCI_INQUIRY flag, wake any task waiting for
 * inquiry to finish, mark discovery stopped and retry connection
 * attempts that were deferred while inquiry was in progress.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Keep the clear_bit/barrier/wake_up_bit sequence intact: the
	 * barrier is required between clearing the bit and waking the
	 * waiter (see wake_up_bit documentation).
	 */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
58
59static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60{
61	__u8 status = *((__u8 *) skb->data);
62
63	BT_DBG("%s status 0x%2.2x", hdev->name, status);
64
65	if (status)
66		return;
67
68	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69}
70
71static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72{
73	__u8 status = *((__u8 *) skb->data);
74
75	BT_DBG("%s status 0x%2.2x", hdev->name, status);
76
77	if (status)
78		return;
79
80	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81
82	hci_conn_check_pending(hdev);
83}
84
/* Command Complete handler for HCI_Remote_Name_Request_Cancel.
 *
 * No state is updated here; only a debug trace is emitted. The actual
 * outcome is delivered through a separate HCI event.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
90
91static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
92{
93	struct hci_rp_role_discovery *rp = (void *) skb->data;
94	struct hci_conn *conn;
95
96	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
97
98	if (rp->status)
99		return;
100
101	hci_dev_lock(hdev);
102
103	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104	if (conn) {
105		if (rp->role)
106			clear_bit(HCI_CONN_MASTER, &conn->flags);
107		else
108			set_bit(HCI_CONN_MASTER, &conn->flags);
109	}
110
111	hci_dev_unlock(hdev);
112}
113
114static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
115{
116	struct hci_rp_read_link_policy *rp = (void *) skb->data;
117	struct hci_conn *conn;
118
119	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
120
121	if (rp->status)
122		return;
123
124	hci_dev_lock(hdev);
125
126	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
127	if (conn)
128		conn->link_policy = __le16_to_cpu(rp->policy);
129
130	hci_dev_unlock(hdev);
131}
132
/* Command Complete handler for HCI_Write_Link_Policy_Settings.
 *
 * The response only carries the status and handle, so the policy value
 * that was written is recovered from the parameters of the command we
 * sent, via hci_sent_cmd_data().
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* Policy follows the 2 byte handle in the command params */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
156
157static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
158					struct sk_buff *skb)
159{
160	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
161
162	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
163
164	if (rp->status)
165		return;
166
167	hdev->link_policy = __le16_to_cpu(rp->policy);
168}
169
170static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
171					 struct sk_buff *skb)
172{
173	__u8 status = *((__u8 *) skb->data);
174	void *sent;
175
176	BT_DBG("%s status 0x%2.2x", hdev->name, status);
177
178	if (status)
179		return;
180
181	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
182	if (!sent)
183		return;
184
185	hdev->link_policy = get_unaligned_le16(sent);
186}
187
/* Command Complete handler for HCI_Reset.
 *
 * Clears the in-progress reset flag and reverts all volatile state to
 * its defaults: non-persistent device flags, discovery state, TX power
 * values, cached advertising/scan-response data, LE scan type and SSP
 * debug mode. Note the status byte is logged but not checked here.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Drop any cached advertising data */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	/* Drop any cached scan response data */
	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;
}
213
/* Command Complete handler for HCI_Write_Local_Name.
 *
 * When the management interface is in use, completion (success or
 * failure) is reported to it. Otherwise the name we sent is cached in
 * hdev->dev_name, but only on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
234
235static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
236{
237	struct hci_rp_read_local_name *rp = (void *) skb->data;
238
239	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
240
241	if (rp->status)
242		return;
243
244	if (test_bit(HCI_SETUP, &hdev->dev_flags))
245		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
246}
247
248static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
249{
250	__u8 status = *((__u8 *) skb->data);
251	void *sent;
252
253	BT_DBG("%s status 0x%2.2x", hdev->name, status);
254
255	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
256	if (!sent)
257		return;
258
259	if (!status) {
260		__u8 param = *((__u8 *) sent);
261
262		if (param == AUTH_ENABLED)
263			set_bit(HCI_AUTH, &hdev->flags);
264		else
265			clear_bit(HCI_AUTH, &hdev->flags);
266	}
267
268	if (test_bit(HCI_MGMT, &hdev->dev_flags))
269		mgmt_auth_enable_complete(hdev, status);
270}
271
272static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
273{
274	__u8 status = *((__u8 *) skb->data);
275	__u8 param;
276	void *sent;
277
278	BT_DBG("%s status 0x%2.2x", hdev->name, status);
279
280	if (status)
281		return;
282
283	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
284	if (!sent)
285		return;
286
287	param = *((__u8 *) sent);
288
289	if (param)
290		set_bit(HCI_ENCRYPT, &hdev->flags);
291	else
292		clear_bit(HCI_ENCRYPT, &hdev->flags);
293}
294
295static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
296{
297	__u8 status = *((__u8 *) skb->data);
298	__u8 param;
299	void *sent;
300
301	BT_DBG("%s status 0x%2.2x", hdev->name, status);
302
303	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
304	if (!sent)
305		return;
306
307	param = *((__u8 *) sent);
308
309	hci_dev_lock(hdev);
310
311	if (status) {
312		mgmt_write_scan_failed(hdev, param, status);
313		hdev->discov_timeout = 0;
314		goto done;
315	}
316
317	if (param & SCAN_INQUIRY)
318		set_bit(HCI_ISCAN, &hdev->flags);
319	else
320		clear_bit(HCI_ISCAN, &hdev->flags);
321
322	if (param & SCAN_PAGE)
323		set_bit(HCI_PSCAN, &hdev->flags);
324	else
325		clear_bit(HCI_ISCAN, &hdev->flags);
326
327done:
328	hci_dev_unlock(hdev);
329}
330
/* Command Complete handler for HCI_Read_Class_of_Device.
 *
 * Caches the 3 byte class of device value. The debug print emits the
 * bytes most-significant first (dev_class is stored little-endian).
 */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
345
/* Command Complete handler for HCI_Write_Class_of_Device.
 *
 * On success the class value we sent becomes the cached dev_class.
 * The management interface is notified of the outcome either way.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
367
368static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
369{
370	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
371	__u16 setting;
372
373	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
374
375	if (rp->status)
376		return;
377
378	setting = __le16_to_cpu(rp->voice_setting);
379
380	if (hdev->voice_setting == setting)
381		return;
382
383	hdev->voice_setting = setting;
384
385	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
386
387	if (hdev->notify)
388		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
389}
390
391static void hci_cc_write_voice_setting(struct hci_dev *hdev,
392				       struct sk_buff *skb)
393{
394	__u8 status = *((__u8 *) skb->data);
395	__u16 setting;
396	void *sent;
397
398	BT_DBG("%s status 0x%2.2x", hdev->name, status);
399
400	if (status)
401		return;
402
403	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
404	if (!sent)
405		return;
406
407	setting = get_unaligned_le16(sent);
408
409	if (hdev->voice_setting == setting)
410		return;
411
412	hdev->voice_setting = setting;
413
414	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
415
416	if (hdev->notify)
417		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
418}
419
420static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
421					  struct sk_buff *skb)
422{
423	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
424
425	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
426
427	if (rp->status)
428		return;
429
430	hdev->num_iac = rp->num_iac;
431
432	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
433}
434
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode.
 *
 * On success the host-features SSP bit is synced with the mode we
 * sent. The outcome is reported to the management interface when it is
 * active; otherwise the HCI_SSP_ENABLED device flag is updated
 * directly (on success only).
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		/* Mirror the new mode in the host features page */
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
462
/* Command Complete handler for HCI_Write_Secure_Connections_Host_Support.
 *
 * Mirrors hci_cc_write_ssp_mode: on success the host-features SC bit
 * is synced with the value we sent; the management interface is
 * notified when active, otherwise HCI_SC_ENABLED is updated directly.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	if (!status) {
		/* Mirror the new support setting in the host features page */
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}
}
490
491static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
492{
493	struct hci_rp_read_local_version *rp = (void *) skb->data;
494
495	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
496
497	if (rp->status)
498		return;
499
500	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
501		hdev->hci_ver = rp->hci_ver;
502		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
503		hdev->lmp_ver = rp->lmp_ver;
504		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
505		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
506	}
507}
508
509static void hci_cc_read_local_commands(struct hci_dev *hdev,
510				       struct sk_buff *skb)
511{
512	struct hci_rp_read_local_commands *rp = (void *) skb->data;
513
514	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
515
516	if (rp->status)
517		return;
518
519	if (test_bit(HCI_SETUP, &hdev->dev_flags))
520		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
521}
522
/* Command Complete handler for HCI_Read_Local_Supported_Features.
 *
 * Caches the feature pages and derives the set of usable ACL packet
 * types (pkt_type) and (e)SCO packet types (esco_type) from the
 * advertised LMP feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
572
573static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
574					   struct sk_buff *skb)
575{
576	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
577
578	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
579
580	if (rp->status)
581		return;
582
583	if (hdev->max_page < rp->max_page)
584		hdev->max_page = rp->max_page;
585
586	if (rp->page < HCI_MAX_PAGES)
587		memcpy(hdev->features[rp->page], rp->features, 8);
588}
589
590static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
591					  struct sk_buff *skb)
592{
593	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
594
595	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
596
597	if (rp->status)
598		return;
599
600	hdev->flow_ctl_mode = rp->mode;
601}
602
603static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
604{
605	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
606
607	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
608
609	if (rp->status)
610		return;
611
612	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
613	hdev->sco_mtu  = rp->sco_mtu;
614	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
615	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
616
617	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
618		hdev->sco_mtu  = 64;
619		hdev->sco_pkts = 8;
620	}
621
622	hdev->acl_cnt = hdev->acl_pkts;
623	hdev->sco_cnt = hdev->sco_pkts;
624
625	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
626	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
627}
628
629static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
630{
631	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
632
633	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
634
635	if (rp->status)
636		return;
637
638	if (test_bit(HCI_INIT, &hdev->flags))
639		bacpy(&hdev->bdaddr, &rp->bdaddr);
640
641	if (test_bit(HCI_SETUP, &hdev->dev_flags))
642		bacpy(&hdev->setup_addr, &rp->bdaddr);
643}
644
645static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
646					   struct sk_buff *skb)
647{
648	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
649
650	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
651
652	if (rp->status)
653		return;
654
655	if (test_bit(HCI_INIT, &hdev->flags)) {
656		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
657		hdev->page_scan_window = __le16_to_cpu(rp->window);
658	}
659}
660
661static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
662					    struct sk_buff *skb)
663{
664	u8 status = *((u8 *) skb->data);
665	struct hci_cp_write_page_scan_activity *sent;
666
667	BT_DBG("%s status 0x%2.2x", hdev->name, status);
668
669	if (status)
670		return;
671
672	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
673	if (!sent)
674		return;
675
676	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
677	hdev->page_scan_window = __le16_to_cpu(sent->window);
678}
679
680static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
681					   struct sk_buff *skb)
682{
683	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
684
685	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
686
687	if (rp->status)
688		return;
689
690	if (test_bit(HCI_INIT, &hdev->flags))
691		hdev->page_scan_type = rp->type;
692}
693
694static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
695					struct sk_buff *skb)
696{
697	u8 status = *((u8 *) skb->data);
698	u8 *type;
699
700	BT_DBG("%s status 0x%2.2x", hdev->name, status);
701
702	if (status)
703		return;
704
705	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
706	if (type)
707		hdev->page_scan_type = *type;
708}
709
710static void hci_cc_read_data_block_size(struct hci_dev *hdev,
711					struct sk_buff *skb)
712{
713	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
714
715	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
716
717	if (rp->status)
718		return;
719
720	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
721	hdev->block_len = __le16_to_cpu(rp->block_len);
722	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
723
724	hdev->block_cnt = hdev->num_blocks;
725
726	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
727	       hdev->block_cnt, hdev->block_len);
728}
729
/* Command Complete handler for HCI_Read_Clock.
 *
 * The Which parameter of the command we sent determines where the
 * clock value is stored: 0x00 means the local clock (hdev->clock),
 * otherwise the piconet clock and accuracy are stored on the
 * connection matching the returned handle.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated response */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
764
765static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
766				       struct sk_buff *skb)
767{
768	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
769
770	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
771
772	if (rp->status)
773		goto a2mp_rsp;
774
775	hdev->amp_status = rp->amp_status;
776	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
777	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
778	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
779	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
780	hdev->amp_type = rp->amp_type;
781	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
782	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
783	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
784	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
785
786a2mp_rsp:
787	a2mp_send_getinfo_rsp(hdev);
788}
789
/* Command Complete handler for HCI_Read_Local_AMP_ASSOC.
 *
 * The AMP association may arrive in several fragments. Each fragment
 * is appended at assoc->offset; while rem_len exceeds the fragment
 * just received, another read is issued and the function returns
 * early. Once the final fragment is in, the A2MP responses are sent.
 */
static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
	struct amp_assoc *assoc = &hdev->loc_assoc;
	size_t rem_len, frag_len;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	frag_len = skb->len - sizeof(*rp);
	rem_len = __le16_to_cpu(rp->rem_len);

	if (rem_len > frag_len) {
		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);

		/* More data outstanding: stash this fragment and ask
		 * for the next one.
		 */
		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
		assoc->offset += frag_len;

		/* Read other fragments */
		amp_read_loc_assoc_frag(hdev, rp->phy_handle);

		return;
	}

	/* Last fragment: complete the association and reset the offset
	 * for the next read cycle.
	 */
	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
	assoc->len = assoc->offset + rem_len;
	assoc->offset = 0;

a2mp_rsp:
	/* Send A2MP Rsp when all fragments are received */
	a2mp_send_getampassoc_rsp(hdev, rp->status);
	a2mp_send_create_phy_link_req(hdev, rp->status);
}
826
827static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
828					 struct sk_buff *skb)
829{
830	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
831
832	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
833
834	if (rp->status)
835		return;
836
837	hdev->inq_tx_power = rp->tx_power;
838}
839
/* Command Complete handler for HCI_PIN_Code_Request_Reply.
 *
 * Notifies the management interface of the outcome and, on success,
 * records the PIN length we sent on the matching ACL connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	/* The PIN length comes from the command we sent, not the reply */
	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
867
868static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
869{
870	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
871
872	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
873
874	hci_dev_lock(hdev);
875
876	if (test_bit(HCI_MGMT, &hdev->dev_flags))
877		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
878						 rp->status);
879
880	hci_dev_unlock(hdev);
881}
882
883static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
884				       struct sk_buff *skb)
885{
886	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
887
888	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
889
890	if (rp->status)
891		return;
892
893	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
894	hdev->le_pkts = rp->le_max_pkt;
895
896	hdev->le_cnt = hdev->le_pkts;
897
898	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
899}
900
901static void hci_cc_le_read_local_features(struct hci_dev *hdev,
902					  struct sk_buff *skb)
903{
904	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
905
906	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
907
908	if (rp->status)
909		return;
910
911	memcpy(hdev->le_features, rp->features, 8);
912}
913
914static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
915					struct sk_buff *skb)
916{
917	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
918
919	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
920
921	if (rp->status)
922		return;
923
924	hdev->adv_tx_power = rp->tx_power;
925}
926
927static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
928{
929	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
930
931	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
932
933	hci_dev_lock(hdev);
934
935	if (test_bit(HCI_MGMT, &hdev->dev_flags))
936		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
937						 rp->status);
938
939	hci_dev_unlock(hdev);
940}
941
942static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
943					  struct sk_buff *skb)
944{
945	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
946
947	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
948
949	hci_dev_lock(hdev);
950
951	if (test_bit(HCI_MGMT, &hdev->dev_flags))
952		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
953						     ACL_LINK, 0, rp->status);
954
955	hci_dev_unlock(hdev);
956}
957
958static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
959{
960	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
961
962	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
963
964	hci_dev_lock(hdev);
965
966	if (test_bit(HCI_MGMT, &hdev->dev_flags))
967		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
968						 0, rp->status);
969
970	hci_dev_unlock(hdev);
971}
972
973static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
974					  struct sk_buff *skb)
975{
976	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
977
978	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
979
980	hci_dev_lock(hdev);
981
982	if (test_bit(HCI_MGMT, &hdev->dev_flags))
983		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
984						     ACL_LINK, 0, rp->status);
985
986	hci_dev_unlock(hdev);
987}
988
989static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
990				       struct sk_buff *skb)
991{
992	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
993
994	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
995
996	hci_dev_lock(hdev);
997	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
998					  NULL, NULL, rp->status);
999	hci_dev_unlock(hdev);
1000}
1001
1002static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1003					   struct sk_buff *skb)
1004{
1005	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1006
1007	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1008
1009	hci_dev_lock(hdev);
1010	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
1011					  rp->hash256, rp->randomizer256,
1012					  rp->status);
1013	hci_dev_unlock(hdev);
1014}
1015
1016
1017static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1018{
1019	__u8 status = *((__u8 *) skb->data);
1020	bdaddr_t *sent;
1021
1022	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1023
1024	if (status)
1025		return;
1026
1027	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1028	if (!sent)
1029		return;
1030
1031	hci_dev_lock(hdev);
1032
1033	bacpy(&hdev->random_addr, sent);
1034
1035	hci_dev_unlock(hdev);
1036}
1037
/* Command Complete handler for HCI_LE_Set_Advertise_Enable.
 *
 * Syncs the HCI_LE_ADV flag with the enable value we sent. When
 * advertising was enabled while an LE connection attempt is in
 * BT_CONNECT state, a connection timeout is armed so a peripheral-role
 * connect attempt cannot hang forever.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		set_bit(HCI_LE_ADV, &hdev->dev_flags);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		clear_bit(HCI_LE_ADV, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
1072
1073static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1074{
1075	struct hci_cp_le_set_scan_param *cp;
1076	__u8 status = *((__u8 *) skb->data);
1077
1078	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1079
1080	if (status)
1081		return;
1082
1083	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1084	if (!cp)
1085		return;
1086
1087	hci_dev_lock(hdev);
1088
1089	hdev->le_scan_type = cp->type;
1090
1091	hci_dev_unlock(hdev);
1092}
1093
1094static bool has_pending_adv_report(struct hci_dev *hdev)
1095{
1096	struct discovery_state *d = &hdev->discovery;
1097
1098	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1099}
1100
1101static void clear_pending_adv_report(struct hci_dev *hdev)
1102{
1103	struct discovery_state *d = &hdev->discovery;
1104
1105	bacpy(&d->last_adv_addr, BDADDR_ANY);
1106	d->last_adv_data_len = 0;
1107}
1108
1109static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1110				     u8 bdaddr_type, s8 rssi, u32 flags,
1111				     u8 *data, u8 len)
1112{
1113	struct discovery_state *d = &hdev->discovery;
1114
1115	bacpy(&d->last_adv_addr, bdaddr);
1116	d->last_adv_addr_type = bdaddr_type;
1117	d->last_adv_rssi = rssi;
1118	d->last_adv_flags = flags;
1119	memcpy(d->last_adv_data, data, len);
1120	d->last_adv_data_len = len;
1121}
1122
/* Command Complete handler for HCI_LE_Set_Scan_Enable.
 *
 * Tracks the HCI_LE_SCAN flag according to the enable value we sent.
 * On enable, a stale buffered advertising report is dropped for active
 * scans. On disable, a still-buffered report is flushed to userspace,
 * the scan-disable timer is cancelled and, depending on why scanning
 * stopped, discovery is marked stopped or advertising is re-enabled.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1188
1189static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1190					   struct sk_buff *skb)
1191{
1192	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1193
1194	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1195
1196	if (rp->status)
1197		return;
1198
1199	hdev->le_white_list_size = rp->size;
1200}
1201
1202static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1203				       struct sk_buff *skb)
1204{
1205	__u8 status = *((__u8 *) skb->data);
1206
1207	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1208
1209	if (status)
1210		return;
1211
1212	hci_bdaddr_list_clear(&hdev->le_white_list);
1213}
1214
1215static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1216					struct sk_buff *skb)
1217{
1218	struct hci_cp_le_add_to_white_list *sent;
1219	__u8 status = *((__u8 *) skb->data);
1220
1221	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1222
1223	if (status)
1224		return;
1225
1226	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1227	if (!sent)
1228		return;
1229
1230	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1231			   sent->bdaddr_type);
1232}
1233
1234static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1235					  struct sk_buff *skb)
1236{
1237	struct hci_cp_le_del_from_white_list *sent;
1238	__u8 status = *((__u8 *) skb->data);
1239
1240	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1241
1242	if (status)
1243		return;
1244
1245	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1246	if (!sent)
1247		return;
1248
1249	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1250			    sent->bdaddr_type);
1251}
1252
1253static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1254					    struct sk_buff *skb)
1255{
1256	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1257
1258	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1259
1260	if (rp->status)
1261		return;
1262
1263	memcpy(hdev->le_states, rp->le_states, 8);
1264}
1265
/* Handle the command complete event for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * Mirrors the LE Host Support values that were written to the
 * controller into the cached host feature bits and device flags so
 * the host view stays consistent with the controller state.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the parameters of the command this event completes */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
		/* Advertising cannot remain active once LE support has
		 * been switched off.
		 */
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
}
1295
1296static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1297{
1298	struct hci_cp_le_set_adv_param *cp;
1299	u8 status = *((u8 *) skb->data);
1300
1301	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1302
1303	if (status)
1304		return;
1305
1306	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1307	if (!cp)
1308		return;
1309
1310	hci_dev_lock(hdev);
1311	hdev->adv_addr_type = cp->own_address_type;
1312	hci_dev_unlock(hdev);
1313}
1314
1315static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1316					  struct sk_buff *skb)
1317{
1318	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1319
1320	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1321	       hdev->name, rp->status, rp->phy_handle);
1322
1323	if (rp->status)
1324		return;
1325
1326	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1327}
1328
1329static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1330{
1331	struct hci_rp_read_rssi *rp = (void *) skb->data;
1332	struct hci_conn *conn;
1333
1334	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1335
1336	if (rp->status)
1337		return;
1338
1339	hci_dev_lock(hdev);
1340
1341	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1342	if (conn)
1343		conn->rssi = rp->rssi;
1344
1345	hci_dev_unlock(hdev);
1346}
1347
1348static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1349{
1350	struct hci_cp_read_tx_power *sent;
1351	struct hci_rp_read_tx_power *rp = (void *) skb->data;
1352	struct hci_conn *conn;
1353
1354	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1355
1356	if (rp->status)
1357		return;
1358
1359	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1360	if (!sent)
1361		return;
1362
1363	hci_dev_lock(hdev);
1364
1365	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1366	if (!conn)
1367		goto unlock;
1368
1369	switch (sent->type) {
1370	case 0x00:
1371		conn->tx_power = rp->tx_power;
1372		break;
1373	case 0x01:
1374		conn->max_tx_power = rp->tx_power;
1375		break;
1376	}
1377
1378unlock:
1379	hci_dev_unlock(hdev);
1380}
1381
1382static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1383{
1384	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1385
1386	if (status) {
1387		hci_conn_check_pending(hdev);
1388		return;
1389	}
1390
1391	set_bit(HCI_INQUIRY, &hdev->flags);
1392}
1393
/* Handle the command status event for HCI_OP_CREATE_CONN.
 *
 * On failure the pending ACL connection is torn down unless the
 * failure looks retryable; on success a connection object is created
 * for the outgoing link if one does not already exist.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c (Command Disallowed per the BT
			 * core spec) is treated as retryable: park the
			 * connection in BT_CONNECT2 for up to two
			 * attempts instead of deleting it.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				set_bit(HCI_CONN_MASTER, &conn->flags);
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1433
1434static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1435{
1436	struct hci_cp_add_sco *cp;
1437	struct hci_conn *acl, *sco;
1438	__u16 handle;
1439
1440	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1441
1442	if (!status)
1443		return;
1444
1445	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1446	if (!cp)
1447		return;
1448
1449	handle = __le16_to_cpu(cp->handle);
1450
1451	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1452
1453	hci_dev_lock(hdev);
1454
1455	acl = hci_conn_hash_lookup_handle(hdev, handle);
1456	if (acl) {
1457		sco = acl->link;
1458		if (sco) {
1459			sco->state = BT_CLOSED;
1460
1461			hci_proto_connect_cfm(sco, status);
1462			hci_conn_del(sco);
1463		}
1464	}
1465
1466	hci_dev_unlock(hdev);
1467}
1468
1469static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1470{
1471	struct hci_cp_auth_requested *cp;
1472	struct hci_conn *conn;
1473
1474	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1475
1476	if (!status)
1477		return;
1478
1479	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1480	if (!cp)
1481		return;
1482
1483	hci_dev_lock(hdev);
1484
1485	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1486	if (conn) {
1487		if (conn->state == BT_CONFIG) {
1488			hci_proto_connect_cfm(conn, status);
1489			hci_conn_drop(conn);
1490		}
1491	}
1492
1493	hci_dev_unlock(hdev);
1494}
1495
1496static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1497{
1498	struct hci_cp_set_conn_encrypt *cp;
1499	struct hci_conn *conn;
1500
1501	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1502
1503	if (!status)
1504		return;
1505
1506	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1507	if (!cp)
1508		return;
1509
1510	hci_dev_lock(hdev);
1511
1512	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1513	if (conn) {
1514		if (conn->state == BT_CONFIG) {
1515			hci_proto_connect_cfm(conn, status);
1516			hci_conn_drop(conn);
1517		}
1518	}
1519
1520	hci_dev_unlock(hdev);
1521}
1522
1523static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1524				    struct hci_conn *conn)
1525{
1526	if (conn->state != BT_CONFIG || !conn->out)
1527		return 0;
1528
1529	if (conn->pending_sec_level == BT_SECURITY_SDP)
1530		return 0;
1531
1532	/* Only request authentication for SSP connections or non-SSP
1533	 * devices with sec_level MEDIUM or HIGH or if MITM protection
1534	 * is requested.
1535	 */
1536	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1537	    conn->pending_sec_level != BT_SECURITY_FIPS &&
1538	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1539	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1540		return 0;
1541
1542	return 1;
1543}
1544
1545static int hci_resolve_name(struct hci_dev *hdev,
1546				   struct inquiry_entry *e)
1547{
1548	struct hci_cp_remote_name_req cp;
1549
1550	memset(&cp, 0, sizeof(cp));
1551
1552	bacpy(&cp.bdaddr, &e->data.bdaddr);
1553	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1554	cp.pscan_mode = e->data.pscan_mode;
1555	cp.clock_offset = e->data.clock_offset;
1556
1557	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1558}
1559
1560static bool hci_resolve_next_name(struct hci_dev *hdev)
1561{
1562	struct discovery_state *discov = &hdev->discovery;
1563	struct inquiry_entry *e;
1564
1565	if (list_empty(&discov->resolve))
1566		return false;
1567
1568	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1569	if (!e)
1570		return false;
1571
1572	if (hci_resolve_name(hdev, e) == 0) {
1573		e->name_state = NAME_PENDING;
1574		return true;
1575	}
1576
1577	return false;
1578}
1579
/* Process the outcome of a remote name request for @bdaddr and drive
 * the discovery name-resolution state machine forward.
 *
 * @conn may be NULL; when given and not yet reported, a mgmt
 * device_connected event is emitted first. @name is NULL when the name
 * could not be obtained.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device is not in the list of devices whose name
	 * resolution is pending, there is no need to continue resolving
	 * the next name here: that happens when another Remote Name
	 * Request Complete event arrives.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names to resolve: stay in DISCOVERY_RESOLVING */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1622
/* Handle the command status event for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only failures are handled here: the pending-name bookkeeping is
 * unwound and, if the connection was waiting on the name request
 * before authenticating, the authentication request is issued now.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failed resolution (name == NULL) to mgmt */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1663
1664static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1665{
1666	struct hci_cp_read_remote_features *cp;
1667	struct hci_conn *conn;
1668
1669	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1670
1671	if (!status)
1672		return;
1673
1674	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1675	if (!cp)
1676		return;
1677
1678	hci_dev_lock(hdev);
1679
1680	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1681	if (conn) {
1682		if (conn->state == BT_CONFIG) {
1683			hci_proto_connect_cfm(conn, status);
1684			hci_conn_drop(conn);
1685		}
1686	}
1687
1688	hci_dev_unlock(hdev);
1689}
1690
1691static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1692{
1693	struct hci_cp_read_remote_ext_features *cp;
1694	struct hci_conn *conn;
1695
1696	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1697
1698	if (!status)
1699		return;
1700
1701	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1702	if (!cp)
1703		return;
1704
1705	hci_dev_lock(hdev);
1706
1707	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1708	if (conn) {
1709		if (conn->state == BT_CONFIG) {
1710			hci_proto_connect_cfm(conn, status);
1711			hci_conn_drop(conn);
1712		}
1713	}
1714
1715	hci_dev_unlock(hdev);
1716}
1717
1718static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1719{
1720	struct hci_cp_setup_sync_conn *cp;
1721	struct hci_conn *acl, *sco;
1722	__u16 handle;
1723
1724	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1725
1726	if (!status)
1727		return;
1728
1729	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1730	if (!cp)
1731		return;
1732
1733	handle = __le16_to_cpu(cp->handle);
1734
1735	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1736
1737	hci_dev_lock(hdev);
1738
1739	acl = hci_conn_hash_lookup_handle(hdev, handle);
1740	if (acl) {
1741		sco = acl->link;
1742		if (sco) {
1743			sco->state = BT_CLOSED;
1744
1745			hci_proto_connect_cfm(sco, status);
1746			hci_conn_del(sco);
1747		}
1748	}
1749
1750	hci_dev_unlock(hdev);
1751}
1752
1753static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1754{
1755	struct hci_cp_sniff_mode *cp;
1756	struct hci_conn *conn;
1757
1758	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1759
1760	if (!status)
1761		return;
1762
1763	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1764	if (!cp)
1765		return;
1766
1767	hci_dev_lock(hdev);
1768
1769	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1770	if (conn) {
1771		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1772
1773		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1774			hci_sco_setup(conn, status);
1775	}
1776
1777	hci_dev_unlock(hdev);
1778}
1779
1780static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1781{
1782	struct hci_cp_exit_sniff_mode *cp;
1783	struct hci_conn *conn;
1784
1785	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1786
1787	if (!status)
1788		return;
1789
1790	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1791	if (!cp)
1792		return;
1793
1794	hci_dev_lock(hdev);
1795
1796	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1797	if (conn) {
1798		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1799
1800		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1801			hci_sco_setup(conn, status);
1802	}
1803
1804	hci_dev_unlock(hdev);
1805}
1806
1807static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1808{
1809	struct hci_cp_disconnect *cp;
1810	struct hci_conn *conn;
1811
1812	if (!status)
1813		return;
1814
1815	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1816	if (!cp)
1817		return;
1818
1819	hci_dev_lock(hdev);
1820
1821	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1822	if (conn)
1823		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1824				       conn->dst_type, status);
1825
1826	hci_dev_unlock(hdev);
1827}
1828
1829static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1830{
1831	struct hci_cp_create_phy_link *cp;
1832
1833	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1834
1835	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1836	if (!cp)
1837		return;
1838
1839	hci_dev_lock(hdev);
1840
1841	if (status) {
1842		struct hci_conn *hcon;
1843
1844		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1845		if (hcon)
1846			hci_conn_del(hcon);
1847	} else {
1848		amp_write_remote_assoc(hdev, cp->phy_handle);
1849	}
1850
1851	hci_dev_unlock(hdev);
1852}
1853
1854static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1855{
1856	struct hci_cp_accept_phy_link *cp;
1857
1858	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1859
1860	if (status)
1861		return;
1862
1863	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1864	if (!cp)
1865		return;
1866
1867	amp_write_remote_assoc(hdev, cp->phy_handle);
1868}
1869
/* Handle the command status event for HCI_OP_LE_CREATE_CONN.
 *
 * On success the initiator/responder addresses are recorded on the
 * connection (needed later by SMP) and a connection timeout is armed
 * for directly-addressed attempts.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1920
/* Handle the command status event for HCI_OP_LE_START_ENC.
 *
 * A failure to start encryption on an established LE connection
 * cannot be recovered from, so the connection is disconnected with an
 * authentication failure reason.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Only an established connection can be torn down this way */
	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
1950
/* Handle the HCI Inquiry Complete event.
 *
 * Clears the inquiry state, wakes any waiters, and — when mgmt is in
 * use — advances discovery either into name resolution or to the
 * stopped state.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Connection attempts may have been deferred behind the inquiry */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving names for discovered devices, if any */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1991
/* Handle the HCI Inquiry Result event by feeding every reported device
 * into the inquiry cache and notifying mgmt about each one.
 *
 * NOTE(review): num_rsp and the inquiry_info entries come straight
 * from the controller and nothing here checks that skb->len actually
 * covers num_rsp entries — confirm the caller validates the event
 * length before this runs.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not reported via mgmt */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		/* Plain inquiry results carry no RSSI or SSP data */
		data.rssi		= 0x00;
		data.ssp_mode		= 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, 0, flags, NULL, 0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
2028
/* Handle the HCI Connection Complete event.
 *
 * On success the connection object is moved into BT_CONFIG (ACL) or
 * BT_CONNECTED (SCO/eSCO), follow-up commands are issued, and sysfs
 * entries are created. On failure the connection is reported and torn
 * down. Deferred connection attempts are re-checked at the end.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An outgoing eSCO attempt may complete as plain SCO
		 * when the remote side does not support eSCO; retarget
		 * the existing eSCO connection object in that case.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy connections without a stored
			 * link key get the longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	/* Other connection attempts may have been queued behind this one */
	hci_conn_check_pending(hdev);
}
2110
2111static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2112{
2113	struct hci_cp_reject_conn_req cp;
2114
2115	bacpy(&cp.bdaddr, bdaddr);
2116	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2117	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2118}
2119
/* Handle the HCI Connection Request event.
 *
 * The request is rejected when no protocol accepts it or when
 * blacklist/whitelist policy forbids it; otherwise a connection object
 * is (re)used and the request is accepted, deferred to the protocol
 * layer, or answered with a synchronous-connection accept.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocols veto or defer the incoming connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connectable: reject blacklisted peers. Not connectable: only
	 * accept peers explicitly present on the whitelist.
	 */
	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
					   BDADDR_BREDR)) {
			hci_reject_conn(hdev, &ev->bdaddr);
			return;
		}
	} else {
		if (!hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
					    BDADDR_BREDR)) {
			hci_reject_conn(hdev, &ev->bdaddr);
			return;
		}
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class for this peer */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Deferred: let the protocol layer decide later */
		conn->state = BT_CONNECT2;
		hci_proto_connect_cfm(conn, 0);
	}
}
2209
2210static u8 hci_to_mgmt_reason(u8 err)
2211{
2212	switch (err) {
2213	case HCI_ERROR_CONNECTION_TIMEOUT:
2214		return MGMT_DEV_DISCONN_TIMEOUT;
2215	case HCI_ERROR_REMOTE_USER_TERM:
2216	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2217	case HCI_ERROR_REMOTE_POWER_OFF:
2218		return MGMT_DEV_DISCONN_REMOTE;
2219	case HCI_ERROR_LOCAL_HOST_TERM:
2220		return MGMT_DEV_DISCONN_LOCAL_HOST;
2221	default:
2222		return MGMT_DEV_DISCONN_UNKNOWN;
2223	}
2224}
2225
/* Handle the HCI Disconnection Complete event.
 *
 * Notifies mgmt and the protocol layer, removes stale link keys,
 * re-queues auto-connect peers for background scanning, deletes the
 * connection object, and re-enables LE advertising where needed.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report the disconnect to mgmt if the connect was reported */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK &&
	    test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	/* Re-arm auto-connection for peers configured to reconnect */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Remember the link type; conn is freed by hci_conn_del() below */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2299
/* Handle the HCI Authentication Complete event.
 *
 * Updates the connection's auth state, reports failures to mgmt, and
 * continues connection setup — typically by requesting encryption —
 * depending on the connection state and pending flags.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (non-SSP) devices cannot be re-authenticated,
		 * so don't raise the security level in that case.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* Successful SSP authentication during setup is always
		 * followed by enabling encryption; otherwise setup is
		 * finished here.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2365
/* Handle the HCI Remote Name Request Complete event.
 *
 * Feeds the resolved (or failed) name into the discovery state machine
 * and, for outgoing connections still in setup, issues the pending
 * authentication request.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Connection attempts may have been deferred behind the request */
	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	/* ev->name is not guaranteed to be NUL-terminated: bound it */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2404
/* Handle the HCI Encryption Change event.
 *
 * Mirrors the controller's encryption state in the per-connection
 * flags, enforces Secure Connections Only policy for links still in
 * BT_CONFIG, and notifies the upper layers.  A failed change on an
 * established link tears the connection down.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE
			 * links always use AES-CCM when encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* An encryption failure on a live link is treated as fatal */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2470
2471static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2472					     struct sk_buff *skb)
2473{
2474	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2475	struct hci_conn *conn;
2476
2477	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2478
2479	hci_dev_lock(hdev);
2480
2481	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2482	if (conn) {
2483		if (!ev->status)
2484			set_bit(HCI_CONN_SECURE, &conn->flags);
2485
2486		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2487
2488		hci_key_change_cfm(conn, ev->status);
2489	}
2490
2491	hci_dev_unlock(hdev);
2492}
2493
/* Handle the HCI Read Remote Supported Features Complete event.
 *
 * Stores page 0 of the remote features and continues connection setup:
 * reads the extended features when both sides support SSP, otherwise
 * requests the remote name or finalizes the connection.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* Only connections still being set up need further processing */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* When both sides are SSP capable the remote host features
	 * (page 1) are needed as well before setup can complete.
	 */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		/* 0x02 = page scan repetition mode R2 */
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* No further authentication needed: the connection is ready */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2543
/* Dispatch an HCI Command Complete event to its per-opcode handler.
 *
 * The event header is pulled off the skb so each hci_cc_* helper sees
 * only the return parameters of its command.  Afterwards the command
 * timer is cancelled, any request waiting on this opcode is completed,
 * and the command queue is kicked if the controller can accept more
 * commands (ev->ncmd).
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters; for most commands this is
	 * the status byte.  NOTE(review): read without an skb->len check
	 * and not meaningful for commands that return no status -- relies
	 * on the controller sending well-formed events; confirm.
	 */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* HCI_OP_NOP completions carry no command context; only a real
	 * command completion stops the command timeout timer.
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ncmd > 0 means the controller accepts another command; do not
	 * restart the queue while a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2823
/* Dispatch an HCI Command Status event to its per-opcode handler.
 *
 * Command Status arrives for commands whose completion is reported by
 * a later event; the hci_cs_* helpers only need the status byte.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* HCI_OP_NOP carries no command context; only a real command
	 * status stops the command timeout timer.
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete a pending request right away if the command failed,
	 * or if it is not waiting for a specific follow-up event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* ncmd > 0 means the controller accepts another command; do not
	 * restart the queue while a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2916
2917static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2918{
2919	struct hci_ev_role_change *ev = (void *) skb->data;
2920	struct hci_conn *conn;
2921
2922	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2923
2924	hci_dev_lock(hdev);
2925
2926	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2927	if (conn) {
2928		if (!ev->status) {
2929			if (ev->role)
2930				clear_bit(HCI_CONN_MASTER, &conn->flags);
2931			else
2932				set_bit(HCI_CONN_MASTER, &conn->flags);
2933		}
2934
2935		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2936
2937		hci_role_switch_cfm(conn, ev->status, ev->role);
2938	}
2939
2940	hci_dev_unlock(hdev);
2941}
2942
/* Handle the HCI Number Of Completed Packets event.
 *
 * Credits back the per-link-type transmit quota for every reported
 * handle and kicks the TX work queue so queued traffic can flow again.
 * Only valid in packet-based flow control mode.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the handle/count array really fits in the skb */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		/* NOTE(review): assumes count never exceeds conn->sent;
		 * a misbehaving controller could underflow this -- verify.
		 */
		conn->sent -= count;

		/* Return the credits to the matching pool, clamped to the
		 * controller-advertised maximum.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL pool.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3008
3009static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3010						 __u16 handle)
3011{
3012	struct hci_chan *chan;
3013
3014	switch (hdev->dev_type) {
3015	case HCI_BREDR:
3016		return hci_conn_hash_lookup_handle(hdev, handle);
3017	case HCI_AMP:
3018		chan = hci_chan_lookup_handle(hdev, handle);
3019		if (chan)
3020			return chan->conn;
3021		break;
3022	default:
3023		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3024		break;
3025	}
3026
3027	return NULL;
3028}
3029
/* Handle the HCI Number Of Completed Data Blocks event.
 *
 * Block-based counterpart of hci_num_comp_pkts_evt(): returns data
 * block credits for each reported handle and kicks the TX work queue.
 * Only valid in block-based flow control mode.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the handle/blocks array really fits in the skb */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* AMP handles map to a channel's parent connection */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Clamp to the controller-advertised maximum */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3079
3080static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3081{
3082	struct hci_ev_mode_change *ev = (void *) skb->data;
3083	struct hci_conn *conn;
3084
3085	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3086
3087	hci_dev_lock(hdev);
3088
3089	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3090	if (conn) {
3091		conn->mode = ev->mode;
3092
3093		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3094					&conn->flags)) {
3095			if (conn->mode == HCI_CM_ACTIVE)
3096				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3097			else
3098				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3099		}
3100
3101		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3102			hci_sco_setup(conn, ev->status);
3103	}
3104
3105	hci_dev_unlock(hdev);
3106}
3107
3108static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3109{
3110	struct hci_ev_pin_code_req *ev = (void *) skb->data;
3111	struct hci_conn *conn;
3112
3113	BT_DBG("%s", hdev->name);
3114
3115	hci_dev_lock(hdev);
3116
3117	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3118	if (!conn)
3119		goto unlock;
3120
3121	if (conn->state == BT_CONNECTED) {
3122		hci_conn_hold(conn);
3123		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3124		hci_conn_drop(conn);
3125	}
3126
3127	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
3128		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3129			     sizeof(ev->bdaddr), &ev->bdaddr);
3130	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
3131		u8 secure;
3132
3133		if (conn->pending_sec_level == BT_SECURITY_HIGH)
3134			secure = 1;
3135		else
3136			secure = 0;
3137
3138		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3139	}
3140
3141unlock:
3142	hci_dev_unlock(hdev);
3143}
3144
/* Handle the HCI Link Key Request event.
 *
 * Looks up a stored link key for the peer and replies with it unless
 * the key is too weak for the security level pending on the
 * connection; in that case a negative reply forces fresh pairing.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* An unauthenticated key must not be used when MITM
		 * protection was requested (auth_type bit 0).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Legacy combination keys from a short PIN are too weak
		 * for high-security or FIPS requirements.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3203
/* Handle the HCI Link Key Notification event.
 *
 * Records the newly created link key for the peer, notifies user
 * space through mgmt, and decides whether the key stays in the
 * kernel's key list.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed combination key keeps the original key type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del(&key->list);
		kfree(key);
	} else if (conn) {
		if (persistent)
			clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
		else
			set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	}

unlock:
	hci_dev_unlock(hdev);
}
3257
3258static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3259{
3260	struct hci_ev_clock_offset *ev = (void *) skb->data;
3261	struct hci_conn *conn;
3262
3263	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3264
3265	hci_dev_lock(hdev);
3266
3267	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3268	if (conn && !ev->status) {
3269		struct inquiry_entry *ie;
3270
3271		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3272		if (ie) {
3273			ie->data.clock_offset = ev->clock_offset;
3274			ie->timestamp = jiffies;
3275		}
3276	}
3277
3278	hci_dev_unlock(hdev);
3279}
3280
3281static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3282{
3283	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3284	struct hci_conn *conn;
3285
3286	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3287
3288	hci_dev_lock(hdev);
3289
3290	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3291	if (conn && !ev->status)
3292		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3293
3294	hci_dev_unlock(hdev);
3295}
3296
3297static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3298{
3299	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3300	struct inquiry_entry *ie;
3301
3302	BT_DBG("%s", hdev->name);
3303
3304	hci_dev_lock(hdev);
3305
3306	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3307	if (ie) {
3308		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3309		ie->timestamp = jiffies;
3310	}
3311
3312	hci_dev_unlock(hdev);
3313}
3314
/* Handle the HCI Inquiry Result With RSSI event.
 *
 * Two on-air formats exist for this event; they are distinguished by
 * the per-response record size.  Every response updates the inquiry
 * cache and is reported to mgmt as a found device.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results from a periodic inquiry are not reported */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Record size selects between the two event layouts */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3378
/* Handle the HCI Read Remote Extended Features Complete event.
 *
 * Stores the requested feature page and, for page 1 (host features),
 * synchronizes the SSP/SC connection flags with what the remote host
 * advertises.  Then continues connection setup like
 * hci_remote_features_evt().
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* Only connections still being set up need further processing */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		/* 0x02 = page scan repetition mode R2 */
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* No further authentication needed: the connection is ready */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3444
/* Handle the HCI Synchronous Connection Complete event.
 *
 * Finalizes SCO/eSCO setup.  An eSCO attempt that the remote rejected
 * may have been downgraded to SCO by the controller, so a SCO lookup
 * falls back to the pending eSCO connection.  On a retryable failure
 * of an outgoing link, setup is re-attempted with a reduced packet
 * type mask.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* eSCO request answered with a SCO connection */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			/* Retry with a narrower packet type selection */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3502
3503static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3504{
3505	size_t parsed = 0;
3506
3507	while (parsed < eir_len) {
3508		u8 field_len = eir[0];
3509
3510		if (field_len == 0)
3511			return parsed;
3512
3513		parsed += field_len + 1;
3514		eir += field_len + 1;
3515	}
3516
3517	return eir_len;
3518}
3519
/* Handle the HCI Extended Inquiry Result event.
 *
 * Each response carries EIR data in addition to the usual inquiry
 * fields; the cache is updated and the device (with its EIR payload)
 * is reported to mgmt.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results from a periodic inquiry are not reported */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		data.ssp_mode		= 0x01;

		/* A complete name in the EIR data makes a separate remote
		 * name request unnecessary.
		 */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
3569
/* Encryption Key Refresh Complete event: the controller finished
 * (re-)encrypting the link. For LE links this promotes the pending
 * security level and advances the connection state machine.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	/* Only promote the security level on success */
	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link means encryption is
	 * broken; tear the connection down with an auth-failure reason.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Keep the connection alive for the disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3619
3620static u8 hci_get_auth_req(struct hci_conn *conn)
3621{
3622	/* If remote requests no-bonding follow that lead */
3623	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3624	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3625		return conn->remote_auth | (conn->auth_type & 0x01);
3626
3627	/* If both remote and local have enough IO capabilities, require
3628	 * MITM protection
3629	 */
3630	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3631	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3632		return conn->remote_auth | 0x01;
3633
3634	/* No MITM protection possible so ignore remote requirement */
3635	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3636}
3637
/* IO Capability Request event: the controller needs our IO capability
 * information to continue Secure Simple Pairing. Send a reply with our
 * capabilities, or a negative reply when pairing is not allowed.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep a reference on the connection for the pairing procedure */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Reply only when we are pairable or the remote asked for
	 * no-bonding (ignoring the MITM bit); otherwise reject.
	 */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			cp.authentication = conn->auth_type;

			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 * conn->auth_type is not updated here since
			 * that might cause the user confirmation to be
			 * rejected in case the remote doesn't have the
			 * IO capabilities for MITM.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    cp.authentication != HCI_AT_NO_BONDING)
				cp.authentication |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
			cp.authentication = conn->auth_type;
		}

		/* Advertise OOB data when we have it for this peer and
		 * either we initiated or the remote signalled OOB too.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3706
3707static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3708{
3709	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3710	struct hci_conn *conn;
3711
3712	BT_DBG("%s", hdev->name);
3713
3714	hci_dev_lock(hdev);
3715
3716	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3717	if (!conn)
3718		goto unlock;
3719
3720	conn->remote_cap = ev->capability;
3721	conn->remote_auth = ev->authentication;
3722	if (ev->oob_data)
3723		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3724
3725unlock:
3726	hci_dev_unlock(hdev);
3727}
3728
/* User Confirmation Request event: decide whether to auto-accept the
 * numeric comparison, delegate confirmation to user space via mgmt, or
 * reject the pairing outright based on the MITM requirements and IO
 * capabilities of both sides.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirements is the MITM protection flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation request
	 */
	if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM in which case we do auto-accept.
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* A configured delay defers the accept to a workqueue */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3798
3799static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3800					 struct sk_buff *skb)
3801{
3802	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3803
3804	BT_DBG("%s", hdev->name);
3805
3806	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3807		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3808}
3809
3810static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3811					struct sk_buff *skb)
3812{
3813	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3814	struct hci_conn *conn;
3815
3816	BT_DBG("%s", hdev->name);
3817
3818	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3819	if (!conn)
3820		return;
3821
3822	conn->passkey_notify = __le32_to_cpu(ev->passkey);
3823	conn->passkey_entered = 0;
3824
3825	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3826		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3827					 conn->dst_type, conn->passkey_notify,
3828					 conn->passkey_entered);
3829}
3830
3831static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3832{
3833	struct hci_ev_keypress_notify *ev = (void *) skb->data;
3834	struct hci_conn *conn;
3835
3836	BT_DBG("%s", hdev->name);
3837
3838	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3839	if (!conn)
3840		return;
3841
3842	switch (ev->type) {
3843	case HCI_KEYPRESS_STARTED:
3844		conn->passkey_entered = 0;
3845		return;
3846
3847	case HCI_KEYPRESS_ENTERED:
3848		conn->passkey_entered++;
3849		break;
3850
3851	case HCI_KEYPRESS_ERASED:
3852		conn->passkey_entered--;
3853		break;
3854
3855	case HCI_KEYPRESS_CLEARED:
3856		conn->passkey_entered = 0;
3857		break;
3858
3859	case HCI_KEYPRESS_COMPLETED:
3860		return;
3861	}
3862
3863	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3864		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3865					 conn->dst_type, conn->passkey_notify,
3866					 conn->passkey_entered);
3867}
3868
3869static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3870					 struct sk_buff *skb)
3871{
3872	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3873	struct hci_conn *conn;
3874
3875	BT_DBG("%s", hdev->name);
3876
3877	hci_dev_lock(hdev);
3878
3879	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3880	if (!conn)
3881		goto unlock;
3882
3883	/* To avoid duplicate auth_failed events to user space we check
3884	 * the HCI_CONN_AUTH_PEND flag which will be set if we
3885	 * initiated the authentication. A traditional auth_complete
3886	 * event gets always produced as initiator and is also mapped to
3887	 * the mgmt_auth_failed event */
3888	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3889		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3890				 ev->status);
3891
3892	hci_conn_drop(conn);
3893
3894unlock:
3895	hci_dev_unlock(hdev);
3896}
3897
/* Remote Host Supported Features Notification: cache the remote host
 * feature page (stored as page 1 of conn->features) and refresh the
 * inquiry cache's SSP mode flag for this peer.
 */
static void hci_remote_host_features_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}
3919
/* Remote OOB Data Request event: the controller asks for the hash and
 * randomizer values obtained out-of-band for this peer. Reply with the
 * extended (P-192 + P-256) variant when Secure Connections is enabled,
 * the P-192-only variant otherwise, or a negative reply when no OOB
 * data is stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.randomizer192, data->randomizer192,
			       sizeof(cp.randomizer192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.randomizer256, data->randomizer256,
			       sizeof(cp.randomizer256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.randomizer, data->randomizer192,
			       sizeof(cp.randomizer));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3970
3971static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3972				      struct sk_buff *skb)
3973{
3974	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3975	struct hci_conn *hcon, *bredr_hcon;
3976
3977	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3978	       ev->status);
3979
3980	hci_dev_lock(hdev);
3981
3982	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3983	if (!hcon) {
3984		hci_dev_unlock(hdev);
3985		return;
3986	}
3987
3988	if (ev->status) {
3989		hci_conn_del(hcon);
3990		hci_dev_unlock(hdev);
3991		return;
3992	}
3993
3994	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3995
3996	hcon->state = BT_CONNECTED;
3997	bacpy(&hcon->dst, &bredr_hcon->dst);
3998
3999	hci_conn_hold(hcon);
4000	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4001	hci_conn_drop(hcon);
4002
4003	hci_conn_add_sysfs(hcon);
4004
4005	amp_physical_cfm(bredr_hcon, hcon);
4006
4007	hci_dev_unlock(hdev);
4008}
4009
/* Logical Link Complete event (AMP): create the hci_chan for the new
 * logical link and, when a BR/EDR channel is waiting on this AMP
 * manager, confirm the logical link to L2CAP.
 *
 * NOTE(review): unlike most event handlers this one does not take
 * hci_dev_lock around the connection lookup — confirm this is safe.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* The AMP link uses the controller's block MTU */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4047
4048static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4049					     struct sk_buff *skb)
4050{
4051	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4052	struct hci_chan *hchan;
4053
4054	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4055	       le16_to_cpu(ev->handle), ev->status);
4056
4057	if (ev->status)
4058		return;
4059
4060	hci_dev_lock(hdev);
4061
4062	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4063	if (!hchan)
4064		goto unlock;
4065
4066	amp_destroy_logical_link(hchan, ev->reason);
4067
4068unlock:
4069	hci_dev_unlock(hdev);
4070}
4071
4072static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4073					     struct sk_buff *skb)
4074{
4075	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4076	struct hci_conn *hcon;
4077
4078	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4079
4080	if (ev->status)
4081		return;
4082
4083	hci_dev_lock(hdev);
4084
4085	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4086	if (hcon) {
4087		hcon->state = BT_CLOSED;
4088		hci_conn_del(hcon);
4089	}
4090
4091	hci_dev_unlock(hdev);
4092}
4093
/* LE Connection Complete event: match (or create) the hci_conn for the
 * new LE link, resolve the peer's identity address, reject blocked
 * devices and finalize the connection state.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			set_bit(HCI_CONN_MASTER, &conn->flags);
		}

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The connect attempt succeeded; stop its timeout timer */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	/* Record the parameters the controller actually chose */
	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The pending auto-connect action for this device is now done */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params)
		list_del_init(&params->action);

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4227
4228static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4229					    struct sk_buff *skb)
4230{
4231	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4232	struct hci_conn *conn;
4233
4234	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4235
4236	if (ev->status)
4237		return;
4238
4239	hci_dev_lock(hdev);
4240
4241	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4242	if (conn) {
4243		conn->le_conn_interval = le16_to_cpu(ev->interval);
4244		conn->le_conn_latency = le16_to_cpu(ev->latency);
4245		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4246	}
4247
4248	hci_dev_unlock(hdev);
4249}
4250
/* This function requires the caller holds hdev->lock.
 *
 * Decide, based on an incoming advertising report, whether we should
 * initiate an LE connection to the advertiser.
 */
static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
				  u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return;

	/* If we're connectable, always connect any ADV_DIRECT_IND event */
	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    adv_type == LE_ADV_DIRECT_IND)
		goto connect;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	if (!hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, addr_type))
		return;

connect:
	/* Request connection in master = true role */
	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, true);
	if (!IS_ERR(conn))
		return;

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at a time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
	}
}
4295
/* Process a single LE advertising report: resolve the identity
 * address, kick off any pending connection attempt, and either emit a
 * device_found event immediately or stash the report so it can be
 * merged with the scan response that may follow.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	bool match;
	u32 flags;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		struct hci_conn_params *param;

		if (type == LE_ADV_DIRECT_IND)
			return;

		param = hci_pend_le_action_lookup(&hdev->pend_le_reports,
						  bdaddr, bdaddr_type);
		if (!param)
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4422
4423static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4424{
4425	u8 num_reports = skb->data[0];
4426	void *ptr = &skb->data[1];
4427
4428	hci_dev_lock(hdev);
4429
4430	while (num_reports--) {
4431		struct hci_ev_le_advertising_info *ev = ptr;
4432		s8 rssi;
4433
4434		rssi = ev->data[ev->length];
4435		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4436				   ev->bdaddr_type, rssi, ev->data, ev->length);
4437
4438		ptr += sizeof(*ev) + ev->length + 1;
4439	}
4440
4441	hci_dev_unlock(hdev);
4442}
4443
/* LE Long Term Key Request event: look up the LTK matching the
 * controller-supplied EDiv/Rand values and reply with it, or send a
 * negative reply when no key (or no connection) is found.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* The security level the link will reach once encrypted depends
	 * on whether the key itself was authenticated (MITM-protected).
	 */
	if (ltk->authenticated)
		conn->pending_sec_level = BT_SECURITY_HIGH;
	else
		conn->pending_sec_level = BT_SECURITY_MEDIUM;

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del(&ltk->list);
		kfree(ltk);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4499
4500static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4501				      u8 reason)
4502{
4503	struct hci_cp_le_conn_param_req_neg_reply cp;
4504
4505	cp.handle = cpu_to_le16(handle);
4506	cp.reason = reason;
4507
4508	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4509		     &cp);
4510}
4511
/* LE Remote Connection Parameter Request event: the peer asked to
 * change the connection parameters. Validate the request, remember the
 * values (and notify mgmt) when we are master, and accept by echoing
 * the requested parameters back to the controller.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Reject parameters that are outside the spec-allowed ranges */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (test_bit(HCI_CONN_MASTER, &hcon->flags)) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		/* Update any stored parameters for this device and tell
		 * user space whether the new values are worth storing.
		 */
		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else{
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the request with the parameters the peer asked for */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
4569
/* LE Meta event: strip the meta header and dispatch to the handler for
 * the contained sub-event. Unknown sub-events are silently ignored.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	/* Sub-event handlers expect skb->data to point at their own
	 * parameters, so pull off the meta header first.
	 */
	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	default:
		break;
	}
}
4601
4602static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4603{
4604	struct hci_ev_channel_selected *ev = (void *) skb->data;
4605	struct hci_conn *hcon;
4606
4607	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4608
4609	skb_pull(skb, sizeof(*ev));
4610
4611	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4612	if (!hcon)
4613		return;
4614
4615	amp_read_loc_assoc_final_data(hdev, hcon);
4616}
4617
4618void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4619{
4620	struct hci_event_hdr *hdr = (void *) skb->data;
4621	__u8 event = hdr->evt;
4622
4623	hci_dev_lock(hdev);
4624
4625	/* Received events are (currently) only needed when a request is
4626	 * ongoing so avoid unnecessary memory allocation.
4627	 */
4628	if (hdev->req_status == HCI_REQ_PEND) {
4629		kfree_skb(hdev->recv_evt);
4630		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4631	}
4632
4633	hci_dev_unlock(hdev);
4634
4635	skb_pull(skb, HCI_EVENT_HDR_SIZE);
4636
4637	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
4638		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
4639		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
4640
4641		hci_req_cmd_complete(hdev, opcode, 0);
4642	}
4643
4644	switch (event) {
4645	case HCI_EV_INQUIRY_COMPLETE:
4646		hci_inquiry_complete_evt(hdev, skb);
4647		break;
4648
4649	case HCI_EV_INQUIRY_RESULT:
4650		hci_inquiry_result_evt(hdev, skb);
4651		break;
4652
4653	case HCI_EV_CONN_COMPLETE:
4654		hci_conn_complete_evt(hdev, skb);
4655		break;
4656
4657	case HCI_EV_CONN_REQUEST:
4658		hci_conn_request_evt(hdev, skb);
4659		break;
4660
4661	case HCI_EV_DISCONN_COMPLETE:
4662		hci_disconn_complete_evt(hdev, skb);
4663		break;
4664
4665	case HCI_EV_AUTH_COMPLETE:
4666		hci_auth_complete_evt(hdev, skb);
4667		break;
4668
4669	case HCI_EV_REMOTE_NAME:
4670		hci_remote_name_evt(hdev, skb);
4671		break;
4672
4673	case HCI_EV_ENCRYPT_CHANGE:
4674		hci_encrypt_change_evt(hdev, skb);
4675		break;
4676
4677	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4678		hci_change_link_key_complete_evt(hdev, skb);
4679		break;
4680
4681	case HCI_EV_REMOTE_FEATURES:
4682		hci_remote_features_evt(hdev, skb);
4683		break;
4684
4685	case HCI_EV_CMD_COMPLETE:
4686		hci_cmd_complete_evt(hdev, skb);
4687		break;
4688
4689	case HCI_EV_CMD_STATUS:
4690		hci_cmd_status_evt(hdev, skb);
4691		break;
4692
4693	case HCI_EV_ROLE_CHANGE:
4694		hci_role_change_evt(hdev, skb);
4695		break;
4696
4697	case HCI_EV_NUM_COMP_PKTS:
4698		hci_num_comp_pkts_evt(hdev, skb);
4699		break;
4700
4701	case HCI_EV_MODE_CHANGE:
4702		hci_mode_change_evt(hdev, skb);
4703		break;
4704
4705	case HCI_EV_PIN_CODE_REQ:
4706		hci_pin_code_request_evt(hdev, skb);
4707		break;
4708
4709	case HCI_EV_LINK_KEY_REQ:
4710		hci_link_key_request_evt(hdev, skb);
4711		break;
4712
4713	case HCI_EV_LINK_KEY_NOTIFY:
4714		hci_link_key_notify_evt(hdev, skb);
4715		break;
4716
4717	case HCI_EV_CLOCK_OFFSET:
4718		hci_clock_offset_evt(hdev, skb);
4719		break;
4720
4721	case HCI_EV_PKT_TYPE_CHANGE:
4722		hci_pkt_type_change_evt(hdev, skb);
4723		break;
4724
4725	case HCI_EV_PSCAN_REP_MODE:
4726		hci_pscan_rep_mode_evt(hdev, skb);
4727		break;
4728
4729	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4730		hci_inquiry_result_with_rssi_evt(hdev, skb);
4731		break;
4732
4733	case HCI_EV_REMOTE_EXT_FEATURES:
4734		hci_remote_ext_features_evt(hdev, skb);
4735		break;
4736
4737	case HCI_EV_SYNC_CONN_COMPLETE:
4738		hci_sync_conn_complete_evt(hdev, skb);
4739		break;
4740
4741	case HCI_EV_EXTENDED_INQUIRY_RESULT:
4742		hci_extended_inquiry_result_evt(hdev, skb);
4743		break;
4744
4745	case HCI_EV_KEY_REFRESH_COMPLETE:
4746		hci_key_refresh_complete_evt(hdev, skb);
4747		break;
4748
4749	case HCI_EV_IO_CAPA_REQUEST:
4750		hci_io_capa_request_evt(hdev, skb);
4751		break;
4752
4753	case HCI_EV_IO_CAPA_REPLY:
4754		hci_io_capa_reply_evt(hdev, skb);
4755		break;
4756
4757	case HCI_EV_USER_CONFIRM_REQUEST:
4758		hci_user_confirm_request_evt(hdev, skb);
4759		break;
4760
4761	case HCI_EV_USER_PASSKEY_REQUEST:
4762		hci_user_passkey_request_evt(hdev, skb);
4763		break;
4764
4765	case HCI_EV_USER_PASSKEY_NOTIFY:
4766		hci_user_passkey_notify_evt(hdev, skb);
4767		break;
4768
4769	case HCI_EV_KEYPRESS_NOTIFY:
4770		hci_keypress_notify_evt(hdev, skb);
4771		break;
4772
4773	case HCI_EV_SIMPLE_PAIR_COMPLETE:
4774		hci_simple_pair_complete_evt(hdev, skb);
4775		break;
4776
4777	case HCI_EV_REMOTE_HOST_FEATURES:
4778		hci_remote_host_features_evt(hdev, skb);
4779		break;
4780
4781	case HCI_EV_LE_META:
4782		hci_le_meta_evt(hdev, skb);
4783		break;
4784
4785	case HCI_EV_CHANNEL_SELECTED:
4786		hci_chan_selected_evt(hdev, skb);
4787		break;
4788
4789	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4790		hci_remote_oob_data_request_evt(hdev, skb);
4791		break;
4792
4793	case HCI_EV_PHY_LINK_COMPLETE:
4794		hci_phy_link_complete_evt(hdev, skb);
4795		break;
4796
4797	case HCI_EV_LOGICAL_LINK_COMPLETE:
4798		hci_loglink_complete_evt(hdev, skb);
4799		break;
4800
4801	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4802		hci_disconn_loglink_complete_evt(hdev, skb);
4803		break;
4804
4805	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4806		hci_disconn_phylink_complete_evt(hdev, skb);
4807		break;
4808
4809	case HCI_EV_NUM_COMP_BLOCKS:
4810		hci_num_comp_blocks_evt(hdev, skb);
4811		break;
4812
4813	default:
4814		BT_DBG("%s event 0x%2.2x", hdev->name, event);
4815		break;
4816	}
4817
4818	kfree_skb(skb);
4819	hdev->stat.evt_rx++;
4820}
4821