/* hci_event.c — revision 42bd6a56ed1ab4b2cb50f4d4e674874da9b47f46 */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24
25/* Bluetooth HCI event handling. */
26
27#include <asm/unaligned.h>
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/mgmt.h>
32
33#include "a2mp.h"
34#include "amp.h"
35#include "smp.h"
36
37/* Handle HCI Event packets */
38
/* Command Complete for HCI_Inquiry_Cancel: on success, clear the inquiry
 * state, wake any thread sleeping on the HCI_INQUIRY bit, mark discovery
 * as stopped and retry any connection attempts deferred by the inquiry.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* Connection setup may have been blocked while inquiring */
	hci_conn_check_pending(hdev);
}
58
59static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60{
61	__u8 status = *((__u8 *) skb->data);
62
63	BT_DBG("%s status 0x%2.2x", hdev->name, status);
64
65	if (status)
66		return;
67
68	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69}
70
71static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72{
73	__u8 status = *((__u8 *) skb->data);
74
75	BT_DBG("%s status 0x%2.2x", hdev->name, status);
76
77	if (status)
78		return;
79
80	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81
82	hci_conn_check_pending(hdev);
83}
84
/* Command Complete for HCI_Remote_Name_Request_Cancel: nothing to update;
 * the handler exists only for debug tracing of the event.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
90
91static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
92{
93	struct hci_rp_role_discovery *rp = (void *) skb->data;
94	struct hci_conn *conn;
95
96	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
97
98	if (rp->status)
99		return;
100
101	hci_dev_lock(hdev);
102
103	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104	if (conn) {
105		if (rp->role)
106			clear_bit(HCI_CONN_MASTER, &conn->flags);
107		else
108			set_bit(HCI_CONN_MASTER, &conn->flags);
109	}
110
111	hci_dev_unlock(hdev);
112}
113
114static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
115{
116	struct hci_rp_read_link_policy *rp = (void *) skb->data;
117	struct hci_conn *conn;
118
119	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
120
121	if (rp->status)
122		return;
123
124	hci_dev_lock(hdev);
125
126	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
127	if (conn)
128		conn->link_policy = __le16_to_cpu(rp->policy);
129
130	hci_dev_unlock(hdev);
131}
132
/* Command Complete for HCI_Write_Link_Policy_Settings: the response only
 * echoes the handle, so the newly written policy value is recovered from
 * the command we sent (hdev->sent_cmd) and cached on the connection.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2 skips the 2-byte handle field of the command
		 * parameters; the policy value follows it.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
156
157static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
158					struct sk_buff *skb)
159{
160	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
161
162	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
163
164	if (rp->status)
165		return;
166
167	hdev->link_policy = __le16_to_cpu(rp->policy);
168}
169
170static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
171					 struct sk_buff *skb)
172{
173	__u8 status = *((__u8 *) skb->data);
174	void *sent;
175
176	BT_DBG("%s status 0x%2.2x", hdev->name, status);
177
178	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179	if (!sent)
180		return;
181
182	if (!status)
183		hdev->link_policy = get_unaligned_le16(sent);
184}
185
/* Command Complete for HCI_Reset: bring the software state back in sync
 * with a freshly reset controller — drop non-persistent flags, discovery
 * state, cached TX power values, advertising/scan response data and LE
 * scan settings.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Note: executed regardless of status so a pending reset never
	 * stays latched.
	 */
	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;
}
211
/* Command Complete for HCI_Write_Local_Name: notify the management
 * interface (which also receives failures), or — when mgmt is not in
 * control — cache the successfully written name locally.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* mgmt is told about the outcome even on failure */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
232
233static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
234{
235	struct hci_rp_read_local_name *rp = (void *) skb->data;
236
237	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
238
239	if (rp->status)
240		return;
241
242	if (test_bit(HCI_SETUP, &hdev->dev_flags))
243		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
244}
245
246static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
247{
248	__u8 status = *((__u8 *) skb->data);
249	void *sent;
250
251	BT_DBG("%s status 0x%2.2x", hdev->name, status);
252
253	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
254	if (!sent)
255		return;
256
257	if (!status) {
258		__u8 param = *((__u8 *) sent);
259
260		if (param == AUTH_ENABLED)
261			set_bit(HCI_AUTH, &hdev->flags);
262		else
263			clear_bit(HCI_AUTH, &hdev->flags);
264	}
265
266	if (test_bit(HCI_MGMT, &hdev->dev_flags))
267		mgmt_auth_enable_complete(hdev, status);
268}
269
270static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
271{
272	__u8 status = *((__u8 *) skb->data);
273	void *sent;
274
275	BT_DBG("%s status 0x%2.2x", hdev->name, status);
276
277	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
278	if (!sent)
279		return;
280
281	if (!status) {
282		__u8 param = *((__u8 *) sent);
283
284		if (param)
285			set_bit(HCI_ENCRYPT, &hdev->flags);
286		else
287			clear_bit(HCI_ENCRYPT, &hdev->flags);
288	}
289}
290
/* Command Complete for HCI_Write_Scan_Enable: update HCI_ISCAN/HCI_PSCAN
 * according to the scan mode we requested, and emit mgmt
 * discoverable/connectable events only on actual transitions.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* We need to ensure that we set this back on if someone changed
	 * the scan mode through a raw HCI socket.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	/* Capture the previous state so mgmt is only notified on change */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
}
338
339static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
340{
341	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
342
343	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
344
345	if (rp->status)
346		return;
347
348	memcpy(hdev->dev_class, rp->dev_class, 3);
349
350	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
351	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
352}
353
/* Command Complete for HCI_Write_Class_of_Device: cache the value we wrote
 * on success; mgmt is informed of the outcome in either case.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	/* Completion is reported to mgmt even on failure */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
375
376static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
377{
378	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
379	__u16 setting;
380
381	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
382
383	if (rp->status)
384		return;
385
386	setting = __le16_to_cpu(rp->voice_setting);
387
388	if (hdev->voice_setting == setting)
389		return;
390
391	hdev->voice_setting = setting;
392
393	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
394
395	if (hdev->notify)
396		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
397}
398
399static void hci_cc_write_voice_setting(struct hci_dev *hdev,
400				       struct sk_buff *skb)
401{
402	__u8 status = *((__u8 *) skb->data);
403	__u16 setting;
404	void *sent;
405
406	BT_DBG("%s status 0x%2.2x", hdev->name, status);
407
408	if (status)
409		return;
410
411	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
412	if (!sent)
413		return;
414
415	setting = get_unaligned_le16(sent);
416
417	if (hdev->voice_setting == setting)
418		return;
419
420	hdev->voice_setting = setting;
421
422	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
423
424	if (hdev->notify)
425		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
426}
427
428static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
429					  struct sk_buff *skb)
430{
431	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
432
433	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
434
435	if (rp->status)
436		return;
437
438	hdev->num_iac = rp->num_iac;
439
440	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
441}
442
/* Command Complete for HCI_Write_Simple_Pairing_Mode: track the host SSP
 * feature bit on success, then either report the result to mgmt (even on
 * failure) or update the HCI_SSP_ENABLED flag directly.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		/* Keep the cached host features page consistent */
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
470
/* Command Complete for HCI_Write_Secure_Connections_Host_Support: track
 * the host SC feature bit on success, then either report the result to
 * mgmt (even on failure) or update the HCI_SC_ENABLED flag directly.
 * Mirrors hci_cc_write_ssp_mode().
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	if (!status) {
		/* Keep the cached host features page consistent */
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}
}
498
499static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
500{
501	struct hci_rp_read_local_version *rp = (void *) skb->data;
502
503	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
504
505	if (rp->status)
506		return;
507
508	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
509		hdev->hci_ver = rp->hci_ver;
510		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
511		hdev->lmp_ver = rp->lmp_ver;
512		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
513		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
514	}
515}
516
517static void hci_cc_read_local_commands(struct hci_dev *hdev,
518				       struct sk_buff *skb)
519{
520	struct hci_rp_read_local_commands *rp = (void *) skb->data;
521
522	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
523
524	if (rp->status)
525		return;
526
527	if (test_bit(HCI_SETUP, &hdev->dev_flags))
528		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
529}
530
/* Command Complete for HCI_Read_Local_Supported_Features: cache page 0 of
 * the LMP feature mask and derive the default ACL/SCO/eSCO packet types
 * from the individual feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO/eSCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
580
581static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
582					   struct sk_buff *skb)
583{
584	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
585
586	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
587
588	if (rp->status)
589		return;
590
591	if (hdev->max_page < rp->max_page)
592		hdev->max_page = rp->max_page;
593
594	if (rp->page < HCI_MAX_PAGES)
595		memcpy(hdev->features[rp->page], rp->features, 8);
596}
597
598static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
599					  struct sk_buff *skb)
600{
601	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
602
603	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
604
605	if (!rp->status)
606		hdev->flow_ctl_mode = rp->mode;
607}
608
/* Command Complete for HCI_Read_Buffer_Size: cache ACL/SCO MTUs and packet
 * counts and initialise the available-packet counters from them.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report bogus SCO values; override them */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	/* All controller buffers start out free */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
634
635static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
636{
637	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
638
639	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
640
641	if (!rp->status)
642		bacpy(&hdev->bdaddr, &rp->bdaddr);
643}
644
645static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
646					   struct sk_buff *skb)
647{
648	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
649
650	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
651
652	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
653		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
654		hdev->page_scan_window = __le16_to_cpu(rp->window);
655	}
656}
657
658static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
659					    struct sk_buff *skb)
660{
661	u8 status = *((u8 *) skb->data);
662	struct hci_cp_write_page_scan_activity *sent;
663
664	BT_DBG("%s status 0x%2.2x", hdev->name, status);
665
666	if (status)
667		return;
668
669	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
670	if (!sent)
671		return;
672
673	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
674	hdev->page_scan_window = __le16_to_cpu(sent->window);
675}
676
677static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
678					   struct sk_buff *skb)
679{
680	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
681
682	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
683
684	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
685		hdev->page_scan_type = rp->type;
686}
687
688static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
689					struct sk_buff *skb)
690{
691	u8 status = *((u8 *) skb->data);
692	u8 *type;
693
694	BT_DBG("%s status 0x%2.2x", hdev->name, status);
695
696	if (status)
697		return;
698
699	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
700	if (type)
701		hdev->page_scan_type = *type;
702}
703
704static void hci_cc_read_data_block_size(struct hci_dev *hdev,
705					struct sk_buff *skb)
706{
707	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
708
709	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
710
711	if (rp->status)
712		return;
713
714	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
715	hdev->block_len = __le16_to_cpu(rp->block_len);
716	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
717
718	hdev->block_cnt = hdev->num_blocks;
719
720	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
721	       hdev->block_cnt, hdev->block_len);
722}
723
/* Command Complete for HCI_Read_Clock: depending on which clock was
 * requested in the original command, store either the local (native)
 * clock on hdev or the piconet clock and accuracy on the connection.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against truncated events before touching rp fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 requested the local clock */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
758
/* Command Complete for HCI_Read_Local_AMP_Info: cache the AMP controller
 * capabilities and, regardless of status, answer the pending A2MP
 * Get Info request.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	/* The A2MP response is sent even on failure */
	a2mp_send_getinfo_rsp(hdev);
}
783
784static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
785					struct sk_buff *skb)
786{
787	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
788	struct amp_assoc *assoc = &hdev->loc_assoc;
789	size_t rem_len, frag_len;
790
791	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
792
793	if (rp->status)
794		goto a2mp_rsp;
795
796	frag_len = skb->len - sizeof(*rp);
797	rem_len = __le16_to_cpu(rp->rem_len);
798
799	if (rem_len > frag_len) {
800		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
801
802		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
803		assoc->offset += frag_len;
804
805		/* Read other fragments */
806		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
807
808		return;
809	}
810
811	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
812	assoc->len = assoc->offset + rem_len;
813	assoc->offset = 0;
814
815a2mp_rsp:
816	/* Send A2MP Rsp when all fragments are received */
817	a2mp_send_getampassoc_rsp(hdev, rp->status);
818	a2mp_send_create_phy_link_req(hdev, rp->status);
819}
820
821static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
822					 struct sk_buff *skb)
823{
824	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
825
826	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
827
828	if (!rp->status)
829		hdev->inq_tx_power = rp->tx_power;
830}
831
/* Command Complete for HCI_PIN_Code_Request_Reply: report the outcome to
 * mgmt (even on failure) and, on success, remember the PIN length on the
 * matching ACL connection for later key-type decisions.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	/* mgmt is notified before the status check on purpose */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
859
860static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
861{
862	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
863
864	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
865
866	hci_dev_lock(hdev);
867
868	if (test_bit(HCI_MGMT, &hdev->dev_flags))
869		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
870						 rp->status);
871
872	hci_dev_unlock(hdev);
873}
874
875static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
876				       struct sk_buff *skb)
877{
878	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
879
880	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
881
882	if (rp->status)
883		return;
884
885	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
886	hdev->le_pkts = rp->le_max_pkt;
887
888	hdev->le_cnt = hdev->le_pkts;
889
890	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
891}
892
893static void hci_cc_le_read_local_features(struct hci_dev *hdev,
894					  struct sk_buff *skb)
895{
896	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
897
898	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
899
900	if (!rp->status)
901		memcpy(hdev->le_features, rp->features, 8);
902}
903
904static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
905					struct sk_buff *skb)
906{
907	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
908
909	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
910
911	if (!rp->status)
912		hdev->adv_tx_power = rp->tx_power;
913}
914
915static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
916{
917	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
918
919	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
920
921	hci_dev_lock(hdev);
922
923	if (test_bit(HCI_MGMT, &hdev->dev_flags))
924		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
925						 rp->status);
926
927	hci_dev_unlock(hdev);
928}
929
930static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
931					  struct sk_buff *skb)
932{
933	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
934
935	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
936
937	hci_dev_lock(hdev);
938
939	if (test_bit(HCI_MGMT, &hdev->dev_flags))
940		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
941						     ACL_LINK, 0, rp->status);
942
943	hci_dev_unlock(hdev);
944}
945
946static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
947{
948	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
949
950	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
951
952	hci_dev_lock(hdev);
953
954	if (test_bit(HCI_MGMT, &hdev->dev_flags))
955		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
956						 0, rp->status);
957
958	hci_dev_unlock(hdev);
959}
960
961static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
962					  struct sk_buff *skb)
963{
964	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
965
966	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
967
968	hci_dev_lock(hdev);
969
970	if (test_bit(HCI_MGMT, &hdev->dev_flags))
971		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
972						     ACL_LINK, 0, rp->status);
973
974	hci_dev_unlock(hdev);
975}
976
977static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
978				       struct sk_buff *skb)
979{
980	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
981
982	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
983
984	hci_dev_lock(hdev);
985	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
986					  NULL, NULL, rp->status);
987	hci_dev_unlock(hdev);
988}
989
990static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
991					   struct sk_buff *skb)
992{
993	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
994
995	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
996
997	hci_dev_lock(hdev);
998	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
999					  rp->hash256, rp->randomizer256,
1000					  rp->status);
1001	hci_dev_unlock(hdev);
1002}
1003
1004
1005static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1006{
1007	__u8 status = *((__u8 *) skb->data);
1008	bdaddr_t *sent;
1009
1010	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1011
1012	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1013	if (!sent)
1014		return;
1015
1016	hci_dev_lock(hdev);
1017
1018	if (!status)
1019		bacpy(&hdev->random_addr, sent);
1020
1021	hci_dev_unlock(hdev);
1022}
1023
/* Command Complete for HCI_LE_Set_Advertising_Enable: when advertising was
 * just enabled while a peripheral connection attempt is pending, arm the
 * connection timeout; then tell mgmt about the new advertising state.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	if (status)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   HCI_LE_CONN_TIMEOUT);
	}

	/* *sent is the enable parameter we wrote (0x00/0x01) */
	mgmt_advertising(hdev, *sent);

	hci_dev_unlock(hdev);
}
1056
1057static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1058{
1059	struct hci_cp_le_set_scan_param *cp;
1060	__u8 status = *((__u8 *) skb->data);
1061
1062	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1063
1064	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1065	if (!cp)
1066		return;
1067
1068	hci_dev_lock(hdev);
1069
1070	if (!status)
1071		hdev->le_scan_type = cp->type;
1072
1073	hci_dev_unlock(hdev);
1074}
1075
1076static bool has_pending_adv_report(struct hci_dev *hdev)
1077{
1078	struct discovery_state *d = &hdev->discovery;
1079
1080	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1081}
1082
1083static void clear_pending_adv_report(struct hci_dev *hdev)
1084{
1085	struct discovery_state *d = &hdev->discovery;
1086
1087	bacpy(&d->last_adv_addr, BDADDR_ANY);
1088	d->last_adv_data_len = 0;
1089}
1090
1091static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1092				     u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
1093{
1094	struct discovery_state *d = &hdev->discovery;
1095
1096	bacpy(&d->last_adv_addr, bdaddr);
1097	d->last_adv_addr_type = bdaddr_type;
1098	d->last_adv_rssi = rssi;
1099	memcpy(d->last_adv_data, data, len);
1100	d->last_adv_data_len = len;
1101}
1102
/* Command Complete for HCI_LE_Set_Scan_Enable: track the HCI_LE_SCAN flag
 * and, when scanning stops, flush any advertising report still waiting
 * for a scan response up to mgmt.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	if (status)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* Active scans pair ADV_IND with SCAN_RSP; start clean */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, 0, 1,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1160
1161static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1162					   struct sk_buff *skb)
1163{
1164	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1165
1166	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1167
1168	if (!rp->status)
1169		hdev->le_white_list_size = rp->size;
1170}
1171
1172static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1173				       struct sk_buff *skb)
1174{
1175	__u8 status = *((__u8 *) skb->data);
1176
1177	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1178
1179	if (!status)
1180		hci_white_list_clear(hdev);
1181}
1182
1183static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1184					struct sk_buff *skb)
1185{
1186	struct hci_cp_le_add_to_white_list *sent;
1187	__u8 status = *((__u8 *) skb->data);
1188
1189	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1190
1191	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1192	if (!sent)
1193		return;
1194
1195	if (!status)
1196		hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
1197}
1198
1199static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1200					  struct sk_buff *skb)
1201{
1202	struct hci_cp_le_del_from_white_list *sent;
1203	__u8 status = *((__u8 *) skb->data);
1204
1205	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1206
1207	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1208	if (!sent)
1209		return;
1210
1211	if (!status)
1212		hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
1213}
1214
1215static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1216					    struct sk_buff *skb)
1217{
1218	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1219
1220	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1221
1222	if (!rp->status)
1223		memcpy(hdev->le_states, rp->le_states, 8);
1224}
1225
1226static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1227					   struct sk_buff *skb)
1228{
1229	struct hci_cp_write_le_host_supported *sent;
1230	__u8 status = *((__u8 *) skb->data);
1231
1232	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1233
1234	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1235	if (!sent)
1236		return;
1237
1238	if (!status) {
1239		if (sent->le) {
1240			hdev->features[1][0] |= LMP_HOST_LE;
1241			set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1242		} else {
1243			hdev->features[1][0] &= ~LMP_HOST_LE;
1244			clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1245			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1246		}
1247
1248		if (sent->simul)
1249			hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1250		else
1251			hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1252	}
1253}
1254
1255static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1256{
1257	struct hci_cp_le_set_adv_param *cp;
1258	u8 status = *((u8 *) skb->data);
1259
1260	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1261
1262	if (status)
1263		return;
1264
1265	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1266	if (!cp)
1267		return;
1268
1269	hci_dev_lock(hdev);
1270	hdev->adv_addr_type = cp->own_address_type;
1271	hci_dev_unlock(hdev);
1272}
1273
1274static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1275					  struct sk_buff *skb)
1276{
1277	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1278
1279	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1280	       hdev->name, rp->status, rp->phy_handle);
1281
1282	if (rp->status)
1283		return;
1284
1285	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1286}
1287
1288static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1289{
1290	struct hci_rp_read_rssi *rp = (void *) skb->data;
1291	struct hci_conn *conn;
1292
1293	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1294
1295	if (rp->status)
1296		return;
1297
1298	hci_dev_lock(hdev);
1299
1300	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1301	if (conn)
1302		conn->rssi = rp->rssi;
1303
1304	hci_dev_unlock(hdev);
1305}
1306
1307static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1308{
1309	struct hci_cp_read_tx_power *sent;
1310	struct hci_rp_read_tx_power *rp = (void *) skb->data;
1311	struct hci_conn *conn;
1312
1313	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1314
1315	if (rp->status)
1316		return;
1317
1318	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1319	if (!sent)
1320		return;
1321
1322	hci_dev_lock(hdev);
1323
1324	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1325	if (!conn)
1326		goto unlock;
1327
1328	switch (sent->type) {
1329	case 0x00:
1330		conn->tx_power = rp->tx_power;
1331		break;
1332	case 0x01:
1333		conn->max_tx_power = rp->tx_power;
1334		break;
1335	}
1336
1337unlock:
1338	hci_dev_unlock(hdev);
1339}
1340
1341static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1342{
1343	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1344
1345	if (status) {
1346		hci_conn_check_pending(hdev);
1347		return;
1348	}
1349
1350	set_bit(HCI_INQUIRY, &hdev->flags);
1351}
1352
/* Command-status handler for HCI_OP_CREATE_CONN. On failure, tear down
 * or retry the pending outgoing ACL connection; on success, make sure a
 * hci_conn object exists to track it.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c (Command Disallowed per the HCI
			 * spec) is treated as transient: for up to two
			 * attempts the connection is parked in
			 * BT_CONNECT2 for a retry instead of being
			 * deleted.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			/* Command accepted but no hci_conn exists yet
			 * (e.g. command sent from raw access): create
			 * one for this outgoing link.
			 */
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				set_bit(HCI_CONN_MASTER, &conn->flags);
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1392
1393static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1394{
1395	struct hci_cp_add_sco *cp;
1396	struct hci_conn *acl, *sco;
1397	__u16 handle;
1398
1399	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1400
1401	if (!status)
1402		return;
1403
1404	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1405	if (!cp)
1406		return;
1407
1408	handle = __le16_to_cpu(cp->handle);
1409
1410	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1411
1412	hci_dev_lock(hdev);
1413
1414	acl = hci_conn_hash_lookup_handle(hdev, handle);
1415	if (acl) {
1416		sco = acl->link;
1417		if (sco) {
1418			sco->state = BT_CLOSED;
1419
1420			hci_proto_connect_cfm(sco, status);
1421			hci_conn_del(sco);
1422		}
1423	}
1424
1425	hci_dev_unlock(hdev);
1426}
1427
1428static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1429{
1430	struct hci_cp_auth_requested *cp;
1431	struct hci_conn *conn;
1432
1433	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1434
1435	if (!status)
1436		return;
1437
1438	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1439	if (!cp)
1440		return;
1441
1442	hci_dev_lock(hdev);
1443
1444	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1445	if (conn) {
1446		if (conn->state == BT_CONFIG) {
1447			hci_proto_connect_cfm(conn, status);
1448			hci_conn_drop(conn);
1449		}
1450	}
1451
1452	hci_dev_unlock(hdev);
1453}
1454
1455static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1456{
1457	struct hci_cp_set_conn_encrypt *cp;
1458	struct hci_conn *conn;
1459
1460	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1461
1462	if (!status)
1463		return;
1464
1465	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1466	if (!cp)
1467		return;
1468
1469	hci_dev_lock(hdev);
1470
1471	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1472	if (conn) {
1473		if (conn->state == BT_CONFIG) {
1474			hci_proto_connect_cfm(conn, status);
1475			hci_conn_drop(conn);
1476		}
1477	}
1478
1479	hci_dev_unlock(hdev);
1480}
1481
1482static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1483				    struct hci_conn *conn)
1484{
1485	if (conn->state != BT_CONFIG || !conn->out)
1486		return 0;
1487
1488	if (conn->pending_sec_level == BT_SECURITY_SDP)
1489		return 0;
1490
1491	/* Only request authentication for SSP connections or non-SSP
1492	 * devices with sec_level MEDIUM or HIGH or if MITM protection
1493	 * is requested.
1494	 */
1495	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1496	    conn->pending_sec_level != BT_SECURITY_FIPS &&
1497	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1498	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1499		return 0;
1500
1501	return 1;
1502}
1503
1504static int hci_resolve_name(struct hci_dev *hdev,
1505				   struct inquiry_entry *e)
1506{
1507	struct hci_cp_remote_name_req cp;
1508
1509	memset(&cp, 0, sizeof(cp));
1510
1511	bacpy(&cp.bdaddr, &e->data.bdaddr);
1512	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1513	cp.pscan_mode = e->data.pscan_mode;
1514	cp.clock_offset = e->data.clock_offset;
1515
1516	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1517}
1518
1519static bool hci_resolve_next_name(struct hci_dev *hdev)
1520{
1521	struct discovery_state *discov = &hdev->discovery;
1522	struct inquiry_entry *e;
1523
1524	if (list_empty(&discov->resolve))
1525		return false;
1526
1527	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1528	if (!e)
1529		return false;
1530
1531	if (hci_resolve_name(hdev, e) == 0) {
1532		e->name_state = NAME_PENDING;
1533		return true;
1534	}
1535
1536	return false;
1537}
1538
/* Deliver a remote-name result to the management interface and advance
 * the name-resolution phase of discovery. @name is NULL when resolution
 * failed; @conn may be NULL when no connection to the device exists.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* First time the peer's name becomes known on a live connection,
	 * report the connection (with name) to mgmt exactly once.
	 */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* Start resolving the next pending name; only when none is left
	 * does discovery complete.
	 */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1581
/* Command-status handler for HCI_OP_REMOTE_NAME_REQ. Only failures are
 * handled here: notify the discovery machinery that the name could not
 * be obtained, then fall back to requesting authentication if the
 * connection setup was only waiting for the name.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failed resolution (NULL name) to mgmt */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1622
1623static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1624{
1625	struct hci_cp_read_remote_features *cp;
1626	struct hci_conn *conn;
1627
1628	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1629
1630	if (!status)
1631		return;
1632
1633	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1634	if (!cp)
1635		return;
1636
1637	hci_dev_lock(hdev);
1638
1639	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1640	if (conn) {
1641		if (conn->state == BT_CONFIG) {
1642			hci_proto_connect_cfm(conn, status);
1643			hci_conn_drop(conn);
1644		}
1645	}
1646
1647	hci_dev_unlock(hdev);
1648}
1649
1650static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1651{
1652	struct hci_cp_read_remote_ext_features *cp;
1653	struct hci_conn *conn;
1654
1655	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1656
1657	if (!status)
1658		return;
1659
1660	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1661	if (!cp)
1662		return;
1663
1664	hci_dev_lock(hdev);
1665
1666	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1667	if (conn) {
1668		if (conn->state == BT_CONFIG) {
1669			hci_proto_connect_cfm(conn, status);
1670			hci_conn_drop(conn);
1671		}
1672	}
1673
1674	hci_dev_unlock(hdev);
1675}
1676
1677static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1678{
1679	struct hci_cp_setup_sync_conn *cp;
1680	struct hci_conn *acl, *sco;
1681	__u16 handle;
1682
1683	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1684
1685	if (!status)
1686		return;
1687
1688	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1689	if (!cp)
1690		return;
1691
1692	handle = __le16_to_cpu(cp->handle);
1693
1694	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1695
1696	hci_dev_lock(hdev);
1697
1698	acl = hci_conn_hash_lookup_handle(hdev, handle);
1699	if (acl) {
1700		sco = acl->link;
1701		if (sco) {
1702			sco->state = BT_CLOSED;
1703
1704			hci_proto_connect_cfm(sco, status);
1705			hci_conn_del(sco);
1706		}
1707	}
1708
1709	hci_dev_unlock(hdev);
1710}
1711
1712static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1713{
1714	struct hci_cp_sniff_mode *cp;
1715	struct hci_conn *conn;
1716
1717	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1718
1719	if (!status)
1720		return;
1721
1722	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1723	if (!cp)
1724		return;
1725
1726	hci_dev_lock(hdev);
1727
1728	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1729	if (conn) {
1730		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1731
1732		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1733			hci_sco_setup(conn, status);
1734	}
1735
1736	hci_dev_unlock(hdev);
1737}
1738
1739static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1740{
1741	struct hci_cp_exit_sniff_mode *cp;
1742	struct hci_conn *conn;
1743
1744	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1745
1746	if (!status)
1747		return;
1748
1749	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1750	if (!cp)
1751		return;
1752
1753	hci_dev_lock(hdev);
1754
1755	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1756	if (conn) {
1757		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1758
1759		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1760			hci_sco_setup(conn, status);
1761	}
1762
1763	hci_dev_unlock(hdev);
1764}
1765
1766static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1767{
1768	struct hci_cp_disconnect *cp;
1769	struct hci_conn *conn;
1770
1771	if (!status)
1772		return;
1773
1774	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1775	if (!cp)
1776		return;
1777
1778	hci_dev_lock(hdev);
1779
1780	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1781	if (conn)
1782		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1783				       conn->dst_type, status);
1784
1785	hci_dev_unlock(hdev);
1786}
1787
1788static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1789{
1790	struct hci_cp_create_phy_link *cp;
1791
1792	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1793
1794	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1795	if (!cp)
1796		return;
1797
1798	hci_dev_lock(hdev);
1799
1800	if (status) {
1801		struct hci_conn *hcon;
1802
1803		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1804		if (hcon)
1805			hci_conn_del(hcon);
1806	} else {
1807		amp_write_remote_assoc(hdev, cp->phy_handle);
1808	}
1809
1810	hci_dev_unlock(hdev);
1811}
1812
1813static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1814{
1815	struct hci_cp_accept_phy_link *cp;
1816
1817	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1818
1819	if (status)
1820		return;
1821
1822	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1823	if (!cp)
1824		return;
1825
1826	amp_write_remote_assoc(hdev, cp->phy_handle);
1827}
1828
/* Command-status handler for HCI_OP_LE_CREATE_CONN. On success, record
 * the initiator/responder address information needed later by SMP and
 * arm a connection timeout for directed (non-white-list) attempts.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   HCI_LE_CONN_TIMEOUT);

unlock:
	hci_dev_unlock(hdev);
}
1879
1880static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1881{
1882	struct hci_cp_le_start_enc *cp;
1883	struct hci_conn *conn;
1884
1885	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1886
1887	if (!status)
1888		return;
1889
1890	hci_dev_lock(hdev);
1891
1892	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1893	if (!cp)
1894		goto unlock;
1895
1896	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1897	if (!conn)
1898		goto unlock;
1899
1900	if (conn->state != BT_CONNECTED)
1901		goto unlock;
1902
1903	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1904	hci_conn_drop(conn);
1905
1906unlock:
1907	hci_dev_unlock(hdev);
1908}
1909
/* Inquiry Complete event: clear the inquiry flag, wake any waiters,
 * and either move discovery into the name-resolving phase or mark it
 * stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state below is only maintained for mgmt users */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving the first pending name; if that fails,
	 * discovery is over.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1950
1951static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1952{
1953	struct inquiry_data data;
1954	struct inquiry_info *info = (void *) (skb->data + 1);
1955	int num_rsp = *((__u8 *) skb->data);
1956
1957	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1958
1959	if (!num_rsp)
1960		return;
1961
1962	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1963		return;
1964
1965	hci_dev_lock(hdev);
1966
1967	for (; num_rsp; num_rsp--, info++) {
1968		bool name_known, ssp;
1969
1970		bacpy(&data.bdaddr, &info->bdaddr);
1971		data.pscan_rep_mode	= info->pscan_rep_mode;
1972		data.pscan_period_mode	= info->pscan_period_mode;
1973		data.pscan_mode		= info->pscan_mode;
1974		memcpy(data.dev_class, info->dev_class, 3);
1975		data.clock_offset	= info->clock_offset;
1976		data.rssi		= 0x00;
1977		data.ssp_mode		= 0x00;
1978
1979		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1980		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1981				  info->dev_class, 0, !name_known, ssp, NULL,
1982				  0, NULL, 0);
1983	}
1984
1985	hci_dev_unlock(hdev);
1986}
1987
/* Connection Complete event: finalize setup of an ACL/SCO connection
 * or tear down the hci_conn on failure, then notify upper layers.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may belong to a connection we set up
		 * as ESCO; retry the lookup and fix up the type.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) connection with no
			 * stored link key: keep a shorter timeout while
			 * pairing may still happen.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* ACL links are confirmed later, after remote features
		 * and (possibly) authentication complete.
		 */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2069
/* Connection Request event: accept or reject an incoming ACL/SCO/ESCO
 * connection based on link policy, blacklist, and protocol opinion.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layer veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class if we know the device */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			/* eSCO-capable controller: accept with explicit
			 * synchronous connection parameters.
			 */
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.max_latency    = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			/* Protocol asked to defer: let it confirm later */
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
2151
2152static u8 hci_to_mgmt_reason(u8 err)
2153{
2154	switch (err) {
2155	case HCI_ERROR_CONNECTION_TIMEOUT:
2156		return MGMT_DEV_DISCONN_TIMEOUT;
2157	case HCI_ERROR_REMOTE_USER_TERM:
2158	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2159	case HCI_ERROR_REMOTE_POWER_OFF:
2160		return MGMT_DEV_DISCONN_REMOTE;
2161	case HCI_ERROR_LOCAL_HOST_TERM:
2162		return MGMT_DEV_DISCONN_LOCAL_HOST;
2163	default:
2164		return MGMT_DEV_DISCONN_UNKNOWN;
2165	}
2166}
2167
/* Disconnection Complete event: report the disconnect to mgmt, apply
 * auto-connect policy for LE devices, delete the connection and, for
 * LE, re-enable advertising if needed.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report to mgmt if the connection was reported earlier */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK &&
	    test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	/* Apply the stored auto-connect policy for this peer */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type);
			break;

		default:
			break;
		}
	}

	/* Remember the type; conn is gone after hci_conn_del() */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2239
/* Authentication Complete event: update the connection's auth state
 * and either continue connection setup (possibly requesting
 * encryption) or report the failure.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* During setup, SSP links go on to request encryption;
		 * otherwise setup is finished here.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption request may have been queued behind this auth */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2305
/* Remote Name Request Complete event: hand the resolved name (or the
 * failure) to the discovery machinery, then request authentication if
 * connection setup was only waiting for the name.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	/* Pass NULL name on failure so the entry is marked unresolved */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2344
/* Encryption Change event: update the connection's encryption/auth
 * flags, enforce Secure Connections Only policy, and complete or abort
 * connection setup accordingly.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 on ACL means AES-CCM; LE links
			 * always use AES-CCM when encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Failing to change encryption on a live link means the link
	 * can no longer be trusted: disconnect it.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2410
2411static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2412					     struct sk_buff *skb)
2413{
2414	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2415	struct hci_conn *conn;
2416
2417	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2418
2419	hci_dev_lock(hdev);
2420
2421	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2422	if (conn) {
2423		if (!ev->status)
2424			set_bit(HCI_CONN_SECURE, &conn->flags);
2425
2426		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2427
2428		hci_key_change_cfm(conn, ev->status);
2429	}
2430
2431	hci_dev_unlock(hdev);
2432}
2433
2434static void hci_remote_features_evt(struct hci_dev *hdev,
2435				    struct sk_buff *skb)
2436{
2437	struct hci_ev_remote_features *ev = (void *) skb->data;
2438	struct hci_conn *conn;
2439
2440	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2441
2442	hci_dev_lock(hdev);
2443
2444	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2445	if (!conn)
2446		goto unlock;
2447
2448	if (!ev->status)
2449		memcpy(conn->features[0], ev->features, 8);
2450
2451	if (conn->state != BT_CONFIG)
2452		goto unlock;
2453
2454	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2455		struct hci_cp_read_remote_ext_features cp;
2456		cp.handle = ev->handle;
2457		cp.page = 0x01;
2458		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2459			     sizeof(cp), &cp);
2460		goto unlock;
2461	}
2462
2463	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2464		struct hci_cp_remote_name_req cp;
2465		memset(&cp, 0, sizeof(cp));
2466		bacpy(&cp.bdaddr, &conn->dst);
2467		cp.pscan_rep_mode = 0x02;
2468		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2469	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2470		mgmt_device_connected(hdev, &conn->dst, conn->type,
2471				      conn->dst_type, 0, NULL, 0,
2472				      conn->dev_class);
2473
2474	if (!hci_outgoing_auth_needed(hdev, conn)) {
2475		conn->state = BT_CONNECTED;
2476		hci_proto_connect_cfm(conn, ev->status);
2477		hci_conn_drop(conn);
2478	}
2479
2480unlock:
2481	hci_dev_unlock(hdev);
2482}
2483
/* Command Complete event: dispatch the return parameters to the
 * per-opcode completion handler, stop the command timeout, complete the
 * request framework and release command credits to the transmit path.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters is the command status */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	/* LE controller commands */
	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* Any real command completing means the timeout can be cancelled */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ev->ncmd is the number of command credits the controller grants;
	 * restart command processing if any are available.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2763
/* Command Status event: hand the status to the per-opcode handler,
 * complete the request framework where appropriate and release command
 * credits to the transmit path.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* Any real command getting a status means the timeout can stop */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* The request is completed here on failure, or on success when
	 * the sent command is not waiting for a specific follow-up event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* ev->ncmd is the number of command credits the controller grants */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2856
2857static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2858{
2859	struct hci_ev_role_change *ev = (void *) skb->data;
2860	struct hci_conn *conn;
2861
2862	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2863
2864	hci_dev_lock(hdev);
2865
2866	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2867	if (conn) {
2868		if (!ev->status) {
2869			if (ev->role)
2870				clear_bit(HCI_CONN_MASTER, &conn->flags);
2871			else
2872				set_bit(HCI_CONN_MASTER, &conn->flags);
2873		}
2874
2875		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2876
2877		hci_role_switch_cfm(conn, ev->status, ev->role);
2878	}
2879
2880	hci_dev_unlock(hdev);
2881}
2882
2883static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2884{
2885	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2886	int i;
2887
2888	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2889		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2890		return;
2891	}
2892
2893	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2894	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2895		BT_DBG("%s bad parameters", hdev->name);
2896		return;
2897	}
2898
2899	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2900
2901	for (i = 0; i < ev->num_hndl; i++) {
2902		struct hci_comp_pkts_info *info = &ev->handles[i];
2903		struct hci_conn *conn;
2904		__u16  handle, count;
2905
2906		handle = __le16_to_cpu(info->handle);
2907		count  = __le16_to_cpu(info->count);
2908
2909		conn = hci_conn_hash_lookup_handle(hdev, handle);
2910		if (!conn)
2911			continue;
2912
2913		conn->sent -= count;
2914
2915		switch (conn->type) {
2916		case ACL_LINK:
2917			hdev->acl_cnt += count;
2918			if (hdev->acl_cnt > hdev->acl_pkts)
2919				hdev->acl_cnt = hdev->acl_pkts;
2920			break;
2921
2922		case LE_LINK:
2923			if (hdev->le_pkts) {
2924				hdev->le_cnt += count;
2925				if (hdev->le_cnt > hdev->le_pkts)
2926					hdev->le_cnt = hdev->le_pkts;
2927			} else {
2928				hdev->acl_cnt += count;
2929				if (hdev->acl_cnt > hdev->acl_pkts)
2930					hdev->acl_cnt = hdev->acl_pkts;
2931			}
2932			break;
2933
2934		case SCO_LINK:
2935			hdev->sco_cnt += count;
2936			if (hdev->sco_cnt > hdev->sco_pkts)
2937				hdev->sco_cnt = hdev->sco_pkts;
2938			break;
2939
2940		default:
2941			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2942			break;
2943		}
2944	}
2945
2946	queue_work(hdev->workqueue, &hdev->tx_work);
2947}
2948
2949static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2950						 __u16 handle)
2951{
2952	struct hci_chan *chan;
2953
2954	switch (hdev->dev_type) {
2955	case HCI_BREDR:
2956		return hci_conn_hash_lookup_handle(hdev, handle);
2957	case HCI_AMP:
2958		chan = hci_chan_lookup_handle(hdev, handle);
2959		if (chan)
2960			return chan->conn;
2961		break;
2962	default:
2963		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2964		break;
2965	}
2966
2967	return NULL;
2968}
2969
2970static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2971{
2972	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2973	int i;
2974
2975	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2976		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2977		return;
2978	}
2979
2980	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2981	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2982		BT_DBG("%s bad parameters", hdev->name);
2983		return;
2984	}
2985
2986	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2987	       ev->num_hndl);
2988
2989	for (i = 0; i < ev->num_hndl; i++) {
2990		struct hci_comp_blocks_info *info = &ev->handles[i];
2991		struct hci_conn *conn = NULL;
2992		__u16  handle, block_count;
2993
2994		handle = __le16_to_cpu(info->handle);
2995		block_count = __le16_to_cpu(info->blocks);
2996
2997		conn = __hci_conn_lookup_handle(hdev, handle);
2998		if (!conn)
2999			continue;
3000
3001		conn->sent -= block_count;
3002
3003		switch (conn->type) {
3004		case ACL_LINK:
3005		case AMP_LINK:
3006			hdev->block_cnt += block_count;
3007			if (hdev->block_cnt > hdev->num_blocks)
3008				hdev->block_cnt = hdev->num_blocks;
3009			break;
3010
3011		default:
3012			BT_ERR("Unknown type %d conn %p", conn->type, conn);
3013			break;
3014		}
3015	}
3016
3017	queue_work(hdev->workqueue, &hdev->tx_work);
3018}
3019
3020static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3021{
3022	struct hci_ev_mode_change *ev = (void *) skb->data;
3023	struct hci_conn *conn;
3024
3025	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3026
3027	hci_dev_lock(hdev);
3028
3029	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3030	if (conn) {
3031		conn->mode = ev->mode;
3032
3033		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3034					&conn->flags)) {
3035			if (conn->mode == HCI_CM_ACTIVE)
3036				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3037			else
3038				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3039		}
3040
3041		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3042			hci_sco_setup(conn, ev->status);
3043	}
3044
3045	hci_dev_unlock(hdev);
3046}
3047
3048static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3049{
3050	struct hci_ev_pin_code_req *ev = (void *) skb->data;
3051	struct hci_conn *conn;
3052
3053	BT_DBG("%s", hdev->name);
3054
3055	hci_dev_lock(hdev);
3056
3057	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3058	if (!conn)
3059		goto unlock;
3060
3061	if (conn->state == BT_CONNECTED) {
3062		hci_conn_hold(conn);
3063		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3064		hci_conn_drop(conn);
3065	}
3066
3067	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
3068		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3069			     sizeof(ev->bdaddr), &ev->bdaddr);
3070	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
3071		u8 secure;
3072
3073		if (conn->pending_sec_level == BT_SECURITY_HIGH)
3074			secure = 1;
3075		else
3076			secure = 0;
3077
3078		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3079	}
3080
3081unlock:
3082	hci_dev_unlock(hdev);
3083}
3084
/* Link Key Request event: the controller asks whether the host has a
 * stored link key for the remote address.  Reply with the key, or send
 * a negative reply if no key (or only an unsuitable one) is found.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Link keys are only managed when the mgmt interface is in use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Refuse unauthenticated keys when the pending auth
		 * requirement asks for MITM protection (low bit of
		 * auth_type set).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Legacy combination keys derived from a PIN shorter than
		 * 16 digits are too weak for high or FIPS security.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3143
/* Link Key Notification event: a new link key was created during
 * pairing.  Store it, notify user space and decide whether the key
 * should persist beyond the connection.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed-combination key keeps the original key type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del(&key->list);
		kfree(key);
	} else if (conn) {
		if (persistent)
			clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
		else
			set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	}

unlock:
	hci_dev_unlock(hdev);
}
3197
3198static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3199{
3200	struct hci_ev_clock_offset *ev = (void *) skb->data;
3201	struct hci_conn *conn;
3202
3203	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3204
3205	hci_dev_lock(hdev);
3206
3207	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3208	if (conn && !ev->status) {
3209		struct inquiry_entry *ie;
3210
3211		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3212		if (ie) {
3213			ie->data.clock_offset = ev->clock_offset;
3214			ie->timestamp = jiffies;
3215		}
3216	}
3217
3218	hci_dev_unlock(hdev);
3219}
3220
3221static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3222{
3223	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3224	struct hci_conn *conn;
3225
3226	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3227
3228	hci_dev_lock(hdev);
3229
3230	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3231	if (conn && !ev->status)
3232		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3233
3234	hci_dev_unlock(hdev);
3235}
3236
3237static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3238{
3239	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3240	struct inquiry_entry *ie;
3241
3242	BT_DBG("%s", hdev->name);
3243
3244	hci_dev_lock(hdev);
3245
3246	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3247	if (ie) {
3248		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3249		ie->timestamp = jiffies;
3250	}
3251
3252	hci_dev_unlock(hdev);
3253}
3254
/* Inquiry Result with RSSI event: parse the responses, update the
 * inquiry cache and report each found device to mgmt.  Some controllers
 * use a variant record that also carries the page scan mode; the record
 * size distinguishes the two layouts.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	/* First byte of the event is the number of responses */
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not reported to user space */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Record size decides which of the two event layouts was sent */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3314
/* Read Remote Extended Features Complete event: store the requested
 * feature page, derive the remote host's SSP/SC support from page 1
 * and continue connection setup (name resolution or mgmt report).
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's SSP and SC support bits */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* Only connections still being set up need further processing */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3380
/* Synchronous Connection Complete event: finish SCO/eSCO setup.  Some
 * rejection reasons trigger a retry with downgraded packet types before
 * the connection is given up on.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO request may have been downgraded to SCO by the
		 * controller; look the connection up under its old type.
		 */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* Retry outgoing attempts with a reduced packet type set */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3438
/* Return the length of the significant part of an EIR data block.
 * Each structure starts with a length octet covering the bytes that
 * follow it; a zero length octet ends the significant part early.
 */
static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
	size_t offset = 0;

	while (offset < eir_len) {
		u8 field_len = eir[offset];

		if (!field_len)
			return offset;

		offset += field_len + 1;
	}

	return eir_len;
}
3455
3456static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3457					    struct sk_buff *skb)
3458{
3459	struct inquiry_data data;
3460	struct extended_inquiry_info *info = (void *) (skb->data + 1);
3461	int num_rsp = *((__u8 *) skb->data);
3462	size_t eir_len;
3463
3464	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3465
3466	if (!num_rsp)
3467		return;
3468
3469	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3470		return;
3471
3472	hci_dev_lock(hdev);
3473
3474	for (; num_rsp; num_rsp--, info++) {
3475		bool name_known, ssp;
3476
3477		bacpy(&data.bdaddr, &info->bdaddr);
3478		data.pscan_rep_mode	= info->pscan_rep_mode;
3479		data.pscan_period_mode	= info->pscan_period_mode;
3480		data.pscan_mode		= 0x00;
3481		memcpy(data.dev_class, info->dev_class, 3);
3482		data.clock_offset	= info->clock_offset;
3483		data.rssi		= info->rssi;
3484		data.ssp_mode		= 0x01;
3485
3486		if (test_bit(HCI_MGMT, &hdev->dev_flags))
3487			name_known = eir_has_data_type(info->data,
3488						       sizeof(info->data),
3489						       EIR_NAME_COMPLETE);
3490		else
3491			name_known = true;
3492
3493		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3494						      &ssp);
3495		eir_len = eir_get_length(info->data, sizeof(info->data));
3496		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3497				  info->dev_class, info->rssi, !name_known,
3498				  ssp, info->data, eir_len, NULL, 0);
3499	}
3500
3501	hci_dev_unlock(hdev);
3502}
3503
/* Encryption Key Refresh Complete event: for LE links this finalizes
 * the security upgrade the same way the auth-complete path does for
 * BR/EDR; failures on established links drop the connection.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link is an auth failure */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3553
3554static u8 hci_get_auth_req(struct hci_conn *conn)
3555{
3556	/* If remote requests no-bonding follow that lead */
3557	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3558	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3559		return conn->remote_auth | (conn->auth_type & 0x01);
3560
3561	/* If both remote and local have enough IO capabilities, require
3562	 * MITM protection
3563	 */
3564	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3565	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3566		return conn->remote_auth | 0x01;
3567
3568	/* No MITM protection possible so ignore remote requirement */
3569	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3570}
3571
/* Handle HCI IO Capability Request event.
 *
 * Replies with our IO capability, OOB availability and authentication
 * requirements if pairing is acceptable, or with a negative reply
 * (pairing not allowed) otherwise. The connection reference taken
 * here is balanced later in the pairing flow (e.g. when Simple
 * Pairing completes).
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	/* Without the management interface there is nobody to answer
	 * the request; leave it to time out on the controller side.
	 */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Accept pairing when we are pairable, or when the remote only
	 * asks for no-bonding (with or without MITM).
	 */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			cp.authentication = conn->auth_type;

			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 * conn->auth_type is not updated here since
			 * that might cause the user confirmation to be
			 * rejected in case the remote doesn't have the
			 * IO capabilities for MITM.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    cp.authentication != HCI_AT_NO_BONDING)
				cp.authentication |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
			cp.authentication = conn->auth_type;
		}

		/* Advertise OOB data only if we have it stored and it
		 * is usable for this connection direction.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3640
3641static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3642{
3643	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3644	struct hci_conn *conn;
3645
3646	BT_DBG("%s", hdev->name);
3647
3648	hci_dev_lock(hdev);
3649
3650	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3651	if (!conn)
3652		goto unlock;
3653
3654	conn->remote_cap = ev->capability;
3655	conn->remote_auth = ev->authentication;
3656	if (ev->oob_data)
3657		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3658
3659unlock:
3660	hci_dev_unlock(hdev);
3661}
3662
/* Handle HCI User Confirmation Request event.
 *
 * Decides between rejecting (MITM required but remote cannot provide
 * it), auto-accepting (neither side needs MITM), deferring acceptance
 * to user space with a confirmation hint, or forwarding the numeric
 * comparison to user space for a real yes/no decision.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirements encodes the MITM
	 * protection requirement on each side.
	 */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation request
	 */
	if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM in which case we do auto-accept.
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally delay the auto-accept via a workqueue so a
		 * user-visible passkey can be shown briefly first.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3732
3733static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3734					 struct sk_buff *skb)
3735{
3736	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3737
3738	BT_DBG("%s", hdev->name);
3739
3740	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3741		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3742}
3743
3744static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3745					struct sk_buff *skb)
3746{
3747	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3748	struct hci_conn *conn;
3749
3750	BT_DBG("%s", hdev->name);
3751
3752	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3753	if (!conn)
3754		return;
3755
3756	conn->passkey_notify = __le32_to_cpu(ev->passkey);
3757	conn->passkey_entered = 0;
3758
3759	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3760		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3761					 conn->dst_type, conn->passkey_notify,
3762					 conn->passkey_entered);
3763}
3764
3765static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3766{
3767	struct hci_ev_keypress_notify *ev = (void *) skb->data;
3768	struct hci_conn *conn;
3769
3770	BT_DBG("%s", hdev->name);
3771
3772	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3773	if (!conn)
3774		return;
3775
3776	switch (ev->type) {
3777	case HCI_KEYPRESS_STARTED:
3778		conn->passkey_entered = 0;
3779		return;
3780
3781	case HCI_KEYPRESS_ENTERED:
3782		conn->passkey_entered++;
3783		break;
3784
3785	case HCI_KEYPRESS_ERASED:
3786		conn->passkey_entered--;
3787		break;
3788
3789	case HCI_KEYPRESS_CLEARED:
3790		conn->passkey_entered = 0;
3791		break;
3792
3793	case HCI_KEYPRESS_COMPLETED:
3794		return;
3795	}
3796
3797	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3798		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3799					 conn->dst_type, conn->passkey_notify,
3800					 conn->passkey_entered);
3801}
3802
3803static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3804					 struct sk_buff *skb)
3805{
3806	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3807	struct hci_conn *conn;
3808
3809	BT_DBG("%s", hdev->name);
3810
3811	hci_dev_lock(hdev);
3812
3813	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3814	if (!conn)
3815		goto unlock;
3816
3817	/* To avoid duplicate auth_failed events to user space we check
3818	 * the HCI_CONN_AUTH_PEND flag which will be set if we
3819	 * initiated the authentication. A traditional auth_complete
3820	 * event gets always produced as initiator and is also mapped to
3821	 * the mgmt_auth_failed event */
3822	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3823		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3824				 ev->status);
3825
3826	hci_conn_drop(conn);
3827
3828unlock:
3829	hci_dev_unlock(hdev);
3830}
3831
3832static void hci_remote_host_features_evt(struct hci_dev *hdev,
3833					 struct sk_buff *skb)
3834{
3835	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3836	struct inquiry_entry *ie;
3837	struct hci_conn *conn;
3838
3839	BT_DBG("%s", hdev->name);
3840
3841	hci_dev_lock(hdev);
3842
3843	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3844	if (conn)
3845		memcpy(conn->features[1], ev->features, 8);
3846
3847	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3848	if (ie)
3849		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3850
3851	hci_dev_unlock(hdev);
3852}
3853
3854static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3855					    struct sk_buff *skb)
3856{
3857	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3858	struct oob_data *data;
3859
3860	BT_DBG("%s", hdev->name);
3861
3862	hci_dev_lock(hdev);
3863
3864	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3865		goto unlock;
3866
3867	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3868	if (data) {
3869		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
3870			struct hci_cp_remote_oob_ext_data_reply cp;
3871
3872			bacpy(&cp.bdaddr, &ev->bdaddr);
3873			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
3874			memcpy(cp.randomizer192, data->randomizer192,
3875			       sizeof(cp.randomizer192));
3876			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
3877			memcpy(cp.randomizer256, data->randomizer256,
3878			       sizeof(cp.randomizer256));
3879
3880			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
3881				     sizeof(cp), &cp);
3882		} else {
3883			struct hci_cp_remote_oob_data_reply cp;
3884
3885			bacpy(&cp.bdaddr, &ev->bdaddr);
3886			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
3887			memcpy(cp.randomizer, data->randomizer192,
3888			       sizeof(cp.randomizer));
3889
3890			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
3891				     sizeof(cp), &cp);
3892		}
3893	} else {
3894		struct hci_cp_remote_oob_data_neg_reply cp;
3895
3896		bacpy(&cp.bdaddr, &ev->bdaddr);
3897		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
3898			     sizeof(cp), &cp);
3899	}
3900
3901unlock:
3902	hci_dev_unlock(hdev);
3903}
3904
3905static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3906				      struct sk_buff *skb)
3907{
3908	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3909	struct hci_conn *hcon, *bredr_hcon;
3910
3911	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3912	       ev->status);
3913
3914	hci_dev_lock(hdev);
3915
3916	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3917	if (!hcon) {
3918		hci_dev_unlock(hdev);
3919		return;
3920	}
3921
3922	if (ev->status) {
3923		hci_conn_del(hcon);
3924		hci_dev_unlock(hdev);
3925		return;
3926	}
3927
3928	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3929
3930	hcon->state = BT_CONNECTED;
3931	bacpy(&hcon->dst, &bredr_hcon->dst);
3932
3933	hci_conn_hold(hcon);
3934	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3935	hci_conn_drop(hcon);
3936
3937	hci_conn_add_sysfs(hcon);
3938
3939	amp_physical_cfm(bredr_hcon, hcon);
3940
3941	hci_dev_unlock(hdev);
3942}
3943
3944static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3945{
3946	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3947	struct hci_conn *hcon;
3948	struct hci_chan *hchan;
3949	struct amp_mgr *mgr;
3950
3951	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3952	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3953	       ev->status);
3954
3955	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3956	if (!hcon)
3957		return;
3958
3959	/* Create AMP hchan */
3960	hchan = hci_chan_create(hcon);
3961	if (!hchan)
3962		return;
3963
3964	hchan->handle = le16_to_cpu(ev->handle);
3965
3966	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3967
3968	mgr = hcon->amp_mgr;
3969	if (mgr && mgr->bredr_chan) {
3970		struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3971
3972		l2cap_chan_lock(bredr_chan);
3973
3974		bredr_chan->conn->mtu = hdev->block_mtu;
3975		l2cap_logical_cfm(bredr_chan, hchan, 0);
3976		hci_conn_hold(hcon);
3977
3978		l2cap_chan_unlock(bredr_chan);
3979	}
3980}
3981
3982static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3983					     struct sk_buff *skb)
3984{
3985	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3986	struct hci_chan *hchan;
3987
3988	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3989	       le16_to_cpu(ev->handle), ev->status);
3990
3991	if (ev->status)
3992		return;
3993
3994	hci_dev_lock(hdev);
3995
3996	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3997	if (!hchan)
3998		goto unlock;
3999
4000	amp_destroy_logical_link(hchan, ev->reason);
4001
4002unlock:
4003	hci_dev_unlock(hdev);
4004}
4005
4006static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4007					     struct sk_buff *skb)
4008{
4009	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4010	struct hci_conn *hcon;
4011
4012	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4013
4014	if (ev->status)
4015		return;
4016
4017	hci_dev_lock(hdev);
4018
4019	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4020	if (hcon) {
4021		hcon->state = BT_CLOSED;
4022		hci_conn_del(hcon);
4023	}
4024
4025	hci_dev_unlock(hdev);
4026}
4027
/* Handle HCI LE Connection Complete event.
 *
 * Matches the event against a pending outgoing connection (or creates
 * a new connection object for incoming/white-list connections), fills
 * in initiator/responder addresses, resolves RPAs back to identity
 * addresses, and completes or fails the connection setup.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct smp_irk *irk;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			set_bit(HCI_CONN_MASTER, &conn->flags);
		}

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* An existing BT_CONNECT entry means this completes a
		 * connection we initiated: stop its timeout timer.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	/* Remember the parameters the controller negotiated */
	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The pending-connection entry (if any) is now satisfied */
	hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type);

unlock:
	hci_dev_unlock(hdev);
}
4140
4141static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4142					    struct sk_buff *skb)
4143{
4144	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4145	struct hci_conn *conn;
4146
4147	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4148
4149	if (ev->status)
4150		return;
4151
4152	hci_dev_lock(hdev);
4153
4154	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4155	if (conn) {
4156		conn->le_conn_interval = le16_to_cpu(ev->interval);
4157		conn->le_conn_latency = le16_to_cpu(ev->latency);
4158		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4159	}
4160
4161	hci_dev_unlock(hdev);
4162}
4163
4164/* This function requires the caller holds hdev->lock */
4165static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
4166				  u8 addr_type)
4167{
4168	struct hci_conn *conn;
4169	struct smp_irk *irk;
4170
4171	/* If this is a resolvable address, we should resolve it and then
4172	 * update address and address type variables.
4173	 */
4174	irk = hci_get_irk(hdev, addr, addr_type);
4175	if (irk) {
4176		addr = &irk->bdaddr;
4177		addr_type = irk->addr_type;
4178	}
4179
4180	if (!hci_pend_le_conn_lookup(hdev, addr, addr_type))
4181		return;
4182
4183	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4184			      HCI_AT_NO_BONDING);
4185	if (!IS_ERR(conn))
4186		return;
4187
4188	switch (PTR_ERR(conn)) {
4189	case -EBUSY:
4190		/* If hci_connect() returns -EBUSY it means there is already
4191		 * an LE connection attempt going on. Since controllers don't
4192		 * support more than one connection attempt at the time, we
4193		 * don't consider this an error case.
4194		 */
4195		break;
4196	default:
4197		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
4198	}
4199}
4200
/* Process a single LE advertising report.
 *
 * During passive scanning this only triggers pending-connection
 * checks. During active scanning, reports that will be followed by a
 * SCAN_RSP are buffered so the advertising data and scan-response
 * data can be merged into one device-found event.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	bool match;

	/* Passive scanning shouldn't trigger any device found events */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND)
			check_pending_le_conn(hdev, bdaddr, bdaddr_type);
		return;
	}

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, 0, 1, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, 0, 1,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, 0, 1, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, 0, 1,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4277
4278static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4279{
4280	u8 num_reports = skb->data[0];
4281	void *ptr = &skb->data[1];
4282
4283	hci_dev_lock(hdev);
4284
4285	while (num_reports--) {
4286		struct hci_ev_le_advertising_info *ev = ptr;
4287		s8 rssi;
4288
4289		rssi = ev->data[ev->length];
4290		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4291				   ev->bdaddr_type, rssi, ev->data, ev->length);
4292
4293		ptr += sizeof(*ev) + ev->length + 1;
4294	}
4295
4296	hci_dev_unlock(hdev);
4297}
4298
4299static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4300{
4301	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
4302	struct hci_cp_le_ltk_reply cp;
4303	struct hci_cp_le_ltk_neg_reply neg;
4304	struct hci_conn *conn;
4305	struct smp_ltk *ltk;
4306
4307	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
4308
4309	hci_dev_lock(hdev);
4310
4311	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4312	if (conn == NULL)
4313		goto not_found;
4314
4315	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
4316	if (ltk == NULL)
4317		goto not_found;
4318
4319	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
4320	cp.handle = cpu_to_le16(conn->handle);
4321
4322	if (ltk->authenticated)
4323		conn->pending_sec_level = BT_SECURITY_HIGH;
4324	else
4325		conn->pending_sec_level = BT_SECURITY_MEDIUM;
4326
4327	conn->enc_key_size = ltk->enc_size;
4328
4329	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
4330
4331	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
4332	 * temporary key used to encrypt a connection following
4333	 * pairing. It is used during the Encrypted Session Setup to
4334	 * distribute the keys. Later, security can be re-established
4335	 * using a distributed LTK.
4336	 */
4337	if (ltk->type == SMP_STK) {
4338		list_del(&ltk->list);
4339		kfree(ltk);
4340	}
4341
4342	hci_dev_unlock(hdev);
4343
4344	return;
4345
4346not_found:
4347	neg.handle = ev->handle;
4348	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
4349	hci_dev_unlock(hdev);
4350}
4351
4352static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4353{
4354	struct hci_ev_le_meta *le_ev = (void *) skb->data;
4355
4356	skb_pull(skb, sizeof(*le_ev));
4357
4358	switch (le_ev->subevent) {
4359	case HCI_EV_LE_CONN_COMPLETE:
4360		hci_le_conn_complete_evt(hdev, skb);
4361		break;
4362
4363	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4364		hci_le_conn_update_complete_evt(hdev, skb);
4365		break;
4366
4367	case HCI_EV_LE_ADVERTISING_REPORT:
4368		hci_le_adv_report_evt(hdev, skb);
4369		break;
4370
4371	case HCI_EV_LE_LTK_REQ:
4372		hci_le_ltk_request_evt(hdev, skb);
4373		break;
4374
4375	default:
4376		break;
4377	}
4378}
4379
4380static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4381{
4382	struct hci_ev_channel_selected *ev = (void *) skb->data;
4383	struct hci_conn *hcon;
4384
4385	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4386
4387	skb_pull(skb, sizeof(*ev));
4388
4389	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4390	if (!hcon)
4391		return;
4392
4393	amp_read_loc_assoc_final_data(hdev, hcon);
4394}
4395
/* Main HCI event entry point.
 *
 * Clones the event for a possibly waiting synchronous request,
 * completes the pending command if this event is the one it was
 * waiting for, then dispatches to the per-event handler. Consumes
 * the skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hdev->req_status == HCI_REQ_PEND) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the sent command was waiting for this specific event,
	 * mark the command as completed.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
4599