1/*
2   BlueZ - Bluetooth protocol stack for Linux
3   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License version 2 as
9   published by the Free Software Foundation;
10
11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22   SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI event handling. */
26
27#include <asm/unaligned.h>
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/mgmt.h>
32
33#include "a2mp.h"
34#include "amp.h"
35#include "smp.h"
36
37/* Handle HCI Event packets */
38
/* Command Complete handler for HCI_Inquiry_Cancel.
 * On success: clear the in-progress inquiry bit, wake any waiters blocked
 * on HCI_INQUIRY, mark discovery stopped, and resume connection attempts
 * that were deferred while the inquiry was running.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* First byte of a Command Complete payload is the status code */
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* Inquiry blocks outgoing connections; retry any that are pending */
	hci_conn_check_pending(hdev);
}
58
59static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60{
61	__u8 status = *((__u8 *) skb->data);
62
63	BT_DBG("%s status 0x%2.2x", hdev->name, status);
64
65	if (status)
66		return;
67
68	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69}
70
71static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72{
73	__u8 status = *((__u8 *) skb->data);
74
75	BT_DBG("%s status 0x%2.2x", hdev->name, status);
76
77	if (status)
78		return;
79
80	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81
82	hci_conn_check_pending(hdev);
83}
84
/* Command Complete handler for HCI_Remote_Name_Request_Cancel.
 * No local state needs updating here; the outcome is delivered via the
 * Remote Name Request Complete event, so this only traces the call.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
90
/* Command Complete handler for HCI_Role_Discovery: on success, update the
 * cached role (master/slave) of the connection identified by the returned
 * handle.
 */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}
109
/* Command Complete handler for HCI_Read_Link_Policy_Settings: on success,
 * cache the returned policy bits on the matching connection.
 */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
128
/* Command Complete handler for HCI_Write_Link_Policy_Settings: on success,
 * mirror the policy value we sent into the matching connection. The value
 * is recovered from the original command payload rather than the response.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Retrieve the parameters of the command we issued */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* +2 skips the 16-bit connection handle at the start of the
		 * command parameters; the policy field follows it.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
152
153static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
154					struct sk_buff *skb)
155{
156	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
157
158	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
159
160	if (rp->status)
161		return;
162
163	hdev->link_policy = __le16_to_cpu(rp->policy);
164}
165
/* Command Complete handler for HCI_Write_Default_Link_Policy_Settings: on
 * success, mirror the policy we sent into the device's cached default.
 */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The command parameters are just the 16-bit policy value */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}
183
/* Command Complete handler for HCI_Reset: put the driver's cached view of
 * the controller back to post-reset defaults. Note this runs regardless of
 * status (only the HCI_RESET bit clear and state wipe are done here).
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Advertising and scan response data do not survive a reset */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;
}
209
/* Command Complete handler for HCI_Write_Local_Name. When the management
 * interface is active it gets notified (success or failure); otherwise the
 * name we sent is cached locally on success only.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
230
231static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
232{
233	struct hci_rp_read_local_name *rp = (void *) skb->data;
234
235	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
236
237	if (rp->status)
238		return;
239
240	if (test_bit(HCI_SETUP, &hdev->dev_flags))
241		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
242}
243
/* Command Complete handler for HCI_Write_Authentication_Enable: on success,
 * mirror the requested mode into the HCI_AUTH flag; always notify the
 * management interface (it needs failures too).
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		/* Single parameter byte: the authentication mode we asked for */
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}
267
268static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
269{
270	__u8 status = *((__u8 *) skb->data);
271	__u8 param;
272	void *sent;
273
274	BT_DBG("%s status 0x%2.2x", hdev->name, status);
275
276	if (status)
277		return;
278
279	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
280	if (!sent)
281		return;
282
283	param = *((__u8 *) sent);
284
285	if (param)
286		set_bit(HCI_ENCRYPT, &hdev->flags);
287	else
288		clear_bit(HCI_ENCRYPT, &hdev->flags);
289}
290
/* Command Complete handler for HCI_Write_Scan_Enable: mirror the requested
 * inquiry/page scan bits into HCI_ISCAN/HCI_PSCAN. On failure the
 * discoverable timeout is reset since the mode change did not take effect.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		/* Scan mode didn't change; drop any pending discov timeout */
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
325
/* Command Complete handler for HCI_Read_Class_of_Device: on success, cache
 * the 3-byte class of device (stored little-endian, printed big-endian).
 */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
340
/* Command Complete handler for HCI_Write_Class_of_Device: on success, cache
 * the class we sent; always notify the management interface with the
 * outcome (it needs failures too).
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
362
363static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
364{
365	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
366	__u16 setting;
367
368	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
369
370	if (rp->status)
371		return;
372
373	setting = __le16_to_cpu(rp->voice_setting);
374
375	if (hdev->voice_setting == setting)
376		return;
377
378	hdev->voice_setting = setting;
379
380	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
381
382	if (hdev->notify)
383		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
384}
385
/* Command Complete handler for HCI_Write_Voice_Setting: on success, cache
 * the value we sent and notify the driver if it changed.
 */
static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The command parameters are just the 16-bit voice setting */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
414
415static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
416					  struct sk_buff *skb)
417{
418	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
419
420	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
421
422	if (rp->status)
423		return;
424
425	hdev->num_iac = rp->num_iac;
426
427	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
428}
429
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode: on success,
 * mirror the new mode into the host-features byte; then either notify the
 * management interface or (without mgmt) update the SSP-enabled flag.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		/* Keep the cached host-features page in sync */
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
457
/* Command Complete handler for HCI_Write_Secure_Connections_Host_Support:
 * structurally parallel to hci_cc_write_ssp_mode, but for the Secure
 * Connections host feature and HCI_SC_ENABLED flag.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	if (!status) {
		/* Keep the cached host-features page in sync */
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}
}
485
486static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
487{
488	struct hci_rp_read_local_version *rp = (void *) skb->data;
489
490	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
491
492	if (rp->status)
493		return;
494
495	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
496		hdev->hci_ver = rp->hci_ver;
497		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
498		hdev->lmp_ver = rp->lmp_ver;
499		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
500		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
501	}
502}
503
504static void hci_cc_read_local_commands(struct hci_dev *hdev,
505				       struct sk_buff *skb)
506{
507	struct hci_rp_read_local_commands *rp = (void *) skb->data;
508
509	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
510
511	if (rp->status)
512		return;
513
514	if (test_bit(HCI_SETUP, &hdev->dev_flags))
515		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
516}
517
/* Command Complete handler for HCI_Read_Local_Supported_Features: cache
 * feature page 0 and derive the supported ACL/SCO/eSCO packet types from
 * the individual LMP feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
567
568static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
569					   struct sk_buff *skb)
570{
571	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
572
573	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
574
575	if (rp->status)
576		return;
577
578	if (hdev->max_page < rp->max_page)
579		hdev->max_page = rp->max_page;
580
581	if (rp->page < HCI_MAX_PAGES)
582		memcpy(hdev->features[rp->page], rp->features, 8);
583}
584
585static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
586					  struct sk_buff *skb)
587{
588	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
589
590	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
591
592	if (rp->status)
593		return;
594
595	hdev->flow_ctl_mode = rp->mode;
596}
597
/* Command Complete handler for HCI_Read_Buffer_Size: cache the ACL/SCO MTU
 * and packet counts, applying the fixup quirk for controllers that report
 * bogus SCO buffer information, and seed the in-flight credit counters.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report unusable SCO buffer sizes; override them */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	/* All buffers start out free */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
623
624static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
625{
626	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
627
628	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
629
630	if (rp->status)
631		return;
632
633	if (test_bit(HCI_INIT, &hdev->flags))
634		bacpy(&hdev->bdaddr, &rp->bdaddr);
635
636	if (test_bit(HCI_SETUP, &hdev->dev_flags))
637		bacpy(&hdev->setup_addr, &rp->bdaddr);
638}
639
640static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
641					   struct sk_buff *skb)
642{
643	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
644
645	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
646
647	if (rp->status)
648		return;
649
650	if (test_bit(HCI_INIT, &hdev->flags)) {
651		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
652		hdev->page_scan_window = __le16_to_cpu(rp->window);
653	}
654}
655
656static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
657					    struct sk_buff *skb)
658{
659	u8 status = *((u8 *) skb->data);
660	struct hci_cp_write_page_scan_activity *sent;
661
662	BT_DBG("%s status 0x%2.2x", hdev->name, status);
663
664	if (status)
665		return;
666
667	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
668	if (!sent)
669		return;
670
671	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
672	hdev->page_scan_window = __le16_to_cpu(sent->window);
673}
674
675static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
676					   struct sk_buff *skb)
677{
678	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
679
680	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
681
682	if (rp->status)
683		return;
684
685	if (test_bit(HCI_INIT, &hdev->flags))
686		hdev->page_scan_type = rp->type;
687}
688
689static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
690					struct sk_buff *skb)
691{
692	u8 status = *((u8 *) skb->data);
693	u8 *type;
694
695	BT_DBG("%s status 0x%2.2x", hdev->name, status);
696
697	if (status)
698		return;
699
700	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
701	if (type)
702		hdev->page_scan_type = *type;
703}
704
705static void hci_cc_read_data_block_size(struct hci_dev *hdev,
706					struct sk_buff *skb)
707{
708	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
709
710	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
711
712	if (rp->status)
713		return;
714
715	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
716	hdev->block_len = __le16_to_cpu(rp->block_len);
717	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
718
719	hdev->block_cnt = hdev->num_blocks;
720
721	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
722	       hdev->block_cnt, hdev->block_len);
723}
724
/* Command Complete handler for HCI_Read_Clock. Depending on the "which"
 * parameter of the original command, the result is either the local clock
 * (stored on the device) or a piconet clock (stored on the connection
 * identified by the returned handle).
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated response before touching rp fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 requests the local clock */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
759
/* Command Complete handler for HCI_Read_Local_AMP_Info: on success, cache
 * the AMP controller capabilities. An A2MP Get Info response is sent in
 * all cases, success or failure.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
784
785static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
786					struct sk_buff *skb)
787{
788	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
789	struct amp_assoc *assoc = &hdev->loc_assoc;
790	size_t rem_len, frag_len;
791
792	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
793
794	if (rp->status)
795		goto a2mp_rsp;
796
797	frag_len = skb->len - sizeof(*rp);
798	rem_len = __le16_to_cpu(rp->rem_len);
799
800	if (rem_len > frag_len) {
801		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
802
803		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
804		assoc->offset += frag_len;
805
806		/* Read other fragments */
807		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
808
809		return;
810	}
811
812	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
813	assoc->len = assoc->offset + rem_len;
814	assoc->offset = 0;
815
816a2mp_rsp:
817	/* Send A2MP Rsp when all fragments are received */
818	a2mp_send_getampassoc_rsp(hdev, rp->status);
819	a2mp_send_create_phy_link_req(hdev, rp->status);
820}
821
822static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
823					 struct sk_buff *skb)
824{
825	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
826
827	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
828
829	if (rp->status)
830		return;
831
832	hdev->inq_tx_power = rp->tx_power;
833}
834
/* Command Complete handler for HCI_PIN_Code_Request_Reply: always notify
 * the management interface; on success, record the PIN length we sent on
 * the matching connection (used later for link-key persistence decisions).
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	/* Recover the PIN length from the command we issued */
	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
862
863static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
864{
865	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
866
867	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
868
869	hci_dev_lock(hdev);
870
871	if (test_bit(HCI_MGMT, &hdev->dev_flags))
872		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
873						 rp->status);
874
875	hci_dev_unlock(hdev);
876}
877
878static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
879				       struct sk_buff *skb)
880{
881	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
882
883	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
884
885	if (rp->status)
886		return;
887
888	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
889	hdev->le_pkts = rp->le_max_pkt;
890
891	hdev->le_cnt = hdev->le_pkts;
892
893	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
894}
895
896static void hci_cc_le_read_local_features(struct hci_dev *hdev,
897					  struct sk_buff *skb)
898{
899	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
900
901	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
902
903	if (rp->status)
904		return;
905
906	memcpy(hdev->le_features, rp->features, 8);
907}
908
909static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
910					struct sk_buff *skb)
911{
912	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
913
914	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
915
916	if (rp->status)
917		return;
918
919	hdev->adv_tx_power = rp->tx_power;
920}
921
922static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
923{
924	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
925
926	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
927
928	hci_dev_lock(hdev);
929
930	if (test_bit(HCI_MGMT, &hdev->dev_flags))
931		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
932						 rp->status);
933
934	hci_dev_unlock(hdev);
935}
936
937static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
938					  struct sk_buff *skb)
939{
940	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
941
942	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
943
944	hci_dev_lock(hdev);
945
946	if (test_bit(HCI_MGMT, &hdev->dev_flags))
947		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
948						     ACL_LINK, 0, rp->status);
949
950	hci_dev_unlock(hdev);
951}
952
953static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
954{
955	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
956
957	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
958
959	hci_dev_lock(hdev);
960
961	if (test_bit(HCI_MGMT, &hdev->dev_flags))
962		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
963						 0, rp->status);
964
965	hci_dev_unlock(hdev);
966}
967
968static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
969					  struct sk_buff *skb)
970{
971	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
972
973	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
974
975	hci_dev_lock(hdev);
976
977	if (test_bit(HCI_MGMT, &hdev->dev_flags))
978		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
979						     ACL_LINK, 0, rp->status);
980
981	hci_dev_unlock(hdev);
982}
983
984static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
985				       struct sk_buff *skb)
986{
987	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
988
989	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
990
991	hci_dev_lock(hdev);
992	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
993					  NULL, NULL, rp->status);
994	hci_dev_unlock(hdev);
995}
996
997static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
998					   struct sk_buff *skb)
999{
1000	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1001
1002	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1003
1004	hci_dev_lock(hdev);
1005	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
1006					  rp->hash256, rp->randomizer256,
1007					  rp->status);
1008	hci_dev_unlock(hdev);
1009}
1010
1011
1012static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1013{
1014	__u8 status = *((__u8 *) skb->data);
1015	bdaddr_t *sent;
1016
1017	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1018
1019	if (status)
1020		return;
1021
1022	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1023	if (!sent)
1024		return;
1025
1026	hci_dev_lock(hdev);
1027
1028	bacpy(&hdev->random_addr, sent);
1029
1030	hci_dev_unlock(hdev);
1031}
1032
/* Command Complete handler for HCI_LE_Set_Advertise_Enable: track the
 * HCI_LE_ADV flag, and when advertising was enabled while an LE connection
 * attempt is pending (peripheral-initiated connect), arm a timeout so a
 * stalled attempt gets cleaned up.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The command parameters are the single enable byte */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		set_bit(HCI_LE_ADV, &hdev->dev_flags);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		clear_bit(HCI_LE_ADV, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
1067
1068static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1069{
1070	struct hci_cp_le_set_scan_param *cp;
1071	__u8 status = *((__u8 *) skb->data);
1072
1073	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1074
1075	if (status)
1076		return;
1077
1078	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1079	if (!cp)
1080		return;
1081
1082	hci_dev_lock(hdev);
1083
1084	hdev->le_scan_type = cp->type;
1085
1086	hci_dev_unlock(hdev);
1087}
1088
1089static bool has_pending_adv_report(struct hci_dev *hdev)
1090{
1091	struct discovery_state *d = &hdev->discovery;
1092
1093	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1094}
1095
1096static void clear_pending_adv_report(struct hci_dev *hdev)
1097{
1098	struct discovery_state *d = &hdev->discovery;
1099
1100	bacpy(&d->last_adv_addr, BDADDR_ANY);
1101	d->last_adv_data_len = 0;
1102}
1103
/* Buffer an advertising report in the discovery state so it can be merged
 * with a following scan response (or flushed when scanning stops).
 *
 * NOTE(review): len is copied into d->last_adv_data without a bound check
 * here — presumably callers cap it at the size of last_adv_data; confirm
 * against the call sites.
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
1117
/* Command Complete handler for HCI_LE_Set_Scan_Enable: track the
 * HCI_LE_SCAN flag and tie up loose ends of the discovery state machine.
 * On disable this flushes any buffered advertising report, cancels the
 * scan-disable timer, and either marks discovery stopped (scan was
 * interrupted for a connect) or re-enables advertising if active scanning
 * had suppressed it.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* Active scanning merges ADV + scan-rsp; start clean */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1183
1184static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1185					   struct sk_buff *skb)
1186{
1187	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1188
1189	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1190
1191	if (rp->status)
1192		return;
1193
1194	hdev->le_white_list_size = rp->size;
1195}
1196
1197static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1198				       struct sk_buff *skb)
1199{
1200	__u8 status = *((__u8 *) skb->data);
1201
1202	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1203
1204	if (status)
1205		return;
1206
1207	hci_bdaddr_list_clear(&hdev->le_white_list);
1208}
1209
1210static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1211					struct sk_buff *skb)
1212{
1213	struct hci_cp_le_add_to_white_list *sent;
1214	__u8 status = *((__u8 *) skb->data);
1215
1216	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1217
1218	if (status)
1219		return;
1220
1221	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1222	if (!sent)
1223		return;
1224
1225	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1226			   sent->bdaddr_type);
1227}
1228
1229static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1230					  struct sk_buff *skb)
1231{
1232	struct hci_cp_le_del_from_white_list *sent;
1233	__u8 status = *((__u8 *) skb->data);
1234
1235	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1236
1237	if (status)
1238		return;
1239
1240	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1241	if (!sent)
1242		return;
1243
1244	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1245			    sent->bdaddr_type);
1246}
1247
1248static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1249					    struct sk_buff *skb)
1250{
1251	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1252
1253	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1254
1255	if (rp->status)
1256		return;
1257
1258	memcpy(hdev->le_states, rp->le_states, 8);
1259}
1260
1261static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1262					   struct sk_buff *skb)
1263{
1264	struct hci_cp_write_le_host_supported *sent;
1265	__u8 status = *((__u8 *) skb->data);
1266
1267	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1268
1269	if (status)
1270		return;
1271
1272	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1273	if (!sent)
1274		return;
1275
1276	if (sent->le) {
1277		hdev->features[1][0] |= LMP_HOST_LE;
1278		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1279	} else {
1280		hdev->features[1][0] &= ~LMP_HOST_LE;
1281		clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1282		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1283	}
1284
1285	if (sent->simul)
1286		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1287	else
1288		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1289}
1290
1291static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1292{
1293	struct hci_cp_le_set_adv_param *cp;
1294	u8 status = *((u8 *) skb->data);
1295
1296	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1297
1298	if (status)
1299		return;
1300
1301	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1302	if (!cp)
1303		return;
1304
1305	hci_dev_lock(hdev);
1306	hdev->adv_addr_type = cp->own_address_type;
1307	hci_dev_unlock(hdev);
1308}
1309
1310static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1311					  struct sk_buff *skb)
1312{
1313	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1314
1315	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1316	       hdev->name, rp->status, rp->phy_handle);
1317
1318	if (rp->status)
1319		return;
1320
1321	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1322}
1323
1324static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1325{
1326	struct hci_rp_read_rssi *rp = (void *) skb->data;
1327	struct hci_conn *conn;
1328
1329	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1330
1331	if (rp->status)
1332		return;
1333
1334	hci_dev_lock(hdev);
1335
1336	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1337	if (conn)
1338		conn->rssi = rp->rssi;
1339
1340	hci_dev_unlock(hdev);
1341}
1342
/* Handle command complete for HCI_OP_READ_TX_POWER: record the
 * reported transmit power level on the matching connection. The
 * "type" field of the sent command selects which level was read
 * (0x00 = current, 0x01 = maximum — matching the fields updated
 * below).
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* The original command tells us which level was requested. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		/* Current transmit power level */
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		/* Maximum transmit power level */
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1376
1377static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1378{
1379	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1380
1381	if (status) {
1382		hci_conn_check_pending(hdev);
1383		return;
1384	}
1385
1386	set_bit(HCI_INQUIRY, &hdev->flags);
1387}
1388
/* Handle command status for HCI_OP_CREATE_CONN.
 *
 * On failure, a connection in BT_CONNECT state is either torn down
 * or, for status 0x0c with few attempts, parked in BT_CONNECT2 so a
 * retry can happen (NOTE(review): 0x0c appears to be the "Command
 * Disallowed" HCI error — confirm against the core spec error list).
 * On success, make sure a connection object exists so the later
 * Connection Complete event has something to attach to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1426
1427static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1428{
1429	struct hci_cp_add_sco *cp;
1430	struct hci_conn *acl, *sco;
1431	__u16 handle;
1432
1433	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1434
1435	if (!status)
1436		return;
1437
1438	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1439	if (!cp)
1440		return;
1441
1442	handle = __le16_to_cpu(cp->handle);
1443
1444	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1445
1446	hci_dev_lock(hdev);
1447
1448	acl = hci_conn_hash_lookup_handle(hdev, handle);
1449	if (acl) {
1450		sco = acl->link;
1451		if (sco) {
1452			sco->state = BT_CLOSED;
1453
1454			hci_proto_connect_cfm(sco, status);
1455			hci_conn_del(sco);
1456		}
1457	}
1458
1459	hci_dev_unlock(hdev);
1460}
1461
1462static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1463{
1464	struct hci_cp_auth_requested *cp;
1465	struct hci_conn *conn;
1466
1467	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1468
1469	if (!status)
1470		return;
1471
1472	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1473	if (!cp)
1474		return;
1475
1476	hci_dev_lock(hdev);
1477
1478	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1479	if (conn) {
1480		if (conn->state == BT_CONFIG) {
1481			hci_proto_connect_cfm(conn, status);
1482			hci_conn_drop(conn);
1483		}
1484	}
1485
1486	hci_dev_unlock(hdev);
1487}
1488
1489static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1490{
1491	struct hci_cp_set_conn_encrypt *cp;
1492	struct hci_conn *conn;
1493
1494	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1495
1496	if (!status)
1497		return;
1498
1499	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1500	if (!cp)
1501		return;
1502
1503	hci_dev_lock(hdev);
1504
1505	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1506	if (conn) {
1507		if (conn->state == BT_CONFIG) {
1508			hci_proto_connect_cfm(conn, status);
1509			hci_conn_drop(conn);
1510		}
1511	}
1512
1513	hci_dev_unlock(hdev);
1514}
1515
/* Decide whether an outgoing connection still needs authentication
 * before it can be reported as set up.
 *
 * Returns 1 when authentication should be requested, 0 otherwise.
 */
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	/* Only relevant while an outgoing link is being configured. */
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	/* SDP-only security never requires authentication. */
	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}
1537
1538static int hci_resolve_name(struct hci_dev *hdev,
1539				   struct inquiry_entry *e)
1540{
1541	struct hci_cp_remote_name_req cp;
1542
1543	memset(&cp, 0, sizeof(cp));
1544
1545	bacpy(&cp.bdaddr, &e->data.bdaddr);
1546	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1547	cp.pscan_mode = e->data.pscan_mode;
1548	cp.clock_offset = e->data.clock_offset;
1549
1550	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1551}
1552
1553static bool hci_resolve_next_name(struct hci_dev *hdev)
1554{
1555	struct discovery_state *discov = &hdev->discovery;
1556	struct inquiry_entry *e;
1557
1558	if (list_empty(&discov->resolve))
1559		return false;
1560
1561	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1562	if (!e)
1563		return false;
1564
1565	if (hci_resolve_name(hdev, e) == 0) {
1566		e->name_state = NAME_PENDING;
1567		return true;
1568	}
1569
1570	return false;
1571}
1572
/* Process a resolved (or failed) remote name during discovery.
 *
 * Reports the device as connected to mgmt (once), then — while name
 * resolution is in progress — records the result, continues with the
 * next pending entry, or marks discovery as stopped when done.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Notify mgmt about the connection exactly once. */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are pending, there is no need to continue resolving the next
	 * name, as it will be done upon receiving another Remote Name
	 * Request Complete event.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1615
/* Handle command status for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only failures are processed here: the pending-name bookkeeping is
 * updated and, if the connection still needs authentication, an
 * Auth Requested command is issued right away.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Tell the discovery machinery this name request concluded
	 * (with no name, since the command failed).
	 */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Kick off authentication unless one is already pending. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1658
1659static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1660{
1661	struct hci_cp_read_remote_features *cp;
1662	struct hci_conn *conn;
1663
1664	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1665
1666	if (!status)
1667		return;
1668
1669	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1670	if (!cp)
1671		return;
1672
1673	hci_dev_lock(hdev);
1674
1675	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1676	if (conn) {
1677		if (conn->state == BT_CONFIG) {
1678			hci_proto_connect_cfm(conn, status);
1679			hci_conn_drop(conn);
1680		}
1681	}
1682
1683	hci_dev_unlock(hdev);
1684}
1685
1686static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1687{
1688	struct hci_cp_read_remote_ext_features *cp;
1689	struct hci_conn *conn;
1690
1691	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1692
1693	if (!status)
1694		return;
1695
1696	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1697	if (!cp)
1698		return;
1699
1700	hci_dev_lock(hdev);
1701
1702	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1703	if (conn) {
1704		if (conn->state == BT_CONFIG) {
1705			hci_proto_connect_cfm(conn, status);
1706			hci_conn_drop(conn);
1707		}
1708	}
1709
1710	hci_dev_unlock(hdev);
1711}
1712
1713static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1714{
1715	struct hci_cp_setup_sync_conn *cp;
1716	struct hci_conn *acl, *sco;
1717	__u16 handle;
1718
1719	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1720
1721	if (!status)
1722		return;
1723
1724	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1725	if (!cp)
1726		return;
1727
1728	handle = __le16_to_cpu(cp->handle);
1729
1730	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1731
1732	hci_dev_lock(hdev);
1733
1734	acl = hci_conn_hash_lookup_handle(hdev, handle);
1735	if (acl) {
1736		sco = acl->link;
1737		if (sco) {
1738			sco->state = BT_CLOSED;
1739
1740			hci_proto_connect_cfm(sco, status);
1741			hci_conn_del(sco);
1742		}
1743	}
1744
1745	hci_dev_unlock(hdev);
1746}
1747
1748static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1749{
1750	struct hci_cp_sniff_mode *cp;
1751	struct hci_conn *conn;
1752
1753	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1754
1755	if (!status)
1756		return;
1757
1758	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1759	if (!cp)
1760		return;
1761
1762	hci_dev_lock(hdev);
1763
1764	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1765	if (conn) {
1766		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1767
1768		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1769			hci_sco_setup(conn, status);
1770	}
1771
1772	hci_dev_unlock(hdev);
1773}
1774
1775static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1776{
1777	struct hci_cp_exit_sniff_mode *cp;
1778	struct hci_conn *conn;
1779
1780	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1781
1782	if (!status)
1783		return;
1784
1785	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1786	if (!cp)
1787		return;
1788
1789	hci_dev_lock(hdev);
1790
1791	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1792	if (conn) {
1793		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1794
1795		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1796			hci_sco_setup(conn, status);
1797	}
1798
1799	hci_dev_unlock(hdev);
1800}
1801
1802static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1803{
1804	struct hci_cp_disconnect *cp;
1805	struct hci_conn *conn;
1806
1807	if (!status)
1808		return;
1809
1810	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1811	if (!cp)
1812		return;
1813
1814	hci_dev_lock(hdev);
1815
1816	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1817	if (conn)
1818		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1819				       conn->dst_type, status);
1820
1821	hci_dev_unlock(hdev);
1822}
1823
1824static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1825{
1826	struct hci_cp_create_phy_link *cp;
1827
1828	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1829
1830	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1831	if (!cp)
1832		return;
1833
1834	hci_dev_lock(hdev);
1835
1836	if (status) {
1837		struct hci_conn *hcon;
1838
1839		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1840		if (hcon)
1841			hci_conn_del(hcon);
1842	} else {
1843		amp_write_remote_assoc(hdev, cp->phy_handle);
1844	}
1845
1846	hci_dev_unlock(hdev);
1847}
1848
1849static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1850{
1851	struct hci_cp_accept_phy_link *cp;
1852
1853	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1854
1855	if (status)
1856		return;
1857
1858	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1859	if (!cp)
1860		return;
1861
1862	amp_write_remote_assoc(hdev, cp->phy_handle);
1863}
1864
/* Handle command status for HCI_OP_LE_CREATE_CONN.
 *
 * On success, record the initiator/responder address information the
 * SMP code will need later, and — when the connection is not driven
 * by the white list — arm the connection-attempt timeout.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1915
/* Handle command status for HCI_OP_LE_START_ENC. Only failures are
 * handled here: if encryption could not even be started on an
 * established link, the connection is terminated with an
 * authentication failure.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Only tear down links that are still up. */
	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
1945
/* Handle the Inquiry Complete event: clear the inquiry flag, wake up
 * any waiters, and either start resolving the names of discovered
 * devices or mark discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state tracking below only applies to mgmt users. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving names; if no entry needs one (or the request
	 * fails), discovery is over.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1986
1987static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1988{
1989	struct inquiry_data data;
1990	struct inquiry_info *info = (void *) (skb->data + 1);
1991	int num_rsp = *((__u8 *) skb->data);
1992
1993	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1994
1995	if (!num_rsp)
1996		return;
1997
1998	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1999		return;
2000
2001	hci_dev_lock(hdev);
2002
2003	for (; num_rsp; num_rsp--, info++) {
2004		u32 flags;
2005
2006		bacpy(&data.bdaddr, &info->bdaddr);
2007		data.pscan_rep_mode	= info->pscan_rep_mode;
2008		data.pscan_period_mode	= info->pscan_period_mode;
2009		data.pscan_mode		= info->pscan_mode;
2010		memcpy(data.dev_class, info->dev_class, 3);
2011		data.clock_offset	= info->clock_offset;
2012		data.rssi		= 0x00;
2013		data.ssp_mode		= 0x00;
2014
2015		flags = hci_inquiry_cache_update(hdev, &data, false);
2016
2017		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2018				  info->dev_class, 0, flags, NULL, 0, NULL, 0);
2019	}
2020
2021	hci_dev_unlock(hdev);
2022}
2023
/* Handle the Connection Complete event for BR/EDR ACL and (e)SCO
 * links: finalize the connection object on success, or report and
 * tear it down on failure.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A link reported as SCO may have been requested as
		 * eSCO; retry the lookup under the ESCO type.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) connections without
			 * a stored link key get the pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_page_scan(hdev, NULL);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	/* On failure the connection object is removed; on success only
	 * non-ACL links are confirmed here (ACL waits for remote
	 * features / config to finish).
	 */
	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2107
2108static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2109{
2110	struct hci_cp_reject_conn_req cp;
2111
2112	bacpy(&cp.bdaddr, bdaddr);
2113	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2114	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2115}
2116
/* Handle the Connection Request event: apply the accept/reject
 * policy (protocol verdict, blacklist, connectable/whitelist), then
 * either accept the link directly or defer the decision to the
 * protocol layer.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layer veto or defer the connection. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Blacklisted devices are always rejected. */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* When not connectable, only whitelisted devices may connect. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		    hci_reject_conn(hdev, &ev->bdaddr);
		    return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		/* NOTE(review): fixed 64 kbit/s air-mode parameters —
		 * presumably chosen for CVSD voice; verify against the
		 * Accept Synchronous Connection Request definition.
		 */
		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Defer the accept decision to the protocol layer. */
		conn->state = BT_CONNECT2;
		hci_proto_connect_cfm(conn, 0);
	}
}
2206
2207static u8 hci_to_mgmt_reason(u8 err)
2208{
2209	switch (err) {
2210	case HCI_ERROR_CONNECTION_TIMEOUT:
2211		return MGMT_DEV_DISCONN_TIMEOUT;
2212	case HCI_ERROR_REMOTE_USER_TERM:
2213	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2214	case HCI_ERROR_REMOTE_POWER_OFF:
2215		return MGMT_DEV_DISCONN_REMOTE;
2216	case HCI_ERROR_LOCAL_HOST_TERM:
2217		return MGMT_DEV_DISCONN_LOCAL_HOST;
2218	default:
2219		return MGMT_DEV_DISCONN_UNKNOWN;
2220	}
2221}
2222
/* Handle the Disconnection Complete event: notify mgmt, update link
 * keys and page scanning, re-queue auto-connect parameters, tear down
 * the connection, and re-enable advertising when applicable.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report the disconnect to mgmt if the connect was ever
	 * reported there in the first place.
	 */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_page_scan(hdev, NULL);
	}

	/* Devices marked for automatic connection are re-queued so the
	 * background scan can re-establish the link.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Remember the link type before the connection is freed. */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2300
/* Handle the Authentication Complete event: update the connection's
 * auth/security state and continue with encryption setup or report
 * the result to the protocol layer.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* A legacy (non-SSP) device cannot be meaningfully
		 * re-authenticated, so don't raise the security level.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* For SSP links, continue setup by enabling encryption;
		 * otherwise configuration is done.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* If encryption was waiting on this authentication, either
	 * request it now or report the failure.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2365
/* Handle the Remote Name Request Complete event: feed the resolved
 * name (or the failure) into the discovery machinery, then request
 * authentication if the connection still needs it.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	/* Report the name to the discovery code; NULL on failure. */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Kick off authentication unless one is already pending. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2407
/* Handle the HCI Encryption Change event: update the connection's
 * encryption-related flags, enforce Secure Connections Only policy
 * and notify the upper layers of the outcome.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE
			 * links are always AES-CCM encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			/* Encryption was switched off */
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption attempt on an established link is treated
	 * as an authentication failure and the link is torn down.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2479
2480static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2481					     struct sk_buff *skb)
2482{
2483	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2484	struct hci_conn *conn;
2485
2486	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2487
2488	hci_dev_lock(hdev);
2489
2490	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2491	if (conn) {
2492		if (!ev->status)
2493			set_bit(HCI_CONN_SECURE, &conn->flags);
2494
2495		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2496
2497		hci_key_change_cfm(conn, ev->status);
2498	}
2499
2500	hci_dev_unlock(hdev);
2501}
2502
/* Handle the HCI Read Remote Supported Features Complete event and
 * continue the connection setup sequence: read extended features when
 * both sides support SSP, otherwise request the remote name or notify
 * the management interface, and finally complete the connection if no
 * outgoing authentication is required.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Page 0 of the remote feature pages */
	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides are SSP capable, fetch the extended feature
	 * page before finishing the setup.
	 */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* No outgoing authentication needed: setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2552
/* Handle the HCI Command Complete event: dispatch the completed
 * command's return parameters to the matching hci_cc_* handler,
 * cancel the command timeout, complete any waiting request and, when
 * the controller signals that it can accept more commands (ncmd),
 * kick the command queue again.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters is the status code */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command completed, so the timeout no longer applies */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ncmd > 0 means the controller can accept another command;
	 * while resetting the queue is deliberately left alone.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2832
/* Handle the HCI Command Status event: dispatch the status to the
 * matching hci_cs_* handler, cancel the command timeout, complete a
 * waiting request on failure (or when no follow-up event is expected)
 * and re-arm the command queue when the controller allows it.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command was acknowledged; stop the command timeout */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete the request now only on failure, or when the sent
	 * command does not wait for a dedicated follow-up event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* ncmd > 0 means the controller can accept another command;
	 * while resetting the queue is deliberately left alone.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2925
2926static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2927{
2928	struct hci_ev_role_change *ev = (void *) skb->data;
2929	struct hci_conn *conn;
2930
2931	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2932
2933	hci_dev_lock(hdev);
2934
2935	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2936	if (conn) {
2937		if (!ev->status)
2938			conn->role = ev->role;
2939
2940		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2941
2942		hci_role_switch_cfm(conn, ev->status, ev->role);
2943	}
2944
2945	hci_dev_unlock(hdev);
2946}
2947
2948static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2949{
2950	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2951	int i;
2952
2953	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2954		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2955		return;
2956	}
2957
2958	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2959	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2960		BT_DBG("%s bad parameters", hdev->name);
2961		return;
2962	}
2963
2964	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2965
2966	for (i = 0; i < ev->num_hndl; i++) {
2967		struct hci_comp_pkts_info *info = &ev->handles[i];
2968		struct hci_conn *conn;
2969		__u16  handle, count;
2970
2971		handle = __le16_to_cpu(info->handle);
2972		count  = __le16_to_cpu(info->count);
2973
2974		conn = hci_conn_hash_lookup_handle(hdev, handle);
2975		if (!conn)
2976			continue;
2977
2978		conn->sent -= count;
2979
2980		switch (conn->type) {
2981		case ACL_LINK:
2982			hdev->acl_cnt += count;
2983			if (hdev->acl_cnt > hdev->acl_pkts)
2984				hdev->acl_cnt = hdev->acl_pkts;
2985			break;
2986
2987		case LE_LINK:
2988			if (hdev->le_pkts) {
2989				hdev->le_cnt += count;
2990				if (hdev->le_cnt > hdev->le_pkts)
2991					hdev->le_cnt = hdev->le_pkts;
2992			} else {
2993				hdev->acl_cnt += count;
2994				if (hdev->acl_cnt > hdev->acl_pkts)
2995					hdev->acl_cnt = hdev->acl_pkts;
2996			}
2997			break;
2998
2999		case SCO_LINK:
3000			hdev->sco_cnt += count;
3001			if (hdev->sco_cnt > hdev->sco_pkts)
3002				hdev->sco_cnt = hdev->sco_pkts;
3003			break;
3004
3005		default:
3006			BT_ERR("Unknown type %d conn %p", conn->type, conn);
3007			break;
3008		}
3009	}
3010
3011	queue_work(hdev->workqueue, &hdev->tx_work);
3012}
3013
3014static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3015						 __u16 handle)
3016{
3017	struct hci_chan *chan;
3018
3019	switch (hdev->dev_type) {
3020	case HCI_BREDR:
3021		return hci_conn_hash_lookup_handle(hdev, handle);
3022	case HCI_AMP:
3023		chan = hci_chan_lookup_handle(hdev, handle);
3024		if (chan)
3025			return chan->conn;
3026		break;
3027	default:
3028		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3029		break;
3030	}
3031
3032	return NULL;
3033}
3034
3035static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3036{
3037	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3038	int i;
3039
3040	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3041		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
3042		return;
3043	}
3044
3045	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3046	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3047		BT_DBG("%s bad parameters", hdev->name);
3048		return;
3049	}
3050
3051	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3052	       ev->num_hndl);
3053
3054	for (i = 0; i < ev->num_hndl; i++) {
3055		struct hci_comp_blocks_info *info = &ev->handles[i];
3056		struct hci_conn *conn = NULL;
3057		__u16  handle, block_count;
3058
3059		handle = __le16_to_cpu(info->handle);
3060		block_count = __le16_to_cpu(info->blocks);
3061
3062		conn = __hci_conn_lookup_handle(hdev, handle);
3063		if (!conn)
3064			continue;
3065
3066		conn->sent -= block_count;
3067
3068		switch (conn->type) {
3069		case ACL_LINK:
3070		case AMP_LINK:
3071			hdev->block_cnt += block_count;
3072			if (hdev->block_cnt > hdev->num_blocks)
3073				hdev->block_cnt = hdev->num_blocks;
3074			break;
3075
3076		default:
3077			BT_ERR("Unknown type %d conn %p", conn->type, conn);
3078			break;
3079		}
3080	}
3081
3082	queue_work(hdev->workqueue, &hdev->tx_work);
3083}
3084
3085static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3086{
3087	struct hci_ev_mode_change *ev = (void *) skb->data;
3088	struct hci_conn *conn;
3089
3090	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3091
3092	hci_dev_lock(hdev);
3093
3094	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3095	if (conn) {
3096		conn->mode = ev->mode;
3097
3098		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3099					&conn->flags)) {
3100			if (conn->mode == HCI_CM_ACTIVE)
3101				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3102			else
3103				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3104		}
3105
3106		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3107			hci_sco_setup(conn, ev->status);
3108	}
3109
3110	hci_dev_unlock(hdev);
3111}
3112
3113static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3114{
3115	struct hci_ev_pin_code_req *ev = (void *) skb->data;
3116	struct hci_conn *conn;
3117
3118	BT_DBG("%s", hdev->name);
3119
3120	hci_dev_lock(hdev);
3121
3122	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3123	if (!conn)
3124		goto unlock;
3125
3126	if (conn->state == BT_CONNECTED) {
3127		hci_conn_hold(conn);
3128		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3129		hci_conn_drop(conn);
3130	}
3131
3132	if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
3133	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3134		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3135			     sizeof(ev->bdaddr), &ev->bdaddr);
3136	} else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
3137		u8 secure;
3138
3139		if (conn->pending_sec_level == BT_SECURITY_HIGH)
3140			secure = 1;
3141		else
3142			secure = 0;
3143
3144		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3145	}
3146
3147unlock:
3148	hci_dev_unlock(hdev);
3149}
3150
/* Handle the HCI Link Key Request event: look up a stored key for the
 * peer and reply with it, unless the key is not strong enough for the
 * security level being established, in which case a negative reply
 * forces a fresh pairing.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* An unauthenticated key must not satisfy an ongoing
		 * authenticated (MITM) pairing request.
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Legacy combination keys from a short PIN are too weak
		 * for high or FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3209
/* Handle the HCI Link Key Notification event: store the new key,
 * notify user space and decide whether the key should persist beyond
 * the current connection.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed combination key keeps the original type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del(&key->list);
		kfree(key);
	} else if (conn) {
		/* Flag non-persistent keys to be flushed on disconnect */
		if (persistent)
			clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
		else
			set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	}

unlock:
	hci_dev_unlock(hdev);
}
3263
3264static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3265{
3266	struct hci_ev_clock_offset *ev = (void *) skb->data;
3267	struct hci_conn *conn;
3268
3269	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3270
3271	hci_dev_lock(hdev);
3272
3273	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3274	if (conn && !ev->status) {
3275		struct inquiry_entry *ie;
3276
3277		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3278		if (ie) {
3279			ie->data.clock_offset = ev->clock_offset;
3280			ie->timestamp = jiffies;
3281		}
3282	}
3283
3284	hci_dev_unlock(hdev);
3285}
3286
3287static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3288{
3289	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3290	struct hci_conn *conn;
3291
3292	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3293
3294	hci_dev_lock(hdev);
3295
3296	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3297	if (conn && !ev->status)
3298		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3299
3300	hci_dev_unlock(hdev);
3301}
3302
3303static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3304{
3305	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3306	struct inquiry_entry *ie;
3307
3308	BT_DBG("%s", hdev->name);
3309
3310	hci_dev_lock(hdev);
3311
3312	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3313	if (ie) {
3314		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3315		ie->timestamp = jiffies;
3316	}
3317
3318	hci_dev_unlock(hdev);
3319}
3320
3321static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3322					     struct sk_buff *skb)
3323{
3324	struct inquiry_data data;
3325	int num_rsp = *((__u8 *) skb->data);
3326
3327	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3328
3329	if (!num_rsp)
3330		return;
3331
3332	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3333		return;
3334
3335	hci_dev_lock(hdev);
3336
3337	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3338		struct inquiry_info_with_rssi_and_pscan_mode *info;
3339		info = (void *) (skb->data + 1);
3340
3341		for (; num_rsp; num_rsp--, info++) {
3342			u32 flags;
3343
3344			bacpy(&data.bdaddr, &info->bdaddr);
3345			data.pscan_rep_mode	= info->pscan_rep_mode;
3346			data.pscan_period_mode	= info->pscan_period_mode;
3347			data.pscan_mode		= info->pscan_mode;
3348			memcpy(data.dev_class, info->dev_class, 3);
3349			data.clock_offset	= info->clock_offset;
3350			data.rssi		= info->rssi;
3351			data.ssp_mode		= 0x00;
3352
3353			flags = hci_inquiry_cache_update(hdev, &data, false);
3354
3355			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3356					  info->dev_class, info->rssi,
3357					  flags, NULL, 0, NULL, 0);
3358		}
3359	} else {
3360		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3361
3362		for (; num_rsp; num_rsp--, info++) {
3363			u32 flags;
3364
3365			bacpy(&data.bdaddr, &info->bdaddr);
3366			data.pscan_rep_mode	= info->pscan_rep_mode;
3367			data.pscan_period_mode	= info->pscan_period_mode;
3368			data.pscan_mode		= 0x00;
3369			memcpy(data.dev_class, info->dev_class, 3);
3370			data.clock_offset	= info->clock_offset;
3371			data.rssi		= info->rssi;
3372			data.ssp_mode		= 0x00;
3373
3374			flags = hci_inquiry_cache_update(hdev, &data, false);
3375
3376			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3377					  info->dev_class, info->rssi,
3378					  flags, NULL, 0, NULL, 0);
3379		}
3380	}
3381
3382	hci_dev_unlock(hdev);
3383}
3384
/* Handle the HCI Read Remote Extended Features Complete event: record
 * the remote host feature page, derive the SSP/SC enabled state and
 * continue the connection setup sequence.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host feature bits (SSP/SC) */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Request the remote name, or report the connection to the
	 * management interface if that has not happened yet.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* No outgoing authentication needed: setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3450
3451static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3452				       struct sk_buff *skb)
3453{
3454	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3455	struct hci_conn *conn;
3456
3457	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3458
3459	hci_dev_lock(hdev);
3460
3461	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3462	if (!conn) {
3463		if (ev->link_type == ESCO_LINK)
3464			goto unlock;
3465
3466		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3467		if (!conn)
3468			goto unlock;
3469
3470		conn->type = SCO_LINK;
3471	}
3472
3473	switch (ev->status) {
3474	case 0x00:
3475		conn->handle = __le16_to_cpu(ev->handle);
3476		conn->state  = BT_CONNECTED;
3477
3478		hci_conn_add_sysfs(conn);
3479		break;
3480
3481	case 0x10:	/* Connection Accept Timeout */
3482	case 0x0d:	/* Connection Rejected due to Limited Resources */
3483	case 0x11:	/* Unsupported Feature or Parameter Value */
3484	case 0x1c:	/* SCO interval rejected */
3485	case 0x1a:	/* Unsupported Remote Feature */
3486	case 0x1f:	/* Unspecified error */
3487	case 0x20:	/* Unsupported LMP Parameter value */
3488		if (conn->out) {
3489			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3490					(hdev->esco_type & EDR_ESCO_MASK);
3491			if (hci_setup_sync(conn, conn->link->handle))
3492				goto unlock;
3493		}
3494		/* fall through */
3495
3496	default:
3497		conn->state = BT_CLOSED;
3498		break;
3499	}
3500
3501	hci_proto_connect_cfm(conn, ev->status);
3502	if (ev->status)
3503		hci_conn_del(conn);
3504
3505unlock:
3506	hci_dev_unlock(hdev);
3507}
3508
3509static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3510{
3511	size_t parsed = 0;
3512
3513	while (parsed < eir_len) {
3514		u8 field_len = eir[0];
3515
3516		if (field_len == 0)
3517			return parsed;
3518
3519		parsed += field_len + 1;
3520		eir += field_len + 1;
3521	}
3522
3523	return eir_len;
3524}
3525
3526static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3527					    struct sk_buff *skb)
3528{
3529	struct inquiry_data data;
3530	struct extended_inquiry_info *info = (void *) (skb->data + 1);
3531	int num_rsp = *((__u8 *) skb->data);
3532	size_t eir_len;
3533
3534	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3535
3536	if (!num_rsp)
3537		return;
3538
3539	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3540		return;
3541
3542	hci_dev_lock(hdev);
3543
3544	for (; num_rsp; num_rsp--, info++) {
3545		u32 flags;
3546		bool name_known;
3547
3548		bacpy(&data.bdaddr, &info->bdaddr);
3549		data.pscan_rep_mode	= info->pscan_rep_mode;
3550		data.pscan_period_mode	= info->pscan_period_mode;
3551		data.pscan_mode		= 0x00;
3552		memcpy(data.dev_class, info->dev_class, 3);
3553		data.clock_offset	= info->clock_offset;
3554		data.rssi		= info->rssi;
3555		data.ssp_mode		= 0x01;
3556
3557		if (test_bit(HCI_MGMT, &hdev->dev_flags))
3558			name_known = eir_has_data_type(info->data,
3559						       sizeof(info->data),
3560						       EIR_NAME_COMPLETE);
3561		else
3562			name_known = true;
3563
3564		flags = hci_inquiry_cache_update(hdev, &data, name_known);
3565
3566		eir_len = eir_get_length(info->data, sizeof(info->data));
3567
3568		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3569				  info->dev_class, info->rssi,
3570				  flags, info->data, eir_len, NULL, 0);
3571	}
3572
3573	hci_dev_unlock(hdev);
3574}
3575
/* Handle the HCI Encryption Key Refresh Complete event. Only LE links
 * are processed here; BR/EDR links are handled via auth_complete.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	/* On success the previously requested security level takes effect */
	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Refresh failed on an established link: disconnect it */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		/* Connection setup completes once encryption is resolved */
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Re-arm the disconnect timeout on an already-up link */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3625
3626static u8 hci_get_auth_req(struct hci_conn *conn)
3627{
3628	/* If remote requests no-bonding follow that lead */
3629	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3630	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3631		return conn->remote_auth | (conn->auth_type & 0x01);
3632
3633	/* If both remote and local have enough IO capabilities, require
3634	 * MITM protection
3635	 */
3636	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3637	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3638		return conn->remote_auth | 0x01;
3639
3640	/* No MITM protection possible so ignore remote requirement */
3641	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3642}
3643
/* Handle the HCI IO Capability Request event: decide whether pairing is
 * allowed and reply with our IO capability and authentication
 * requirements, or reject with a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of the pairing */
	hci_conn_hold(conn);

	/* Without mgmt in control there is nobody to answer on our behalf */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			/* As acceptor, combine local and remote requirements */
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;

		/* Advertise OOB data only if we have it for this peer and
		 * either initiated the connection or the remote indicated
		 * OOB support.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Pairing not permitted: send a negative reply */
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3717
3718static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3719{
3720	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3721	struct hci_conn *conn;
3722
3723	BT_DBG("%s", hdev->name);
3724
3725	hci_dev_lock(hdev);
3726
3727	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3728	if (!conn)
3729		goto unlock;
3730
3731	conn->remote_cap = ev->capability;
3732	conn->remote_auth = ev->authentication;
3733	if (ev->oob_data)
3734		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3735
3736unlock:
3737	hci_dev_unlock(hdev);
3738}
3739
/* Handle the HCI User Confirmation Request event: auto-accept, reject,
 * or forward the numeric-comparison request to user space depending on
 * the MITM requirements of both sides.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Without mgmt in control nobody can answer the request */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally defer the auto-accept by a configured delay */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	/* Hand the decision (or the authorization request) to user space */
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3814
3815static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3816					 struct sk_buff *skb)
3817{
3818	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3819
3820	BT_DBG("%s", hdev->name);
3821
3822	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3823		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3824}
3825
3826static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3827					struct sk_buff *skb)
3828{
3829	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3830	struct hci_conn *conn;
3831
3832	BT_DBG("%s", hdev->name);
3833
3834	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3835	if (!conn)
3836		return;
3837
3838	conn->passkey_notify = __le32_to_cpu(ev->passkey);
3839	conn->passkey_entered = 0;
3840
3841	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3842		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3843					 conn->dst_type, conn->passkey_notify,
3844					 conn->passkey_entered);
3845}
3846
3847static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3848{
3849	struct hci_ev_keypress_notify *ev = (void *) skb->data;
3850	struct hci_conn *conn;
3851
3852	BT_DBG("%s", hdev->name);
3853
3854	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3855	if (!conn)
3856		return;
3857
3858	switch (ev->type) {
3859	case HCI_KEYPRESS_STARTED:
3860		conn->passkey_entered = 0;
3861		return;
3862
3863	case HCI_KEYPRESS_ENTERED:
3864		conn->passkey_entered++;
3865		break;
3866
3867	case HCI_KEYPRESS_ERASED:
3868		conn->passkey_entered--;
3869		break;
3870
3871	case HCI_KEYPRESS_CLEARED:
3872		conn->passkey_entered = 0;
3873		break;
3874
3875	case HCI_KEYPRESS_COMPLETED:
3876		return;
3877	}
3878
3879	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3880		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3881					 conn->dst_type, conn->passkey_notify,
3882					 conn->passkey_entered);
3883}
3884
3885static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3886					 struct sk_buff *skb)
3887{
3888	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3889	struct hci_conn *conn;
3890
3891	BT_DBG("%s", hdev->name);
3892
3893	hci_dev_lock(hdev);
3894
3895	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3896	if (!conn)
3897		goto unlock;
3898
3899	/* Reset the authentication requirement to unknown */
3900	conn->remote_auth = 0xff;
3901
3902	/* To avoid duplicate auth_failed events to user space we check
3903	 * the HCI_CONN_AUTH_PEND flag which will be set if we
3904	 * initiated the authentication. A traditional auth_complete
3905	 * event gets always produced as initiator and is also mapped to
3906	 * the mgmt_auth_failed event */
3907	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3908		mgmt_auth_failed(conn, ev->status);
3909
3910	hci_conn_drop(conn);
3911
3912unlock:
3913	hci_dev_unlock(hdev);
3914}
3915
3916static void hci_remote_host_features_evt(struct hci_dev *hdev,
3917					 struct sk_buff *skb)
3918{
3919	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3920	struct inquiry_entry *ie;
3921	struct hci_conn *conn;
3922
3923	BT_DBG("%s", hdev->name);
3924
3925	hci_dev_lock(hdev);
3926
3927	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3928	if (conn)
3929		memcpy(conn->features[1], ev->features, 8);
3930
3931	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3932	if (ie)
3933		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3934
3935	hci_dev_unlock(hdev);
3936}
3937
/* Handle the HCI Remote OOB Data Request event: reply with the stored
 * out-of-band hash/randomizer values for the peer, or send a negative
 * reply when none are stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* OOB data is only managed through the management interface */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		/* With Secure Connections enabled both the P-192 and
		 * P-256 values are sent in the extended reply.
		 */
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.randomizer192, data->randomizer192,
			       sizeof(cp.randomizer192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.randomizer256, data->randomizer256,
			       sizeof(cp.randomizer256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			/* Legacy pairing only uses the P-192 values */
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.randomizer, data->randomizer192,
			       sizeof(cp.randomizer));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		/* No stored OOB data for this peer */
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3988
3989static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3990				      struct sk_buff *skb)
3991{
3992	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3993	struct hci_conn *hcon, *bredr_hcon;
3994
3995	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3996	       ev->status);
3997
3998	hci_dev_lock(hdev);
3999
4000	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4001	if (!hcon) {
4002		hci_dev_unlock(hdev);
4003		return;
4004	}
4005
4006	if (ev->status) {
4007		hci_conn_del(hcon);
4008		hci_dev_unlock(hdev);
4009		return;
4010	}
4011
4012	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4013
4014	hcon->state = BT_CONNECTED;
4015	bacpy(&hcon->dst, &bredr_hcon->dst);
4016
4017	hci_conn_hold(hcon);
4018	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4019	hci_conn_drop(hcon);
4020
4021	hci_conn_add_sysfs(hcon);
4022
4023	amp_physical_cfm(bredr_hcon, hcon);
4024
4025	hci_dev_unlock(hdev);
4026}
4027
/* Handle the AMP Logical Link Complete event: create the HCI channel
 * for the logical link and confirm it to the waiting L2CAP channel.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	/* NOTE(review): unlike most event handlers here, this one runs
	 * without taking hci_dev_lock — confirm the lookup and channel
	 * creation are safe without it.
	 */
	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Data now flows over the AMP controller's block MTU */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4065
4066static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4067					     struct sk_buff *skb)
4068{
4069	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4070	struct hci_chan *hchan;
4071
4072	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4073	       le16_to_cpu(ev->handle), ev->status);
4074
4075	if (ev->status)
4076		return;
4077
4078	hci_dev_lock(hdev);
4079
4080	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4081	if (!hchan)
4082		goto unlock;
4083
4084	amp_destroy_logical_link(hchan, ev->reason);
4085
4086unlock:
4087	hci_dev_unlock(hdev);
4088}
4089
4090static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4091					     struct sk_buff *skb)
4092{
4093	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4094	struct hci_conn *hcon;
4095
4096	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4097
4098	if (ev->status)
4099		return;
4100
4101	hci_dev_lock(hdev);
4102
4103	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4104	if (hcon) {
4105		hcon->state = BT_CLOSED;
4106		hci_conn_del(hcon);
4107	}
4108
4109	hci_dev_unlock(hdev);
4110}
4111
/* Handle the LE Connection Complete event: create or look up the
 * hci_conn, fill in the initiator/responder addresses, resolve RPAs to
 * identity addresses and complete (or fail) the connection setup.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* A tracked outgoing connection completed in time */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Notify user space only once per connection */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The pending connection attempt for this device succeeded, so
	 * remove it from the action list and release the reference that
	 * the params entry was holding on the connection.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	/* Re-evaluate passive scanning now that the conn state changed */
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4247
4248static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4249					    struct sk_buff *skb)
4250{
4251	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4252	struct hci_conn *conn;
4253
4254	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4255
4256	if (ev->status)
4257		return;
4258
4259	hci_dev_lock(hdev);
4260
4261	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4262	if (conn) {
4263		conn->le_conn_interval = le16_to_cpu(ev->interval);
4264		conn->le_conn_latency = le16_to_cpu(ev->latency);
4265		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4266	}
4267
4268	hci_dev_unlock(hdev);
4269}
4270
/* This function requires the caller holds hdev->lock */
/* Initiate an LE connection to an advertiser if its parameters entry in
 * pend_le_conns asks for it and the advertising type permits it.
 */
static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
				  u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connectioms from slave device are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
	}
}
4346
/* Process a single LE advertising report: trigger pending connections,
 * and either send a device-found event immediately or buffer the report
 * so it can be merged with the matching scan response.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	bool match;
	u32 flags;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4470
4471static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4472{
4473	u8 num_reports = skb->data[0];
4474	void *ptr = &skb->data[1];
4475
4476	hci_dev_lock(hdev);
4477
4478	while (num_reports--) {
4479		struct hci_ev_le_advertising_info *ev = ptr;
4480		s8 rssi;
4481
4482		rssi = ev->data[ev->length];
4483		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4484				   ev->bdaddr_type, rssi, ev->data, ev->length);
4485
4486		ptr += sizeof(*ev) + ev->length + 1;
4487	}
4488
4489	hci_dev_unlock(hdev);
4490}
4491
/* Handle the LE Long Term Key Request event: look up the stored LTK for
 * the connection and reply with it, or send a negative reply when no
 * key is available.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	/* Keys are looked up by EDIV/Rand and the role we hold on the link */
	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->role);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* The security level granted by this key takes effect once
	 * encryption completes.
	 */
	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		/* An STK is single-use: consume and free it */
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del(&ltk->list);
		kfree(ltk);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* No connection or no matching key: reject the request */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4544
4545static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4546				      u8 reason)
4547{
4548	struct hci_cp_le_conn_param_req_neg_reply cp;
4549
4550	cp.handle = cpu_to_le16(handle);
4551	cp.reason = reason;
4552
4553	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4554		     &cp);
4555}
4556
4557static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4558					     struct sk_buff *skb)
4559{
4560	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4561	struct hci_cp_le_conn_param_req_reply cp;
4562	struct hci_conn *hcon;
4563	u16 handle, min, max, latency, timeout;
4564
4565	handle = le16_to_cpu(ev->handle);
4566	min = le16_to_cpu(ev->interval_min);
4567	max = le16_to_cpu(ev->interval_max);
4568	latency = le16_to_cpu(ev->latency);
4569	timeout = le16_to_cpu(ev->timeout);
4570
4571	hcon = hci_conn_hash_lookup_handle(hdev, handle);
4572	if (!hcon || hcon->state != BT_CONNECTED)
4573		return send_conn_param_neg_reply(hdev, handle,
4574						 HCI_ERROR_UNKNOWN_CONN_ID);
4575
4576	if (hci_check_conn_params(min, max, latency, timeout))
4577		return send_conn_param_neg_reply(hdev, handle,
4578						 HCI_ERROR_INVALID_LL_PARAMS);
4579
4580	if (hcon->role == HCI_ROLE_MASTER) {
4581		struct hci_conn_params *params;
4582		u8 store_hint;
4583
4584		hci_dev_lock(hdev);
4585
4586		params = hci_conn_params_lookup(hdev, &hcon->dst,
4587						hcon->dst_type);
4588		if (params) {
4589			params->conn_min_interval = min;
4590			params->conn_max_interval = max;
4591			params->conn_latency = latency;
4592			params->supervision_timeout = timeout;
4593			store_hint = 0x01;
4594		} else{
4595			store_hint = 0x00;
4596		}
4597
4598		hci_dev_unlock(hdev);
4599
4600		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4601				    store_hint, min, max, latency, timeout);
4602	}
4603
4604	cp.handle = ev->handle;
4605	cp.interval_min = ev->interval_min;
4606	cp.interval_max = ev->interval_max;
4607	cp.latency = ev->latency;
4608	cp.timeout = ev->timeout;
4609	cp.min_ce_len = 0;
4610	cp.max_ce_len = 0;
4611
4612	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4613}
4614
4615static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4616{
4617	struct hci_ev_le_meta *le_ev = (void *) skb->data;
4618
4619	skb_pull(skb, sizeof(*le_ev));
4620
4621	switch (le_ev->subevent) {
4622	case HCI_EV_LE_CONN_COMPLETE:
4623		hci_le_conn_complete_evt(hdev, skb);
4624		break;
4625
4626	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4627		hci_le_conn_update_complete_evt(hdev, skb);
4628		break;
4629
4630	case HCI_EV_LE_ADVERTISING_REPORT:
4631		hci_le_adv_report_evt(hdev, skb);
4632		break;
4633
4634	case HCI_EV_LE_LTK_REQ:
4635		hci_le_ltk_request_evt(hdev, skb);
4636		break;
4637
4638	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4639		hci_le_remote_conn_param_req_evt(hdev, skb);
4640		break;
4641
4642	default:
4643		break;
4644	}
4645}
4646
4647static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4648{
4649	struct hci_ev_channel_selected *ev = (void *) skb->data;
4650	struct hci_conn *hcon;
4651
4652	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4653
4654	skb_pull(skb, sizeof(*ev));
4655
4656	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4657	if (!hcon)
4658		return;
4659
4660	amp_read_loc_assoc_final_data(hdev, hcon);
4661}
4662
4663void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4664{
4665	struct hci_event_hdr *hdr = (void *) skb->data;
4666	__u8 event = hdr->evt;
4667
4668	hci_dev_lock(hdev);
4669
4670	/* Received events are (currently) only needed when a request is
4671	 * ongoing so avoid unnecessary memory allocation.
4672	 */
4673	if (hci_req_pending(hdev)) {
4674		kfree_skb(hdev->recv_evt);
4675		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4676	}
4677
4678	hci_dev_unlock(hdev);
4679
4680	skb_pull(skb, HCI_EVENT_HDR_SIZE);
4681
4682	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
4683		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
4684		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
4685
4686		hci_req_cmd_complete(hdev, opcode, 0);
4687	}
4688
4689	switch (event) {
4690	case HCI_EV_INQUIRY_COMPLETE:
4691		hci_inquiry_complete_evt(hdev, skb);
4692		break;
4693
4694	case HCI_EV_INQUIRY_RESULT:
4695		hci_inquiry_result_evt(hdev, skb);
4696		break;
4697
4698	case HCI_EV_CONN_COMPLETE:
4699		hci_conn_complete_evt(hdev, skb);
4700		break;
4701
4702	case HCI_EV_CONN_REQUEST:
4703		hci_conn_request_evt(hdev, skb);
4704		break;
4705
4706	case HCI_EV_DISCONN_COMPLETE:
4707		hci_disconn_complete_evt(hdev, skb);
4708		break;
4709
4710	case HCI_EV_AUTH_COMPLETE:
4711		hci_auth_complete_evt(hdev, skb);
4712		break;
4713
4714	case HCI_EV_REMOTE_NAME:
4715		hci_remote_name_evt(hdev, skb);
4716		break;
4717
4718	case HCI_EV_ENCRYPT_CHANGE:
4719		hci_encrypt_change_evt(hdev, skb);
4720		break;
4721
4722	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4723		hci_change_link_key_complete_evt(hdev, skb);
4724		break;
4725
4726	case HCI_EV_REMOTE_FEATURES:
4727		hci_remote_features_evt(hdev, skb);
4728		break;
4729
4730	case HCI_EV_CMD_COMPLETE:
4731		hci_cmd_complete_evt(hdev, skb);
4732		break;
4733
4734	case HCI_EV_CMD_STATUS:
4735		hci_cmd_status_evt(hdev, skb);
4736		break;
4737
4738	case HCI_EV_ROLE_CHANGE:
4739		hci_role_change_evt(hdev, skb);
4740		break;
4741
4742	case HCI_EV_NUM_COMP_PKTS:
4743		hci_num_comp_pkts_evt(hdev, skb);
4744		break;
4745
4746	case HCI_EV_MODE_CHANGE:
4747		hci_mode_change_evt(hdev, skb);
4748		break;
4749
4750	case HCI_EV_PIN_CODE_REQ:
4751		hci_pin_code_request_evt(hdev, skb);
4752		break;
4753
4754	case HCI_EV_LINK_KEY_REQ:
4755		hci_link_key_request_evt(hdev, skb);
4756		break;
4757
4758	case HCI_EV_LINK_KEY_NOTIFY:
4759		hci_link_key_notify_evt(hdev, skb);
4760		break;
4761
4762	case HCI_EV_CLOCK_OFFSET:
4763		hci_clock_offset_evt(hdev, skb);
4764		break;
4765
4766	case HCI_EV_PKT_TYPE_CHANGE:
4767		hci_pkt_type_change_evt(hdev, skb);
4768		break;
4769
4770	case HCI_EV_PSCAN_REP_MODE:
4771		hci_pscan_rep_mode_evt(hdev, skb);
4772		break;
4773
4774	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4775		hci_inquiry_result_with_rssi_evt(hdev, skb);
4776		break;
4777
4778	case HCI_EV_REMOTE_EXT_FEATURES:
4779		hci_remote_ext_features_evt(hdev, skb);
4780		break;
4781
4782	case HCI_EV_SYNC_CONN_COMPLETE:
4783		hci_sync_conn_complete_evt(hdev, skb);
4784		break;
4785
4786	case HCI_EV_EXTENDED_INQUIRY_RESULT:
4787		hci_extended_inquiry_result_evt(hdev, skb);
4788		break;
4789
4790	case HCI_EV_KEY_REFRESH_COMPLETE:
4791		hci_key_refresh_complete_evt(hdev, skb);
4792		break;
4793
4794	case HCI_EV_IO_CAPA_REQUEST:
4795		hci_io_capa_request_evt(hdev, skb);
4796		break;
4797
4798	case HCI_EV_IO_CAPA_REPLY:
4799		hci_io_capa_reply_evt(hdev, skb);
4800		break;
4801
4802	case HCI_EV_USER_CONFIRM_REQUEST:
4803		hci_user_confirm_request_evt(hdev, skb);
4804		break;
4805
4806	case HCI_EV_USER_PASSKEY_REQUEST:
4807		hci_user_passkey_request_evt(hdev, skb);
4808		break;
4809
4810	case HCI_EV_USER_PASSKEY_NOTIFY:
4811		hci_user_passkey_notify_evt(hdev, skb);
4812		break;
4813
4814	case HCI_EV_KEYPRESS_NOTIFY:
4815		hci_keypress_notify_evt(hdev, skb);
4816		break;
4817
4818	case HCI_EV_SIMPLE_PAIR_COMPLETE:
4819		hci_simple_pair_complete_evt(hdev, skb);
4820		break;
4821
4822	case HCI_EV_REMOTE_HOST_FEATURES:
4823		hci_remote_host_features_evt(hdev, skb);
4824		break;
4825
4826	case HCI_EV_LE_META:
4827		hci_le_meta_evt(hdev, skb);
4828		break;
4829
4830	case HCI_EV_CHANNEL_SELECTED:
4831		hci_chan_selected_evt(hdev, skb);
4832		break;
4833
4834	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4835		hci_remote_oob_data_request_evt(hdev, skb);
4836		break;
4837
4838	case HCI_EV_PHY_LINK_COMPLETE:
4839		hci_phy_link_complete_evt(hdev, skb);
4840		break;
4841
4842	case HCI_EV_LOGICAL_LINK_COMPLETE:
4843		hci_loglink_complete_evt(hdev, skb);
4844		break;
4845
4846	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4847		hci_disconn_loglink_complete_evt(hdev, skb);
4848		break;
4849
4850	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4851		hci_disconn_phylink_complete_evt(hdev, skb);
4852		break;
4853
4854	case HCI_EV_NUM_COMP_BLOCKS:
4855		hci_num_comp_blocks_evt(hdev, skb);
4856		break;
4857
4858	default:
4859		BT_DBG("%s event 0x%2.2x", hdev->name, event);
4860		break;
4861	}
4862
4863	kfree_skb(skb);
4864	hdev->stat.evt_rx++;
4865}
4866