1/*
2   BlueZ - Bluetooth protocol stack for Linux
3   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License version 2 as
9   published by the Free Software Foundation;
10
11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22   SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI event handling. */
26
27#include <asm/unaligned.h>
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/mgmt.h>
32#include <net/bluetooth/a2mp.h>
33#include <net/bluetooth/amp.h>
34
35/* Handle HCI Event packets */
36
/* Command Complete handler for HCI_Inquiry_Cancel.
 *
 * On failure the error is reported to the management interface.  On
 * success the local inquiry flag is cleared, any waiter on that bit is
 * woken, discovery is marked stopped and pending connection attempts
 * are resumed.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_stop_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	/* Clear the flag before waking any wait_on_bit() sleeper. */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* Inquiry no longer blocks queued connection requests. */
	hci_conn_check_pending(hdev);
}
60
61static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
62{
63	__u8 status = *((__u8 *) skb->data);
64
65	BT_DBG("%s status 0x%2.2x", hdev->name, status);
66
67	if (status)
68		return;
69
70	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
71}
72
73static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74{
75	__u8 status = *((__u8 *) skb->data);
76
77	BT_DBG("%s status 0x%2.2x", hdev->name, status);
78
79	if (status)
80		return;
81
82	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
83
84	hci_conn_check_pending(hdev);
85}
86
/* Command Complete handler for HCI_Remote_Name_Request_Cancel.
 *
 * Intentionally log-only; no local state is tracked for the cancel
 * itself.  NOTE(review): presumably the final outcome arrives via the
 * Remote Name Request Complete event - confirm against the event
 * dispatch table.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
92
93static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94{
95	struct hci_rp_role_discovery *rp = (void *) skb->data;
96	struct hci_conn *conn;
97
98	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
99
100	if (rp->status)
101		return;
102
103	hci_dev_lock(hdev);
104
105	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106	if (conn) {
107		if (rp->role)
108			conn->link_mode &= ~HCI_LM_MASTER;
109		else
110			conn->link_mode |= HCI_LM_MASTER;
111	}
112
113	hci_dev_unlock(hdev);
114}
115
116static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117{
118	struct hci_rp_read_link_policy *rp = (void *) skb->data;
119	struct hci_conn *conn;
120
121	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
122
123	if (rp->status)
124		return;
125
126	hci_dev_lock(hdev);
127
128	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129	if (conn)
130		conn->link_policy = __le16_to_cpu(rp->policy);
131
132	hci_dev_unlock(hdev);
133}
134
135static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136{
137	struct hci_rp_write_link_policy *rp = (void *) skb->data;
138	struct hci_conn *conn;
139	void *sent;
140
141	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
142
143	if (rp->status)
144		return;
145
146	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147	if (!sent)
148		return;
149
150	hci_dev_lock(hdev);
151
152	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153	if (conn)
154		conn->link_policy = get_unaligned_le16(sent + 2);
155
156	hci_dev_unlock(hdev);
157}
158
159static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
160					struct sk_buff *skb)
161{
162	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
163
164	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165
166	if (rp->status)
167		return;
168
169	hdev->link_policy = __le16_to_cpu(rp->policy);
170}
171
172static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
173					 struct sk_buff *skb)
174{
175	__u8 status = *((__u8 *) skb->data);
176	void *sent;
177
178	BT_DBG("%s status 0x%2.2x", hdev->name, status);
179
180	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
181	if (!sent)
182		return;
183
184	if (!status)
185		hdev->link_policy = get_unaligned_le16(sent);
186}
187
/* Command Complete handler for HCI_Reset.
 *
 * Clears the in-progress reset flag and returns all non-persistent
 * per-device state (discovery, tx power, advertising data) to its
 * post-reset defaults.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Advertising data does not survive a controller reset. */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;
}
206
/* Command Complete handler for HCI_Write_Local_Name.
 *
 * When mgmt is in use, the result (success or failure) is always
 * forwarded to it; otherwise the cached name is updated only on
 * success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The name we tried to set lives in the sent command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
227
228static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
229{
230	struct hci_rp_read_local_name *rp = (void *) skb->data;
231
232	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
233
234	if (rp->status)
235		return;
236
237	if (test_bit(HCI_SETUP, &hdev->dev_flags))
238		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
239}
240
/* Command Complete handler for HCI_Write_Authentication_Enable.
 *
 * Updates the HCI_AUTH flag to match the parameter we sent, then
 * (regardless of status) notifies mgmt of the outcome when it is in
 * use.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		/* First byte of the command is the auth enable value. */
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}
264
265static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
266{
267	__u8 status = *((__u8 *) skb->data);
268	void *sent;
269
270	BT_DBG("%s status 0x%2.2x", hdev->name, status);
271
272	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
273	if (!sent)
274		return;
275
276	if (!status) {
277		__u8 param = *((__u8 *) sent);
278
279		if (param)
280			set_bit(HCI_ENCRYPT, &hdev->flags);
281		else
282			clear_bit(HCI_ENCRYPT, &hdev->flags);
283	}
284}
285
/* Command Complete handler for HCI_Write_Scan_Enable.
 *
 * Synchronizes HCI_ISCAN/HCI_PSCAN with the scan mode we requested and
 * emits the matching mgmt discoverable/connectable events, but only on
 * actual transitions.  An inquiry-scan enable also arms the
 * discoverable timeout if one was configured.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	/* Scan mode requested in the original command. */
	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* Capture-and-clear so transitions can be detected below. */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
		/* Schedule automatic exit from discoverable mode. */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
}
333
334static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
335{
336	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
337
338	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
339
340	if (rp->status)
341		return;
342
343	memcpy(hdev->dev_class, rp->dev_class, 3);
344
345	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
347}
348
/* Command Complete handler for HCI_Write_Class_of_Device.
 *
 * Updates the cached class on success and, when mgmt is in use,
 * reports the result (success or failure) to it.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The 3-byte class we tried to set lives in the sent command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
370
371static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
372{
373	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
374	__u16 setting;
375
376	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
377
378	if (rp->status)
379		return;
380
381	setting = __le16_to_cpu(rp->voice_setting);
382
383	if (hdev->voice_setting == setting)
384		return;
385
386	hdev->voice_setting = setting;
387
388	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
389
390	if (hdev->notify)
391		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
392}
393
394static void hci_cc_write_voice_setting(struct hci_dev *hdev,
395				       struct sk_buff *skb)
396{
397	__u8 status = *((__u8 *) skb->data);
398	__u16 setting;
399	void *sent;
400
401	BT_DBG("%s status 0x%2.2x", hdev->name, status);
402
403	if (status)
404		return;
405
406	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
407	if (!sent)
408		return;
409
410	setting = get_unaligned_le16(sent);
411
412	if (hdev->voice_setting == setting)
413		return;
414
415	hdev->voice_setting = setting;
416
417	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
418
419	if (hdev->notify)
420		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
421}
422
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode.
 *
 * On success the host SSP feature bit is kept in sync with the mode we
 * requested.  When mgmt is in use it always gets the result; otherwise
 * the HCI_SSP_ENABLED flag is toggled directly on success.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		/* Page 1 feature bits reflect host-side SSP support. */
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
450
/* Command Complete handler for HCI_Read_Local_Version_Information.
 *
 * Caches the HCI/LMP version, revision and manufacturer identifiers
 * reported by the controller.
 */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->hci_ver = rp->hci_ver;
	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
	hdev->lmp_ver = rp->lmp_ver;
	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

	BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
	       hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
}
469
470static void hci_cc_read_local_commands(struct hci_dev *hdev,
471				       struct sk_buff *skb)
472{
473	struct hci_rp_read_local_commands *rp = (void *) skb->data;
474
475	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
476
477	if (!rp->status)
478		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
479}
480
/* Command Complete handler for HCI_Read_Local_Supported_Features.
 *
 * Caches the page-0 feature bits and derives the default ACL packet
 * types and (e)SCO packet types the controller can use.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets. */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO packet types. */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types. */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types. */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
	       hdev->features[0][0], hdev->features[0][1],
	       hdev->features[0][2], hdev->features[0][3],
	       hdev->features[0][4], hdev->features[0][5],
	       hdev->features[0][6], hdev->features[0][7]);
}
536
537static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
538					   struct sk_buff *skb)
539{
540	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
541
542	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
543
544	if (rp->status)
545		return;
546
547	hdev->max_page = rp->max_page;
548
549	if (rp->page < HCI_MAX_PAGES)
550		memcpy(hdev->features[rp->page], rp->features, 8);
551}
552
553static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
554					  struct sk_buff *skb)
555{
556	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
557
558	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
559
560	if (!rp->status)
561		hdev->flow_ctl_mode = rp->mode;
562}
563
/* Command Complete handler for HCI_Read_Buffer_Size.
 *
 * Caches the controller's ACL/SCO MTUs and packet counts and seeds the
 * available-credit counters from them.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report bogus SCO values; override them. */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	/* All buffers start out free. */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
589
590static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
591{
592	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
593
594	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
595
596	if (!rp->status)
597		bacpy(&hdev->bdaddr, &rp->bdaddr);
598}
599
600static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
601					   struct sk_buff *skb)
602{
603	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
604
605	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
606
607	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
608		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
609		hdev->page_scan_window = __le16_to_cpu(rp->window);
610	}
611}
612
613static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
614					    struct sk_buff *skb)
615{
616	u8 status = *((u8 *) skb->data);
617	struct hci_cp_write_page_scan_activity *sent;
618
619	BT_DBG("%s status 0x%2.2x", hdev->name, status);
620
621	if (status)
622		return;
623
624	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
625	if (!sent)
626		return;
627
628	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
629	hdev->page_scan_window = __le16_to_cpu(sent->window);
630}
631
632static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
633					   struct sk_buff *skb)
634{
635	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
636
637	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
638
639	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
640		hdev->page_scan_type = rp->type;
641}
642
643static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
644					struct sk_buff *skb)
645{
646	u8 status = *((u8 *) skb->data);
647	u8 *type;
648
649	BT_DBG("%s status 0x%2.2x", hdev->name, status);
650
651	if (status)
652		return;
653
654	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
655	if (type)
656		hdev->page_scan_type = *type;
657}
658
/* Command Complete handler for HCI_Read_Data_Block_Size.
 *
 * Caches the block-based flow-control parameters and seeds the free
 * block counter.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	/* All blocks start out free. */
	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
678
/* Command Complete handler for HCI_Read_Local_AMP_Info.
 *
 * Caches the AMP controller capabilities on success.  In either case
 * an A2MP Get Info response is sent so the peer is never left waiting.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
703
704static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
705					struct sk_buff *skb)
706{
707	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
708	struct amp_assoc *assoc = &hdev->loc_assoc;
709	size_t rem_len, frag_len;
710
711	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
712
713	if (rp->status)
714		goto a2mp_rsp;
715
716	frag_len = skb->len - sizeof(*rp);
717	rem_len = __le16_to_cpu(rp->rem_len);
718
719	if (rem_len > frag_len) {
720		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
721
722		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
723		assoc->offset += frag_len;
724
725		/* Read other fragments */
726		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
727
728		return;
729	}
730
731	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
732	assoc->len = assoc->offset + rem_len;
733	assoc->offset = 0;
734
735a2mp_rsp:
736	/* Send A2MP Rsp when all fragments are received */
737	a2mp_send_getampassoc_rsp(hdev, rp->status);
738	a2mp_send_create_phy_link_req(hdev, rp->status);
739}
740
741static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
742					 struct sk_buff *skb)
743{
744	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
745
746	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
747
748	if (!rp->status)
749		hdev->inq_tx_power = rp->tx_power;
750}
751
/* Command Complete handler for HCI_PIN_Code_Request_Reply.
 *
 * mgmt is always told the outcome first; on success the PIN length
 * from the sent command is recorded on the matching ACL connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	/* Remember the PIN length for later key-type decisions. */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
779
780static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
781{
782	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
783
784	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
785
786	hci_dev_lock(hdev);
787
788	if (test_bit(HCI_MGMT, &hdev->dev_flags))
789		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
790						 rp->status);
791
792	hci_dev_unlock(hdev);
793}
794
/* Command Complete handler for HCI_LE_Read_Buffer_Size.
 *
 * Caches the LE ACL MTU and packet count and seeds the LE credit
 * counter.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	/* All LE buffers start out free. */
	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
812
813static void hci_cc_le_read_local_features(struct hci_dev *hdev,
814					  struct sk_buff *skb)
815{
816	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
817
818	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
819
820	if (!rp->status)
821		memcpy(hdev->le_features, rp->features, 8);
822}
823
824static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
825					struct sk_buff *skb)
826{
827	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
828
829	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
830
831	if (!rp->status)
832		hdev->adv_tx_power = rp->tx_power;
833}
834
835static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
836{
837	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
838
839	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
840
841	hci_dev_lock(hdev);
842
843	if (test_bit(HCI_MGMT, &hdev->dev_flags))
844		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
845						 rp->status);
846
847	hci_dev_unlock(hdev);
848}
849
850static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
851					  struct sk_buff *skb)
852{
853	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
854
855	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
856
857	hci_dev_lock(hdev);
858
859	if (test_bit(HCI_MGMT, &hdev->dev_flags))
860		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
861						     ACL_LINK, 0, rp->status);
862
863	hci_dev_unlock(hdev);
864}
865
866static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
867{
868	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
869
870	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
871
872	hci_dev_lock(hdev);
873
874	if (test_bit(HCI_MGMT, &hdev->dev_flags))
875		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
876						 0, rp->status);
877
878	hci_dev_unlock(hdev);
879}
880
881static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
882					  struct sk_buff *skb)
883{
884	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
885
886	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
887
888	hci_dev_lock(hdev);
889
890	if (test_bit(HCI_MGMT, &hdev->dev_flags))
891		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
892						     ACL_LINK, 0, rp->status);
893
894	hci_dev_unlock(hdev);
895}
896
897static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
898					     struct sk_buff *skb)
899{
900	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
901
902	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
903
904	hci_dev_lock(hdev);
905	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
906						rp->randomizer, rp->status);
907	hci_dev_unlock(hdev);
908}
909
/* Command Complete handler for HCI_LE_Set_Advertise_Enable.
 *
 * Synchronizes the HCI_LE_PERIPHERAL flag with the enable value we
 * sent and, outside of controller init, refreshes the advertising
 * data.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* The single command byte is the advertise enable value. */
		if (*sent)
			set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
		else
			clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
	}

	/* During init the setup sequence handles advertising data itself. */
	if (!test_bit(HCI_INIT, &hdev->flags)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		hci_update_ad(&req);
		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);
}
939
940static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
941{
942	__u8 status = *((__u8 *) skb->data);
943
944	BT_DBG("%s status 0x%2.2x", hdev->name, status);
945
946	if (status) {
947		hci_dev_lock(hdev);
948		mgmt_start_discovery_failed(hdev, status);
949		hci_dev_unlock(hdev);
950		return;
951	}
952}
953
/* Command Complete handler for HCI_LE_Set_Scan_Enable.
 *
 * Tracks the HCI_LE_SCAN flag and drives the discovery state machine
 * according to whether we enabled or disabled scanning and whether the
 * command succeeded.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_start_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		hci_dev_unlock(hdev);
		break;

	case LE_SCAN_DISABLE:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_stop_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* Interleaved discovery continues with a BR/EDR phase
		 * instead of stopping. */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    hdev->discovery.state == DISCOVERY_FINDING) {
			mgmt_interleaved_discovery(hdev);
		} else {
			hci_dev_lock(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			hci_dev_unlock(hdev);
		}

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1008
1009static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1010					   struct sk_buff *skb)
1011{
1012	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1013
1014	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1015
1016	if (!rp->status)
1017		hdev->le_white_list_size = rp->size;
1018}
1019
1020static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1021					    struct sk_buff *skb)
1022{
1023	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1024
1025	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1026
1027	if (!rp->status)
1028		memcpy(hdev->le_states, rp->le_states, 8);
1029}
1030
/* Command Complete handler for HCI_Write_LE_Host_Supported.
 *
 * Keeps the host-side LE and simultaneous LE/BR-EDR feature bits in
 * sync with the values we sent, and notifies mgmt of the result
 * outside of controller initialization.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (!status) {
		/* Page 1 feature bits reflect host-side support. */
		if (sent->le)
			hdev->features[1][0] |= LMP_HOST_LE;
		else
			hdev->features[1][0] &= ~LMP_HOST_LE;

		if (sent->simul)
			hdev->features[1][0] |= LMP_HOST_LE_BREDR;
		else
			hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_INIT, &hdev->flags))
		mgmt_le_enable_complete(hdev, sent->le, status);
}
1059
1060static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1061					  struct sk_buff *skb)
1062{
1063	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1064
1065	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1066	       hdev->name, rp->status, rp->phy_handle);
1067
1068	if (rp->status)
1069		return;
1070
1071	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1072}
1073
/* Command Status handler for HCI_Inquiry.
 *
 * On failure, pending connection attempts are resumed and mgmt (if in
 * use) is told discovery could not start.  On success, the inquiry
 * flag is set and discovery moves to the finding state.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);
}
1093
/* Command Status handler for HCI_Create_Connection.
 *
 * On failure the matching outgoing connection is torn down, except for
 * status 0x0c (Command Disallowed) on the first attempts, where the
 * connection is parked in BT_CONNECT2 for a retry.  On success a
 * connection object is created if none exists yet.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c = Command Disallowed; retry up to twice. */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			/* Connection initiated elsewhere (e.g. by the
			 * controller); track it as outgoing master. */
			conn = hci_conn_add(hdev, ACL_LINK, 0, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1133
/* Command Status handler for HCI_Add_SCO_Connection.
 *
 * Only failures need handling: the SCO connection attached to the
 * referenced ACL link is closed and deleted.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* The command referenced the ACL link carrying the SCO. */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1168
1169static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1170{
1171	struct hci_cp_auth_requested *cp;
1172	struct hci_conn *conn;
1173
1174	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1175
1176	if (!status)
1177		return;
1178
1179	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1180	if (!cp)
1181		return;
1182
1183	hci_dev_lock(hdev);
1184
1185	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1186	if (conn) {
1187		if (conn->state == BT_CONFIG) {
1188			hci_proto_connect_cfm(conn, status);
1189			hci_conn_drop(conn);
1190		}
1191	}
1192
1193	hci_dev_unlock(hdev);
1194}
1195
1196static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1197{
1198	struct hci_cp_set_conn_encrypt *cp;
1199	struct hci_conn *conn;
1200
1201	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1202
1203	if (!status)
1204		return;
1205
1206	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1207	if (!cp)
1208		return;
1209
1210	hci_dev_lock(hdev);
1211
1212	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1213	if (conn) {
1214		if (conn->state == BT_CONFIG) {
1215			hci_proto_connect_cfm(conn, status);
1216			hci_conn_drop(conn);
1217		}
1218	}
1219
1220	hci_dev_unlock(hdev);
1221}
1222
1223static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1224				    struct hci_conn *conn)
1225{
1226	if (conn->state != BT_CONFIG || !conn->out)
1227		return 0;
1228
1229	if (conn->pending_sec_level == BT_SECURITY_SDP)
1230		return 0;
1231
1232	/* Only request authentication for SSP connections or non-SSP
1233	 * devices with sec_level HIGH or if MITM protection is requested */
1234	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1235	    conn->pending_sec_level != BT_SECURITY_HIGH)
1236		return 0;
1237
1238	return 1;
1239}
1240
1241static int hci_resolve_name(struct hci_dev *hdev,
1242				   struct inquiry_entry *e)
1243{
1244	struct hci_cp_remote_name_req cp;
1245
1246	memset(&cp, 0, sizeof(cp));
1247
1248	bacpy(&cp.bdaddr, &e->data.bdaddr);
1249	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1250	cp.pscan_mode = e->data.pscan_mode;
1251	cp.clock_offset = e->data.clock_offset;
1252
1253	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1254}
1255
1256static bool hci_resolve_next_name(struct hci_dev *hdev)
1257{
1258	struct discovery_state *discov = &hdev->discovery;
1259	struct inquiry_entry *e;
1260
1261	if (list_empty(&discov->resolve))
1262		return false;
1263
1264	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1265	if (!e)
1266		return false;
1267
1268	if (hci_resolve_name(hdev, e) == 0) {
1269		e->name_state = NAME_PENDING;
1270		return true;
1271	}
1272
1273	return false;
1274}
1275
/* Handle the name-resolution bookkeeping after a remote name lookup
 * finished (or failed, in which case name is NULL and name_len is 0).
 * Notifies mgmt of the connection/name and advances the discovery
 * state machine to the next pending entry or to DISCOVERY_STOPPED.
 * Caller holds hdev lock.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* First successful name for this connection: announce it to mgmt */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* A stop was requested while resolving; finish up immediately */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More entries waiting: keep resolving, otherwise fall through */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1318
/* Command status handler for HCI_OP_REMOTE_NAME_REQ. Only a failed
 * status is handled here: the pending-name bookkeeping is flushed and,
 * for outgoing connections still in config, authentication is kicked
 * off since no Remote Name Request Complete event will arrive.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* With mgmt active, report the failed lookup (NULL name) so the
	 * discovery state machine can move on to the next entry */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending;
	 * note this inner cp shadows the command-data pointer above */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1357
1358static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1359{
1360	struct hci_cp_read_remote_features *cp;
1361	struct hci_conn *conn;
1362
1363	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1364
1365	if (!status)
1366		return;
1367
1368	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1369	if (!cp)
1370		return;
1371
1372	hci_dev_lock(hdev);
1373
1374	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1375	if (conn) {
1376		if (conn->state == BT_CONFIG) {
1377			hci_proto_connect_cfm(conn, status);
1378			hci_conn_drop(conn);
1379		}
1380	}
1381
1382	hci_dev_unlock(hdev);
1383}
1384
1385static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1386{
1387	struct hci_cp_read_remote_ext_features *cp;
1388	struct hci_conn *conn;
1389
1390	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1391
1392	if (!status)
1393		return;
1394
1395	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1396	if (!cp)
1397		return;
1398
1399	hci_dev_lock(hdev);
1400
1401	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1402	if (conn) {
1403		if (conn->state == BT_CONFIG) {
1404			hci_proto_connect_cfm(conn, status);
1405			hci_conn_drop(conn);
1406		}
1407	}
1408
1409	hci_dev_unlock(hdev);
1410}
1411
1412static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1413{
1414	struct hci_cp_setup_sync_conn *cp;
1415	struct hci_conn *acl, *sco;
1416	__u16 handle;
1417
1418	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1419
1420	if (!status)
1421		return;
1422
1423	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1424	if (!cp)
1425		return;
1426
1427	handle = __le16_to_cpu(cp->handle);
1428
1429	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1430
1431	hci_dev_lock(hdev);
1432
1433	acl = hci_conn_hash_lookup_handle(hdev, handle);
1434	if (acl) {
1435		sco = acl->link;
1436		if (sco) {
1437			sco->state = BT_CLOSED;
1438
1439			hci_proto_connect_cfm(sco, status);
1440			hci_conn_del(sco);
1441		}
1442	}
1443
1444	hci_dev_unlock(hdev);
1445}
1446
1447static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1448{
1449	struct hci_cp_sniff_mode *cp;
1450	struct hci_conn *conn;
1451
1452	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1453
1454	if (!status)
1455		return;
1456
1457	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1458	if (!cp)
1459		return;
1460
1461	hci_dev_lock(hdev);
1462
1463	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1464	if (conn) {
1465		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1466
1467		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1468			hci_sco_setup(conn, status);
1469	}
1470
1471	hci_dev_unlock(hdev);
1472}
1473
1474static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1475{
1476	struct hci_cp_exit_sniff_mode *cp;
1477	struct hci_conn *conn;
1478
1479	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1480
1481	if (!status)
1482		return;
1483
1484	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1485	if (!cp)
1486		return;
1487
1488	hci_dev_lock(hdev);
1489
1490	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1491	if (conn) {
1492		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1493
1494		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1495			hci_sco_setup(conn, status);
1496	}
1497
1498	hci_dev_unlock(hdev);
1499}
1500
1501static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1502{
1503	struct hci_cp_disconnect *cp;
1504	struct hci_conn *conn;
1505
1506	if (!status)
1507		return;
1508
1509	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1510	if (!cp)
1511		return;
1512
1513	hci_dev_lock(hdev);
1514
1515	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1516	if (conn)
1517		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1518				       conn->dst_type, status);
1519
1520	hci_dev_unlock(hdev);
1521}
1522
1523static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1524{
1525	struct hci_conn *conn;
1526
1527	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1528
1529	if (status) {
1530		hci_dev_lock(hdev);
1531
1532		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1533		if (!conn) {
1534			hci_dev_unlock(hdev);
1535			return;
1536		}
1537
1538		BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
1539
1540		conn->state = BT_CLOSED;
1541		mgmt_connect_failed(hdev, &conn->dst, conn->type,
1542				    conn->dst_type, status);
1543		hci_proto_connect_cfm(conn, status);
1544		hci_conn_del(conn);
1545
1546		hci_dev_unlock(hdev);
1547	}
1548}
1549
1550static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1551{
1552	struct hci_cp_create_phy_link *cp;
1553
1554	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1555
1556	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1557	if (!cp)
1558		return;
1559
1560	hci_dev_lock(hdev);
1561
1562	if (status) {
1563		struct hci_conn *hcon;
1564
1565		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1566		if (hcon)
1567			hci_conn_del(hcon);
1568	} else {
1569		amp_write_remote_assoc(hdev, cp->phy_handle);
1570	}
1571
1572	hci_dev_unlock(hdev);
1573}
1574
1575static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1576{
1577	struct hci_cp_accept_phy_link *cp;
1578
1579	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1580
1581	if (status)
1582		return;
1583
1584	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1585	if (!cp)
1586		return;
1587
1588	amp_write_remote_assoc(hdev, cp->phy_handle);
1589}
1590
/* Handle the Inquiry Complete event: wake waiters on HCI_INQUIRY and,
 * when mgmt-driven discovery is active, either start resolving cached
 * device names or mark discovery stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	/* If no inquiry was in flight there is nothing to complete */
	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state handling below only applies to mgmt users */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	/* Nothing left to resolve: discovery is done */
	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Kick off name resolution for the first entry that needs it */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1631
1632static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1633{
1634	struct inquiry_data data;
1635	struct inquiry_info *info = (void *) (skb->data + 1);
1636	int num_rsp = *((__u8 *) skb->data);
1637
1638	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1639
1640	if (!num_rsp)
1641		return;
1642
1643	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1644		return;
1645
1646	hci_dev_lock(hdev);
1647
1648	for (; num_rsp; num_rsp--, info++) {
1649		bool name_known, ssp;
1650
1651		bacpy(&data.bdaddr, &info->bdaddr);
1652		data.pscan_rep_mode	= info->pscan_rep_mode;
1653		data.pscan_period_mode	= info->pscan_period_mode;
1654		data.pscan_mode		= info->pscan_mode;
1655		memcpy(data.dev_class, info->dev_class, 3);
1656		data.clock_offset	= info->clock_offset;
1657		data.rssi		= 0x00;
1658		data.ssp_mode		= 0x00;
1659
1660		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1661		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1662				  info->dev_class, 0, !name_known, ssp, NULL,
1663				  0);
1664	}
1665
1666	hci_dev_unlock(hdev);
1667}
1668
/* Handle the Connection Complete event for ACL/SCO links: transition
 * the matching hci_conn to its next state, issue the follow-up
 * commands for ACL setup, and confirm or tear down on failure.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO request may have been downgraded to SCO by the
		 * controller; retry the lookup under ESCO_LINK */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Legacy pairing may follow: give incoming non-SSP
			 * links without a stored key a longer timeout */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
					    conn->dst_type, ev->status);
	}

	/* Start any SCO setup that was deferred behind this ACL link */
	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* ACL confirmation is deferred until config finishes */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
1750
1751static inline bool is_sco_active(struct hci_dev *hdev)
1752{
1753	if (hci_conn_hash_lookup_state(hdev, SCO_LINK, BT_CONNECTED) ||
1754			(hci_conn_hash_lookup_state(hdev, ESCO_LINK,
1755						    BT_CONNECTED)))
1756		return true;
1757	return false;
1758}
1759
/* Handle the Connection Request event: consult the protocol layers and
 * the blacklist, then either accept (ACL/SCO/eSCO, possibly deferred)
 * or reject the incoming connection.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class from the event */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			/* pkt_type not yet used for incoming connections */
			conn = hci_conn_add(hdev, ev->link_type, 0, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			/* Prefer the master role while SCO is active to
			 * keep scheduling predictable */
			if (lmp_rswitch_capable(hdev) && ((mask & HCI_LM_MASTER)
						|| is_sco_active(hdev)))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			/* eSCO-capable controller: accept with full sync
			 * connection parameters */
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
			cp.max_latency    = __constant_cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			/* Protocol layer asked to defer: it will accept or
			 * reject later via the connect confirmation */
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
1843
1844static u8 hci_to_mgmt_reason(u8 err)
1845{
1846	switch (err) {
1847	case HCI_ERROR_CONNECTION_TIMEOUT:
1848		return MGMT_DEV_DISCONN_TIMEOUT;
1849	case HCI_ERROR_REMOTE_USER_TERM:
1850	case HCI_ERROR_REMOTE_LOW_RESOURCES:
1851	case HCI_ERROR_REMOTE_POWER_OFF:
1852		return MGMT_DEV_DISCONN_REMOTE;
1853	case HCI_ERROR_LOCAL_HOST_TERM:
1854		return MGMT_DEV_DISCONN_LOCAL_HOST;
1855	default:
1856		return MGMT_DEV_DISCONN_UNKNOWN;
1857	}
1858}
1859
/* Handle the Disconnection Complete event: notify mgmt of the
 * disconnect (or the failure to disconnect) and, on success, tear
 * down the connection object.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	/* Only connections previously announced to mgmt are reported */
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
		if (ev->status) {
			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
					       conn->dst_type, ev->status);
		} else {
			u8 reason = hci_to_mgmt_reason(ev->reason);

			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
						 conn->dst_type, reason);
		}
	}

	if (ev->status == 0) {
		/* Drop the stored link key if the connection asked for it */
		if (conn->type == ACL_LINK && conn->flush_key)
			hci_remove_link_key(hdev, &conn->dst);
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
1899
/* Handle the Authentication Complete event: update link security,
 * clear the pending auth flags, continue connection setup (possibly
 * via encryption) and service any queued encryption request.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (non-SSP) devices cannot be re-authenticated,
		 * so a successful re-auth there is not trusted */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* SSP links must also be encrypted before the connection
		 * is considered established */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Balance the hold/drop to refresh the disc timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* A queued encryption request was waiting on authentication */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
1965
/* Handle the Remote Name Request Complete event: feed the resolved
 * name (or the failure) into the discovery machinery and, for outgoing
 * connections still in config, start authentication.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Name-resolution bookkeeping only matters for mgmt users */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2004
/* Handle the Encryption Change event: update link-mode bits, abort on
 * failure during an established connection, and continue or confirm
 * connection setup.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		/* A failed encryption change on a live link is fatal:
		 * disconnect rather than continue unencrypted */
		if (ev->status && conn->state == BT_CONNECTED) {
			hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2047
2048static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2049					     struct sk_buff *skb)
2050{
2051	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2052	struct hci_conn *conn;
2053
2054	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2055
2056	hci_dev_lock(hdev);
2057
2058	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2059	if (conn) {
2060		if (!ev->status)
2061			conn->link_mode |= HCI_LM_SECURE;
2062
2063		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2064
2065		hci_key_change_cfm(conn, ev->status);
2066	}
2067
2068	hci_dev_unlock(hdev);
2069}
2070
/* Handle the Read Remote Supported Features Complete event: store the
 * feature page and continue connection setup with either an extended
 * features read, a remote name request, or the final confirmation.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache feature page 0 for later capability checks */
	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support SSP, fetch the extended feature page
	 * before finishing setup */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before announcing the connection,
	 * otherwise announce it to mgmt right away */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* No authentication needed: connection setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2120
/* Handle the Command Complete event: dispatch to the per-opcode
 * hci_cc_* handler, complete the pending request and restart command
 * transmission if the controller signalled available credits.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* The first byte of the return parameters is the status for
	 * all commands dispatched below */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* Any real command completion cancels the command timeout */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* Controller granted command credits: resume the command queue
	 * unless a reset is in progress */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2356
/* Handle the Command Status event: dispatch to the per-opcode hci_cs_*
 * handler, complete the pending request when appropriate and restart
 * command transmission if the controller signalled available credits.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* Any real command status cancels the command timeout */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Complete the request now only on failure, or when the request
	 * is not waiting for a specific follow-up event */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* Controller granted command credits: resume the command queue
	 * unless a reset is in progress */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2445
2446static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2447{
2448	struct hci_ev_role_change *ev = (void *) skb->data;
2449	struct hci_conn *conn;
2450
2451	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2452
2453	hci_dev_lock(hdev);
2454
2455	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2456	if (conn) {
2457		if (!ev->status) {
2458			if (ev->role)
2459				conn->link_mode &= ~HCI_LM_MASTER;
2460			else
2461				conn->link_mode |= HCI_LM_MASTER;
2462		}
2463
2464		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2465
2466		hci_role_switch_cfm(conn, ev->status, ev->role);
2467	}
2468
2469	hci_dev_unlock(hdev);
2470}
2471
/* Handle the Number of Completed Packets event (packet-based flow
 * control): for each listed connection handle, return the completed
 * packet count to the proper controller buffer pool, decrement the
 * connection's outstanding-packet counter and kick the TX work queue.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only meaningful in packet-based flow control. */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the skb really contains num_hndl entries before
	 * walking them. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			/* Clamp in case the controller reports more
			 * completions than we believe are in flight. */
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* LE links share the ACL buffer pool when the
			 * controller has no dedicated LE buffers. */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Freed credits may allow queued frames to be transmitted. */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2537
2538static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2539						 __u16 handle)
2540{
2541	struct hci_chan *chan;
2542
2543	switch (hdev->dev_type) {
2544	case HCI_BREDR:
2545		return hci_conn_hash_lookup_handle(hdev, handle);
2546	case HCI_AMP:
2547		chan = hci_chan_lookup_handle(hdev, handle);
2548		if (chan)
2549			return chan->conn;
2550		break;
2551	default:
2552		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2553		break;
2554	}
2555
2556	return NULL;
2557}
2558
/* Handle the Number of Completed Data Blocks event (block-based flow
 * control): credit the freed blocks back to the shared block pool,
 * decrement each connection's outstanding counter and kick TX work.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only meaningful in block-based flow control. */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the skb really contains num_hndl entries before
	 * walking them. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle semantics depend on dev_type (BR/EDR vs AMP). */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			/* Clamp to the advertised pool size. */
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Freed blocks may allow queued frames to be transmitted. */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2608
2609static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2610{
2611	struct hci_ev_mode_change *ev = (void *) skb->data;
2612	struct hci_conn *conn;
2613
2614	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2615
2616	hci_dev_lock(hdev);
2617
2618	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2619	if (conn) {
2620		conn->mode = ev->mode;
2621		conn->interval = __le16_to_cpu(ev->interval);
2622
2623		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2624					&conn->flags)) {
2625			if (conn->mode == HCI_CM_ACTIVE)
2626				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2627			else
2628				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2629		}
2630
2631		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2632			hci_sco_setup(conn, ev->status);
2633	}
2634
2635	hci_dev_unlock(hdev);
2636}
2637
2638static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2639{
2640	struct hci_ev_pin_code_req *ev = (void *) skb->data;
2641	struct hci_conn *conn;
2642
2643	BT_DBG("%s", hdev->name);
2644
2645	hci_dev_lock(hdev);
2646
2647	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2648	if (!conn)
2649		goto unlock;
2650
2651	if (conn->state == BT_CONNECTED) {
2652		hci_conn_hold(conn);
2653		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2654		hci_conn_drop(conn);
2655	}
2656
2657	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2658		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2659			     sizeof(ev->bdaddr), &ev->bdaddr);
2660	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2661		u8 secure;
2662
2663		if (conn->pending_sec_level == BT_SECURITY_HIGH)
2664			secure = 1;
2665		else
2666			secure = 0;
2667
2668		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2669	}
2670
2671unlock:
2672	hci_dev_unlock(hdev);
2673}
2674
/* Handle the HCI Link Key Request event: look up a stored key for the
 * remote address and reply with it if it satisfies the security policy
 * of the pending connection, otherwise send a negative reply.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* If the host does not manage link keys, the controller will get
	 * no reply here; key handling happens elsewhere. */
	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	/* Debug keys are only usable while debug keys are explicitly
	 * allowed; otherwise treat them as absent. */
	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* An unauthenticated key must not satisfy a connection
		 * whose auth_type requests MITM protection (bit 0). */
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* High security requires a combination key derived from a
		 * full 16-digit PIN. */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
2737
2738static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2739{
2740	struct hci_ev_link_key_notify *ev = (void *) skb->data;
2741	struct hci_conn *conn;
2742	u8 pin_len = 0;
2743
2744	BT_DBG("%s", hdev->name);
2745
2746	hci_dev_lock(hdev);
2747
2748	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2749	if (conn) {
2750		hci_conn_hold(conn);
2751		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2752		pin_len = conn->pin_length;
2753
2754		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2755			conn->key_type = ev->key_type;
2756
2757		hci_conn_drop(conn);
2758	}
2759
2760	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2761		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2762				 ev->key_type, pin_len);
2763
2764	hci_dev_unlock(hdev);
2765}
2766
2767static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2768{
2769	struct hci_ev_clock_offset *ev = (void *) skb->data;
2770	struct hci_conn *conn;
2771
2772	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2773
2774	hci_dev_lock(hdev);
2775
2776	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2777	if (conn && !ev->status) {
2778		struct inquiry_entry *ie;
2779
2780		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2781		if (ie) {
2782			ie->data.clock_offset = ev->clock_offset;
2783			ie->timestamp = jiffies;
2784		}
2785	}
2786
2787	hci_dev_unlock(hdev);
2788}
2789
2790static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2791{
2792	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2793	struct hci_conn *conn;
2794
2795	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2796
2797	hci_dev_lock(hdev);
2798
2799	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2800	if (conn && !ev->status)
2801		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2802
2803	hci_dev_unlock(hdev);
2804}
2805
2806static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2807{
2808	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2809	struct inquiry_entry *ie;
2810
2811	BT_DBG("%s", hdev->name);
2812
2813	hci_dev_lock(hdev);
2814
2815	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2816	if (ie) {
2817		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2818		ie->timestamp = jiffies;
2819	}
2820
2821	hci_dev_unlock(hdev);
2822}
2823
2824static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2825					     struct sk_buff *skb)
2826{
2827	struct inquiry_data data;
2828	int num_rsp = *((__u8 *) skb->data);
2829	bool name_known, ssp;
2830
2831	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2832
2833	if (!num_rsp)
2834		return;
2835
2836	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2837		return;
2838
2839	hci_dev_lock(hdev);
2840
2841	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2842		struct inquiry_info_with_rssi_and_pscan_mode *info;
2843		info = (void *) (skb->data + 1);
2844
2845		for (; num_rsp; num_rsp--, info++) {
2846			bacpy(&data.bdaddr, &info->bdaddr);
2847			data.pscan_rep_mode	= info->pscan_rep_mode;
2848			data.pscan_period_mode	= info->pscan_period_mode;
2849			data.pscan_mode		= info->pscan_mode;
2850			memcpy(data.dev_class, info->dev_class, 3);
2851			data.clock_offset	= info->clock_offset;
2852			data.rssi		= info->rssi;
2853			data.ssp_mode		= 0x00;
2854
2855			name_known = hci_inquiry_cache_update(hdev, &data,
2856							      false, &ssp);
2857			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2858					  info->dev_class, info->rssi,
2859					  !name_known, ssp, NULL, 0);
2860		}
2861	} else {
2862		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2863
2864		for (; num_rsp; num_rsp--, info++) {
2865			bacpy(&data.bdaddr, &info->bdaddr);
2866			data.pscan_rep_mode	= info->pscan_rep_mode;
2867			data.pscan_period_mode	= info->pscan_period_mode;
2868			data.pscan_mode		= 0x00;
2869			memcpy(data.dev_class, info->dev_class, 3);
2870			data.clock_offset	= info->clock_offset;
2871			data.rssi		= info->rssi;
2872			data.ssp_mode		= 0x00;
2873			name_known = hci_inquiry_cache_update(hdev, &data,
2874							      false, &ssp);
2875			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2876					  info->dev_class, info->rssi,
2877					  !name_known, ssp, NULL, 0);
2878		}
2879	}
2880
2881	hci_dev_unlock(hdev);
2882}
2883
/* Handle the Read Remote Extended Features Complete event: cache the
 * feature page, synchronize the SSP-enabled flag from page 1, and if
 * the connection is still being configured, continue with a remote name
 * request or mark the connection established.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Only cache pages we have storage for. */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's SSP support bit. */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Fetch the remote name before notifying mgmt; once that has
	 * happened (or on failure) report the device as connected. */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* If no outgoing authentication is required the connection is
	 * complete; confirm it to the protocol layers. */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2946
/* Handle the Synchronous Connection Complete event: finalize a SCO or
 * eSCO connection, retrying once with a downgraded packet type for a
 * set of known rejection codes before giving up.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* The controller may have fallen back from eSCO to SCO;
		 * look for the pending eSCO connection in that case. */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* For outgoing attempts, retry once with a reduced packet
		 * type selection before treating the error as fatal.
		 * NOTE(review): conn->link is dereferenced without a NULL
		 * check here — confirm it is always set for outgoing
		 * SCO/eSCO connections. */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3002
3003static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3004					    struct sk_buff *skb)
3005{
3006	struct inquiry_data data;
3007	struct extended_inquiry_info *info = (void *) (skb->data + 1);
3008	int num_rsp = *((__u8 *) skb->data);
3009	size_t eir_len;
3010
3011	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3012
3013	if (!num_rsp)
3014		return;
3015
3016	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3017		return;
3018
3019	hci_dev_lock(hdev);
3020
3021	for (; num_rsp; num_rsp--, info++) {
3022		bool name_known, ssp;
3023
3024		bacpy(&data.bdaddr, &info->bdaddr);
3025		data.pscan_rep_mode	= info->pscan_rep_mode;
3026		data.pscan_period_mode	= info->pscan_period_mode;
3027		data.pscan_mode		= 0x00;
3028		memcpy(data.dev_class, info->dev_class, 3);
3029		data.clock_offset	= info->clock_offset;
3030		data.rssi		= info->rssi;
3031		data.ssp_mode		= 0x01;
3032
3033		if (test_bit(HCI_MGMT, &hdev->dev_flags))
3034			name_known = eir_has_data_type(info->data,
3035						       sizeof(info->data),
3036						       EIR_NAME_COMPLETE);
3037		else
3038			name_known = true;
3039
3040		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3041						      &ssp);
3042		eir_len = eir_get_length(info->data, sizeof(info->data));
3043		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3044				  info->dev_class, info->rssi, !name_known,
3045				  ssp, info->data, eir_len);
3046	}
3047
3048	hci_dev_unlock(hdev);
3049}
3050
/* Handle the Encryption Key Refresh Complete event: promote the pending
 * security level on success, disconnect on failure of an established
 * link, and confirm the result to the protocol or auth layers.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link means the encryption
	 * keys are no longer trustworthy; tear the link down. */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Keep the connection alive briefly after the refresh. */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3094
3095static u8 hci_get_auth_req(struct hci_conn *conn)
3096{
3097	/* If remote requests dedicated bonding follow that lead */
3098	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3099		/* If both remote and local IO capabilities allow MITM
3100		 * protection then require it, otherwise don't */
3101		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3102			return 0x02;
3103		else
3104			return 0x03;
3105	}
3106
3107	/* If remote requests no-bonding follow that lead */
3108	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3109		return conn->remote_auth | (conn->auth_type & 0x01);
3110
3111	return conn->auth_type;
3112}
3113
/* Handle the IO Capability Request event: reply with the local IO
 * capability and authentication requirements when pairing is allowed,
 * otherwise send a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of the pairing. */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Pairing is allowed if the device is pairable, or if the remote
	 * side requests no-bonding (ignoring the MITM bit). */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
						0x01 : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data presence when we have it stored and
		 * it is usable for this direction of pairing. */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3165
3166static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3167{
3168	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3169	struct hci_conn *conn;
3170
3171	BT_DBG("%s", hdev->name);
3172
3173	hci_dev_lock(hdev);
3174
3175	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3176	if (!conn)
3177		goto unlock;
3178
3179	conn->remote_cap = ev->capability;
3180	conn->remote_auth = ev->authentication;
3181	if (ev->oob_data)
3182		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3183
3184unlock:
3185	hci_dev_unlock(hdev);
3186}
3187
/* Handle the User Confirmation Request event: auto-reject, auto-accept
 * (optionally after a configured delay), or forward the request to user
 * space, depending on each side's MITM requirements and IO capability.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirements is the MITM protection flag. */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
	    (!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Defer the accept through the connection's timer when a
		 * delay is configured. */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	/* Ask user space to confirm (or authorize, if confirm_hint=1). */
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
				  confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3255
3256static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3257					 struct sk_buff *skb)
3258{
3259	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3260
3261	BT_DBG("%s", hdev->name);
3262
3263	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3264		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3265}
3266
3267static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3268					struct sk_buff *skb)
3269{
3270	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3271	struct hci_conn *conn;
3272
3273	BT_DBG("%s", hdev->name);
3274
3275	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3276	if (!conn)
3277		return;
3278
3279	conn->passkey_notify = __le32_to_cpu(ev->passkey);
3280	conn->passkey_entered = 0;
3281
3282	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3283		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3284					 conn->dst_type, conn->passkey_notify,
3285					 conn->passkey_entered);
3286}
3287
3288static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3289{
3290	struct hci_ev_keypress_notify *ev = (void *) skb->data;
3291	struct hci_conn *conn;
3292
3293	BT_DBG("%s", hdev->name);
3294
3295	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3296	if (!conn)
3297		return;
3298
3299	switch (ev->type) {
3300	case HCI_KEYPRESS_STARTED:
3301		conn->passkey_entered = 0;
3302		return;
3303
3304	case HCI_KEYPRESS_ENTERED:
3305		conn->passkey_entered++;
3306		break;
3307
3308	case HCI_KEYPRESS_ERASED:
3309		conn->passkey_entered--;
3310		break;
3311
3312	case HCI_KEYPRESS_CLEARED:
3313		conn->passkey_entered = 0;
3314		break;
3315
3316	case HCI_KEYPRESS_COMPLETED:
3317		return;
3318	}
3319
3320	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3321		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3322					 conn->dst_type, conn->passkey_notify,
3323					 conn->passkey_entered);
3324}
3325
3326static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3327					 struct sk_buff *skb)
3328{
3329	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3330	struct hci_conn *conn;
3331
3332	BT_DBG("%s", hdev->name);
3333
3334	hci_dev_lock(hdev);
3335
3336	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3337	if (!conn)
3338		goto unlock;
3339
3340	/* To avoid duplicate auth_failed events to user space we check
3341	 * the HCI_CONN_AUTH_PEND flag which will be set if we
3342	 * initiated the authentication. A traditional auth_complete
3343	 * event gets always produced as initiator and is also mapped to
3344	 * the mgmt_auth_failed event */
3345	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3346		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3347				 ev->status);
3348
3349	hci_conn_drop(conn);
3350
3351unlock:
3352	hci_dev_unlock(hdev);
3353}
3354
3355static void hci_remote_host_features_evt(struct hci_dev *hdev,
3356					 struct sk_buff *skb)
3357{
3358	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3359	struct inquiry_entry *ie;
3360	struct hci_conn *conn;
3361
3362	BT_DBG("%s", hdev->name);
3363
3364	hci_dev_lock(hdev);
3365
3366	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3367	if (conn)
3368		memcpy(conn->features[1], ev->features, 8);
3369
3370	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3371	if (ie)
3372		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3373
3374	hci_dev_unlock(hdev);
3375}
3376
3377static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3378					    struct sk_buff *skb)
3379{
3380	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3381	struct oob_data *data;
3382
3383	BT_DBG("%s", hdev->name);
3384
3385	hci_dev_lock(hdev);
3386
3387	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3388		goto unlock;
3389
3390	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3391	if (data) {
3392		struct hci_cp_remote_oob_data_reply cp;
3393
3394		bacpy(&cp.bdaddr, &ev->bdaddr);
3395		memcpy(cp.hash, data->hash, sizeof(cp.hash));
3396		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3397
3398		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3399			     &cp);
3400	} else {
3401		struct hci_cp_remote_oob_data_neg_reply cp;
3402
3403		bacpy(&cp.bdaddr, &ev->bdaddr);
3404		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3405			     &cp);
3406	}
3407
3408unlock:
3409	hci_dev_unlock(hdev);
3410}
3411
3412static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3413				      struct sk_buff *skb)
3414{
3415	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3416	struct hci_conn *hcon, *bredr_hcon;
3417
3418	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3419	       ev->status);
3420
3421	hci_dev_lock(hdev);
3422
3423	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3424	if (!hcon) {
3425		hci_dev_unlock(hdev);
3426		return;
3427	}
3428
3429	if (ev->status) {
3430		hci_conn_del(hcon);
3431		hci_dev_unlock(hdev);
3432		return;
3433	}
3434
3435	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3436
3437	hcon->state = BT_CONNECTED;
3438	bacpy(&hcon->dst, &bredr_hcon->dst);
3439
3440	hci_conn_hold(hcon);
3441	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3442	hci_conn_drop(hcon);
3443
3444	hci_conn_add_sysfs(hcon);
3445
3446	amp_physical_cfm(bredr_hcon, hcon);
3447
3448	hci_dev_unlock(hdev);
3449}
3450
/* Handle the AMP Logical Link Complete event: create the HCI channel
 * for the new logical link and confirm it to the waiting L2CAP channel.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	/* Hand the logical link to the BR/EDR L2CAP channel that has
	 * been waiting for it; lock the channel while reconfiguring. */
	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Switch the connection MTU to the AMP block MTU. */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
3488
3489static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3490					     struct sk_buff *skb)
3491{
3492	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3493	struct hci_chan *hchan;
3494
3495	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3496	       le16_to_cpu(ev->handle), ev->status);
3497
3498	if (ev->status)
3499		return;
3500
3501	hci_dev_lock(hdev);
3502
3503	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3504	if (!hchan)
3505		goto unlock;
3506
3507	amp_destroy_logical_link(hchan, ev->reason);
3508
3509unlock:
3510	hci_dev_unlock(hdev);
3511}
3512
3513static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3514					     struct sk_buff *skb)
3515{
3516	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3517	struct hci_conn *hcon;
3518
3519	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3520
3521	if (ev->status)
3522		return;
3523
3524	hci_dev_lock(hdev);
3525
3526	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3527	if (hcon) {
3528		hcon->state = BT_CLOSED;
3529		hci_conn_del(hcon);
3530	}
3531
3532	hci_dev_unlock(hdev);
3533}
3534
/* LE Connection Complete event: finalize (or fail) an LE connection.
 * For outgoing connections a conn object already exists in BT_CONNECT
 * state; for incoming ones a new object is created here.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* NOTE(review): the lookup matches on link type and state only,
	 * not on bdaddr -- this assumes at most one pending LE connection
	 * at a time; verify against the connection setup path.
	 */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		/* No pending connection: this is an incoming link */
		conn = hci_conn_add(hdev, LE_LINK, 0, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* Controller reported us as master of this new link */
		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}
	}

	if (ev->status) {
		/* Report the failure to mgmt and the upper protocol,
		 * then drop the conn object (order matters: connect_cfm
		 * runs before the state flips to BT_CLOSED).
		 */
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	/* Notify mgmt only once per connection lifetime */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3584
3585static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3586{
3587	u8 num_reports = skb->data[0];
3588	void *ptr = &skb->data[1];
3589	s8 rssi;
3590
3591	while (num_reports--) {
3592		struct hci_ev_le_advertising_info *ev = ptr;
3593
3594		rssi = ev->data[ev->length];
3595		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3596				  NULL, rssi, 0, 1, ev->data, ev->length);
3597
3598		ptr += sizeof(*ev) + ev->length + 1;
3599	}
3600}
3601
3602static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3603{
3604	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3605	struct hci_cp_le_ltk_reply cp;
3606	struct hci_cp_le_ltk_neg_reply neg;
3607	struct hci_conn *conn;
3608	struct smp_ltk *ltk;
3609
3610	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3611
3612	hci_dev_lock(hdev);
3613
3614	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3615	if (conn == NULL)
3616		goto not_found;
3617
3618	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3619	if (ltk == NULL)
3620		goto not_found;
3621
3622	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3623	cp.handle = cpu_to_le16(conn->handle);
3624
3625	if (ltk->authenticated)
3626		conn->sec_level = BT_SECURITY_HIGH;
3627
3628	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3629
3630	if (ltk->type & HCI_SMP_STK) {
3631		list_del(&ltk->list);
3632		kfree(ltk);
3633	}
3634
3635	hci_dev_unlock(hdev);
3636
3637	return;
3638
3639not_found:
3640	neg.handle = ev->handle;
3641	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3642	hci_dev_unlock(hdev);
3643}
3644
3645static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3646{
3647	struct hci_ev_le_meta *le_ev = (void *) skb->data;
3648
3649	skb_pull(skb, sizeof(*le_ev));
3650
3651	switch (le_ev->subevent) {
3652	case HCI_EV_LE_CONN_COMPLETE:
3653		hci_le_conn_complete_evt(hdev, skb);
3654		break;
3655
3656	case HCI_EV_LE_ADVERTISING_REPORT:
3657		hci_le_adv_report_evt(hdev, skb);
3658		break;
3659
3660	case HCI_EV_LE_LTK_REQ:
3661		hci_le_ltk_request_evt(hdev, skb);
3662		break;
3663
3664	default:
3665		break;
3666	}
3667}
3668
3669static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3670{
3671	struct hci_ev_channel_selected *ev = (void *) skb->data;
3672	struct hci_conn *hcon;
3673
3674	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3675
3676	skb_pull(skb, sizeof(*ev));
3677
3678	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3679	if (!hcon)
3680		return;
3681
3682	amp_read_loc_assoc_final_data(hdev, hcon);
3683}
3684
/* Main HCI event dispatcher: stash a copy of the event for any pending
 * synchronous request, complete the matching sent command if this is the
 * event it was waiting for, then route the packet to the per-event
 * handler. Consumes the skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hdev->req_status == HCI_REQ_PEND) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the currently sent command was waiting for this event,
	 * mark its request complete. (The inner hdr intentionally
	 * shadows the outer one: it views the command packet, not
	 * this event packet.)
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	/* Route to the handler for this event code; unknown events are
	 * just logged and dropped.
	 */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
3888