hci_event.c revision 9b3b44604ac8e06d299718c5d0fa0b91b675ae0b
1/*
2   BlueZ - Bluetooth protocol stack for Linux
3   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License version 2 as
9   published by the Free Software Foundation;
10
11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22   SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI event handling. */
26
27#include <linux/module.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/poll.h>
34#include <linux/fcntl.h>
35#include <linux/init.h>
36#include <linux/skbuff.h>
37#include <linux/interrupt.h>
38#include <net/sock.h>
39
40#include <linux/uaccess.h>
41#include <asm/unaligned.h>
42
43#include <net/bluetooth/bluetooth.h>
44#include <net/bluetooth/hci_core.h>
45
46/* Handle HCI Event packets */
47
48static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
49{
50	__u8 status = *((__u8 *) skb->data);
51
52	BT_DBG("%s status 0x%x", hdev->name, status);
53
54	if (status) {
55		hci_dev_lock(hdev);
56		mgmt_stop_discovery_failed(hdev, status);
57		hci_dev_unlock(hdev);
58		return;
59	}
60
61	clear_bit(HCI_INQUIRY, &hdev->flags);
62
63	hci_dev_lock(hdev);
64	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
65	hci_dev_unlock(hdev);
66
67	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
68
69	hci_conn_check_pending(hdev);
70}
71
72static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
73{
74	__u8 status = *((__u8 *) skb->data);
75
76	BT_DBG("%s status 0x%x", hdev->name, status);
77
78	if (status)
79		return;
80
81	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
82}
83
84static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
85{
86	__u8 status = *((__u8 *) skb->data);
87
88	BT_DBG("%s status 0x%x", hdev->name, status);
89
90	if (status)
91		return;
92
93	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
94
95	hci_conn_check_pending(hdev);
96}
97
98static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
99					  struct sk_buff *skb)
100{
101	BT_DBG("%s", hdev->name);
102}
103
104static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
105{
106	struct hci_rp_role_discovery *rp = (void *) skb->data;
107	struct hci_conn *conn;
108
109	BT_DBG("%s status 0x%x", hdev->name, rp->status);
110
111	if (rp->status)
112		return;
113
114	hci_dev_lock(hdev);
115
116	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
117	if (conn) {
118		if (rp->role)
119			conn->link_mode &= ~HCI_LM_MASTER;
120		else
121			conn->link_mode |= HCI_LM_MASTER;
122	}
123
124	hci_dev_unlock(hdev);
125}
126
127static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
128{
129	struct hci_rp_read_link_policy *rp = (void *) skb->data;
130	struct hci_conn *conn;
131
132	BT_DBG("%s status 0x%x", hdev->name, rp->status);
133
134	if (rp->status)
135		return;
136
137	hci_dev_lock(hdev);
138
139	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
140	if (conn)
141		conn->link_policy = __le16_to_cpu(rp->policy);
142
143	hci_dev_unlock(hdev);
144}
145
146static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
147{
148	struct hci_rp_write_link_policy *rp = (void *) skb->data;
149	struct hci_conn *conn;
150	void *sent;
151
152	BT_DBG("%s status 0x%x", hdev->name, rp->status);
153
154	if (rp->status)
155		return;
156
157	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
158	if (!sent)
159		return;
160
161	hci_dev_lock(hdev);
162
163	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
164	if (conn)
165		conn->link_policy = get_unaligned_le16(sent + 2);
166
167	hci_dev_unlock(hdev);
168}
169
170static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
171					struct sk_buff *skb)
172{
173	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
174
175	BT_DBG("%s status 0x%x", hdev->name, rp->status);
176
177	if (rp->status)
178		return;
179
180	hdev->link_policy = __le16_to_cpu(rp->policy);
181}
182
183static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
184					 struct sk_buff *skb)
185{
186	__u8 status = *((__u8 *) skb->data);
187	void *sent;
188
189	BT_DBG("%s status 0x%x", hdev->name, status);
190
191	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
192	if (!sent)
193		return;
194
195	if (!status)
196		hdev->link_policy = get_unaligned_le16(sent);
197
198	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
199}
200
201static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
202{
203	__u8 status = *((__u8 *) skb->data);
204
205	BT_DBG("%s status 0x%x", hdev->name, status);
206
207	clear_bit(HCI_RESET, &hdev->flags);
208
209	hci_req_complete(hdev, HCI_OP_RESET, status);
210
211	/* Reset all non-persistent flags */
212	hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
213			     BIT(HCI_PERIODIC_INQ));
214
215	hdev->discovery.state = DISCOVERY_STOPPED;
216}
217
218static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
219{
220	__u8 status = *((__u8 *) skb->data);
221	void *sent;
222
223	BT_DBG("%s status 0x%x", hdev->name, status);
224
225	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
226	if (!sent)
227		return;
228
229	hci_dev_lock(hdev);
230
231	if (test_bit(HCI_MGMT, &hdev->dev_flags))
232		mgmt_set_local_name_complete(hdev, sent, status);
233	else if (!status)
234		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
235
236	hci_dev_unlock(hdev);
237
238	hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
239}
240
241static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
242{
243	struct hci_rp_read_local_name *rp = (void *) skb->data;
244
245	BT_DBG("%s status 0x%x", hdev->name, rp->status);
246
247	if (rp->status)
248		return;
249
250	if (test_bit(HCI_SETUP, &hdev->dev_flags))
251		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
252}
253
254static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
255{
256	__u8 status = *((__u8 *) skb->data);
257	void *sent;
258
259	BT_DBG("%s status 0x%x", hdev->name, status);
260
261	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
262	if (!sent)
263		return;
264
265	if (!status) {
266		__u8 param = *((__u8 *) sent);
267
268		if (param == AUTH_ENABLED)
269			set_bit(HCI_AUTH, &hdev->flags);
270		else
271			clear_bit(HCI_AUTH, &hdev->flags);
272	}
273
274	if (test_bit(HCI_MGMT, &hdev->dev_flags))
275		mgmt_auth_enable_complete(hdev, status);
276
277	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
278}
279
280static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
281{
282	__u8 status = *((__u8 *) skb->data);
283	void *sent;
284
285	BT_DBG("%s status 0x%x", hdev->name, status);
286
287	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
288	if (!sent)
289		return;
290
291	if (!status) {
292		__u8 param = *((__u8 *) sent);
293
294		if (param)
295			set_bit(HCI_ENCRYPT, &hdev->flags);
296		else
297			clear_bit(HCI_ENCRYPT, &hdev->flags);
298	}
299
300	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
301}
302
303static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
304{
305	__u8 param, status = *((__u8 *) skb->data);
306	int old_pscan, old_iscan;
307	void *sent;
308
309	BT_DBG("%s status 0x%x", hdev->name, status);
310
311	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
312	if (!sent)
313		return;
314
315	param = *((__u8 *) sent);
316
317	hci_dev_lock(hdev);
318
319	if (status != 0) {
320		mgmt_write_scan_failed(hdev, param, status);
321		hdev->discov_timeout = 0;
322		goto done;
323	}
324
325	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
326	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
327
328	if (param & SCAN_INQUIRY) {
329		set_bit(HCI_ISCAN, &hdev->flags);
330		if (!old_iscan)
331			mgmt_discoverable(hdev, 1);
332		if (hdev->discov_timeout > 0) {
333			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
334			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
335					   to);
336		}
337	} else if (old_iscan)
338		mgmt_discoverable(hdev, 0);
339
340	if (param & SCAN_PAGE) {
341		set_bit(HCI_PSCAN, &hdev->flags);
342		if (!old_pscan)
343			mgmt_connectable(hdev, 1);
344	} else if (old_pscan)
345		mgmt_connectable(hdev, 0);
346
347done:
348	hci_dev_unlock(hdev);
349	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
350}
351
352static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
353{
354	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
355
356	BT_DBG("%s status 0x%x", hdev->name, rp->status);
357
358	if (rp->status)
359		return;
360
361	memcpy(hdev->dev_class, rp->dev_class, 3);
362
363	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
364	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
365}
366
367static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
368{
369	__u8 status = *((__u8 *) skb->data);
370	void *sent;
371
372	BT_DBG("%s status 0x%x", hdev->name, status);
373
374	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
375	if (!sent)
376		return;
377
378	hci_dev_lock(hdev);
379
380	if (status == 0)
381		memcpy(hdev->dev_class, sent, 3);
382
383	if (test_bit(HCI_MGMT, &hdev->dev_flags))
384		mgmt_set_class_of_dev_complete(hdev, sent, status);
385
386	hci_dev_unlock(hdev);
387}
388
389static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
390{
391	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
392	__u16 setting;
393
394	BT_DBG("%s status 0x%x", hdev->name, rp->status);
395
396	if (rp->status)
397		return;
398
399	setting = __le16_to_cpu(rp->voice_setting);
400
401	if (hdev->voice_setting == setting)
402		return;
403
404	hdev->voice_setting = setting;
405
406	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
407
408	if (hdev->notify)
409		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
410}
411
412static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
413{
414	__u8 status = *((__u8 *) skb->data);
415	__u16 setting;
416	void *sent;
417
418	BT_DBG("%s status 0x%x", hdev->name, status);
419
420	if (status)
421		return;
422
423	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
424	if (!sent)
425		return;
426
427	setting = get_unaligned_le16(sent);
428
429	if (hdev->voice_setting == setting)
430		return;
431
432	hdev->voice_setting = setting;
433
434	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
435
436	if (hdev->notify)
437		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
438}
439
440static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
441{
442	__u8 status = *((__u8 *) skb->data);
443
444	BT_DBG("%s status 0x%x", hdev->name, status);
445
446	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
447}
448
449static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
450{
451	__u8 status = *((__u8 *) skb->data);
452	void *sent;
453
454	BT_DBG("%s status 0x%x", hdev->name, status);
455
456	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
457	if (!sent)
458		return;
459
460	if (test_bit(HCI_MGMT, &hdev->dev_flags))
461		mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
462	else if (!status) {
463		if (*((u8 *) sent))
464			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
465		else
466			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
467	}
468}
469
470static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
471{
472	if (hdev->features[6] & LMP_EXT_INQ)
473		return 2;
474
475	if (hdev->features[3] & LMP_RSSI_INQ)
476		return 1;
477
478	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
479	    hdev->lmp_subver == 0x0757)
480		return 1;
481
482	if (hdev->manufacturer == 15) {
483		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
484			return 1;
485		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
486			return 1;
487		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
488			return 1;
489	}
490
491	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
492	    hdev->lmp_subver == 0x1805)
493		return 1;
494
495	return 0;
496}
497
498static void hci_setup_inquiry_mode(struct hci_dev *hdev)
499{
500	u8 mode;
501
502	mode = hci_get_inquiry_mode(hdev);
503
504	hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
505}
506
507static void hci_setup_event_mask(struct hci_dev *hdev)
508{
509	/* The second byte is 0xff instead of 0x9f (two reserved bits
510	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
511	 * command otherwise */
512	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
513
514	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
515	 * any event mask for pre-1.2 devices */
516	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
517		return;
518
519	events[4] |= 0x01; /* Flow Specification Complete */
520	events[4] |= 0x02; /* Inquiry Result with RSSI */
521	events[4] |= 0x04; /* Read Remote Extended Features Complete */
522	events[5] |= 0x08; /* Synchronous Connection Complete */
523	events[5] |= 0x10; /* Synchronous Connection Changed */
524
525	if (hdev->features[3] & LMP_RSSI_INQ)
526		events[4] |= 0x02; /* Inquiry Result with RSSI */
527
528	if (hdev->features[5] & LMP_SNIFF_SUBR)
529		events[5] |= 0x20; /* Sniff Subrating */
530
531	if (hdev->features[5] & LMP_PAUSE_ENC)
532		events[5] |= 0x80; /* Encryption Key Refresh Complete */
533
534	if (hdev->features[6] & LMP_EXT_INQ)
535		events[5] |= 0x40; /* Extended Inquiry Result */
536
537	if (hdev->features[6] & LMP_NO_FLUSH)
538		events[7] |= 0x01; /* Enhanced Flush Complete */
539
540	if (hdev->features[7] & LMP_LSTO)
541		events[6] |= 0x80; /* Link Supervision Timeout Changed */
542
543	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
544		events[6] |= 0x01;	/* IO Capability Request */
545		events[6] |= 0x02;	/* IO Capability Response */
546		events[6] |= 0x04;	/* User Confirmation Request */
547		events[6] |= 0x08;	/* User Passkey Request */
548		events[6] |= 0x10;	/* Remote OOB Data Request */
549		events[6] |= 0x20;	/* Simple Pairing Complete */
550		events[7] |= 0x04;	/* User Passkey Notification */
551		events[7] |= 0x08;	/* Keypress Notification */
552		events[7] |= 0x10;	/* Remote Host Supported
553					 * Features Notification */
554	}
555
556	if (hdev->features[4] & LMP_LE)
557		events[7] |= 0x20;	/* LE Meta-Event */
558
559	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
560}
561
562static void hci_setup(struct hci_dev *hdev)
563{
564	if (hdev->dev_type != HCI_BREDR)
565		return;
566
567	hci_setup_event_mask(hdev);
568
569	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
570		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
571
572	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
573		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
574			u8 mode = 0x01;
575			hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
576				     sizeof(mode), &mode);
577		} else {
578			struct hci_cp_write_eir cp;
579
580			memset(hdev->eir, 0, sizeof(hdev->eir));
581			memset(&cp, 0, sizeof(cp));
582
583			hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
584		}
585	}
586
587	if (hdev->features[3] & LMP_RSSI_INQ)
588		hci_setup_inquiry_mode(hdev);
589
590	if (hdev->features[7] & LMP_INQ_TX_PWR)
591		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
592
593	if (hdev->features[7] & LMP_EXTFEATURES) {
594		struct hci_cp_read_local_ext_features cp;
595
596		cp.page = 0x01;
597		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
598			     &cp);
599	}
600
601	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
602		u8 enable = 1;
603		hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
604			     &enable);
605	}
606}
607
608static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
609{
610	struct hci_rp_read_local_version *rp = (void *) skb->data;
611
612	BT_DBG("%s status 0x%x", hdev->name, rp->status);
613
614	if (rp->status)
615		goto done;
616
617	hdev->hci_ver = rp->hci_ver;
618	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
619	hdev->lmp_ver = rp->lmp_ver;
620	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
621	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
622
623	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
624	       hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
625
626	if (test_bit(HCI_INIT, &hdev->flags))
627		hci_setup(hdev);
628
629done:
630	hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
631}
632
633static void hci_setup_link_policy(struct hci_dev *hdev)
634{
635	struct hci_cp_write_def_link_policy cp;
636	u16 link_policy = 0;
637
638	if (hdev->features[0] & LMP_RSWITCH)
639		link_policy |= HCI_LP_RSWITCH;
640	if (hdev->features[0] & LMP_HOLD)
641		link_policy |= HCI_LP_HOLD;
642	if (hdev->features[0] & LMP_SNIFF)
643		link_policy |= HCI_LP_SNIFF;
644	if (hdev->features[1] & LMP_PARK)
645		link_policy |= HCI_LP_PARK;
646
647	cp.policy = cpu_to_le16(link_policy);
648	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
649}
650
651static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
652{
653	struct hci_rp_read_local_commands *rp = (void *) skb->data;
654
655	BT_DBG("%s status 0x%x", hdev->name, rp->status);
656
657	if (rp->status)
658		goto done;
659
660	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
661
662	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
663		hci_setup_link_policy(hdev);
664
665done:
666	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
667}
668
669static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
670{
671	struct hci_rp_read_local_features *rp = (void *) skb->data;
672
673	BT_DBG("%s status 0x%x", hdev->name, rp->status);
674
675	if (rp->status)
676		return;
677
678	memcpy(hdev->features, rp->features, 8);
679
680	/* Adjust default settings according to features
681	 * supported by the device. */
682
683	if (hdev->features[0] & LMP_3SLOT)
684		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
685
686	if (hdev->features[0] & LMP_5SLOT)
687		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
688
689	if (hdev->features[1] & LMP_HV2) {
690		hdev->pkt_type  |= (HCI_HV2);
691		hdev->esco_type |= (ESCO_HV2);
692	}
693
694	if (hdev->features[1] & LMP_HV3) {
695		hdev->pkt_type  |= (HCI_HV3);
696		hdev->esco_type |= (ESCO_HV3);
697	}
698
699	if (hdev->features[3] & LMP_ESCO)
700		hdev->esco_type |= (ESCO_EV3);
701
702	if (hdev->features[4] & LMP_EV4)
703		hdev->esco_type |= (ESCO_EV4);
704
705	if (hdev->features[4] & LMP_EV5)
706		hdev->esco_type |= (ESCO_EV5);
707
708	if (hdev->features[5] & LMP_EDR_ESCO_2M)
709		hdev->esco_type |= (ESCO_2EV3);
710
711	if (hdev->features[5] & LMP_EDR_ESCO_3M)
712		hdev->esco_type |= (ESCO_3EV3);
713
714	if (hdev->features[5] & LMP_EDR_3S_ESCO)
715		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
716
717	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
718	       hdev->features[0], hdev->features[1],
719	       hdev->features[2], hdev->features[3],
720	       hdev->features[4], hdev->features[5],
721	       hdev->features[6], hdev->features[7]);
722}
723
724static void hci_set_le_support(struct hci_dev *hdev)
725{
726	struct hci_cp_write_le_host_supported cp;
727
728	memset(&cp, 0, sizeof(cp));
729
730	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
731		cp.le = 1;
732		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
733	}
734
735	if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
736		hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
737			     &cp);
738}
739
740static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
741					   struct sk_buff *skb)
742{
743	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
744
745	BT_DBG("%s status 0x%x", hdev->name, rp->status);
746
747	if (rp->status)
748		goto done;
749
750	switch (rp->page) {
751	case 0:
752		memcpy(hdev->features, rp->features, 8);
753		break;
754	case 1:
755		memcpy(hdev->host_features, rp->features, 8);
756		break;
757	}
758
759	if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
760		hci_set_le_support(hdev);
761
762done:
763	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
764}
765
766static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
767					  struct sk_buff *skb)
768{
769	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
770
771	BT_DBG("%s status 0x%x", hdev->name, rp->status);
772
773	if (rp->status)
774		return;
775
776	hdev->flow_ctl_mode = rp->mode;
777
778	hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
779}
780
781static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
782{
783	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
784
785	BT_DBG("%s status 0x%x", hdev->name, rp->status);
786
787	if (rp->status)
788		return;
789
790	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
791	hdev->sco_mtu  = rp->sco_mtu;
792	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
793	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
794
795	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
796		hdev->sco_mtu  = 64;
797		hdev->sco_pkts = 8;
798	}
799
800	hdev->acl_cnt = hdev->acl_pkts;
801	hdev->sco_cnt = hdev->sco_pkts;
802
803	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
804	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
805}
806
807static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
808{
809	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
810
811	BT_DBG("%s status 0x%x", hdev->name, rp->status);
812
813	if (!rp->status)
814		bacpy(&hdev->bdaddr, &rp->bdaddr);
815
816	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
817}
818
819static void hci_cc_read_data_block_size(struct hci_dev *hdev,
820					struct sk_buff *skb)
821{
822	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
823
824	BT_DBG("%s status 0x%x", hdev->name, rp->status);
825
826	if (rp->status)
827		return;
828
829	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
830	hdev->block_len = __le16_to_cpu(rp->block_len);
831	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
832
833	hdev->block_cnt = hdev->num_blocks;
834
835	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
836	       hdev->block_cnt, hdev->block_len);
837
838	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
839}
840
841static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
842{
843	__u8 status = *((__u8 *) skb->data);
844
845	BT_DBG("%s status 0x%x", hdev->name, status);
846
847	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
848}
849
850static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
851				       struct sk_buff *skb)
852{
853	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
854
855	BT_DBG("%s status 0x%x", hdev->name, rp->status);
856
857	if (rp->status)
858		return;
859
860	hdev->amp_status = rp->amp_status;
861	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
862	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
863	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
864	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
865	hdev->amp_type = rp->amp_type;
866	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
867	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
868	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
869	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
870
871	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
872}
873
874static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
875					  struct sk_buff *skb)
876{
877	__u8 status = *((__u8 *) skb->data);
878
879	BT_DBG("%s status 0x%x", hdev->name, status);
880
881	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
882}
883
884static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
885{
886	__u8 status = *((__u8 *) skb->data);
887
888	BT_DBG("%s status 0x%x", hdev->name, status);
889
890	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
891}
892
893static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
894				      struct sk_buff *skb)
895{
896	__u8 status = *((__u8 *) skb->data);
897
898	BT_DBG("%s status 0x%x", hdev->name, status);
899
900	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
901}
902
903static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
904					 struct sk_buff *skb)
905{
906	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
907
908	BT_DBG("%s status 0x%x", hdev->name, rp->status);
909
910	if (!rp->status)
911		hdev->inq_tx_power = rp->tx_power;
912
913	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
914}
915
916static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
917{
918	__u8 status = *((__u8 *) skb->data);
919
920	BT_DBG("%s status 0x%x", hdev->name, status);
921
922	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
923}
924
925static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
926{
927	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
928	struct hci_cp_pin_code_reply *cp;
929	struct hci_conn *conn;
930
931	BT_DBG("%s status 0x%x", hdev->name, rp->status);
932
933	hci_dev_lock(hdev);
934
935	if (test_bit(HCI_MGMT, &hdev->dev_flags))
936		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
937
938	if (rp->status != 0)
939		goto unlock;
940
941	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
942	if (!cp)
943		goto unlock;
944
945	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
946	if (conn)
947		conn->pin_length = cp->pin_len;
948
949unlock:
950	hci_dev_unlock(hdev);
951}
952
953static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
954{
955	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
956
957	BT_DBG("%s status 0x%x", hdev->name, rp->status);
958
959	hci_dev_lock(hdev);
960
961	if (test_bit(HCI_MGMT, &hdev->dev_flags))
962		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
963						 rp->status);
964
965	hci_dev_unlock(hdev);
966}
967
968static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
969				       struct sk_buff *skb)
970{
971	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
972
973	BT_DBG("%s status 0x%x", hdev->name, rp->status);
974
975	if (rp->status)
976		return;
977
978	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
979	hdev->le_pkts = rp->le_max_pkt;
980
981	hdev->le_cnt = hdev->le_pkts;
982
983	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
984
985	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
986}
987
988static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
989{
990	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
991
992	BT_DBG("%s status 0x%x", hdev->name, rp->status);
993
994	hci_dev_lock(hdev);
995
996	if (test_bit(HCI_MGMT, &hdev->dev_flags))
997		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
998						 rp->status);
999
1000	hci_dev_unlock(hdev);
1001}
1002
1003static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1004					  struct sk_buff *skb)
1005{
1006	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1007
1008	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1009
1010	hci_dev_lock(hdev);
1011
1012	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1013		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1014						     ACL_LINK, 0, rp->status);
1015
1016	hci_dev_unlock(hdev);
1017}
1018
1019static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1020{
1021	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1022
1023	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1024
1025	hci_dev_lock(hdev);
1026
1027	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1028		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1029						 0, rp->status);
1030
1031	hci_dev_unlock(hdev);
1032}
1033
1034static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1035					  struct sk_buff *skb)
1036{
1037	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1038
1039	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1040
1041	hci_dev_lock(hdev);
1042
1043	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1044		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1045						     ACL_LINK, 0, rp->status);
1046
1047	hci_dev_unlock(hdev);
1048}
1049
1050static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1051					     struct sk_buff *skb)
1052{
1053	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1054
1055	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1056
1057	hci_dev_lock(hdev);
1058	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1059						rp->randomizer, rp->status);
1060	hci_dev_unlock(hdev);
1061}
1062
1063static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1064{
1065	__u8 status = *((__u8 *) skb->data);
1066
1067	BT_DBG("%s status 0x%x", hdev->name, status);
1068
1069	hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1070
1071	if (status) {
1072		hci_dev_lock(hdev);
1073		mgmt_start_discovery_failed(hdev, status);
1074		hci_dev_unlock(hdev);
1075		return;
1076	}
1077}
1078
1079static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1080				      struct sk_buff *skb)
1081{
1082	struct hci_cp_le_set_scan_enable *cp;
1083	__u8 status = *((__u8 *) skb->data);
1084
1085	BT_DBG("%s status 0x%x", hdev->name, status);
1086
1087	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1088	if (!cp)
1089		return;
1090
1091	switch (cp->enable) {
1092	case LE_SCANNING_ENABLED:
1093		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1094
1095		if (status) {
1096			hci_dev_lock(hdev);
1097			mgmt_start_discovery_failed(hdev, status);
1098			hci_dev_unlock(hdev);
1099			return;
1100		}
1101
1102		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1103
1104		hci_dev_lock(hdev);
1105		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1106		hci_dev_unlock(hdev);
1107		break;
1108
1109	case LE_SCANNING_DISABLED:
1110		if (status) {
1111			hci_dev_lock(hdev);
1112			mgmt_stop_discovery_failed(hdev, status);
1113			hci_dev_unlock(hdev);
1114			return;
1115		}
1116
1117		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1118
1119		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
1120		    hdev->discovery.state == DISCOVERY_FINDING) {
1121			mgmt_interleaved_discovery(hdev);
1122		} else {
1123			hci_dev_lock(hdev);
1124			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1125			hci_dev_unlock(hdev);
1126		}
1127
1128		break;
1129
1130	default:
1131		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1132		break;
1133	}
1134}
1135
1136static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1137{
1138	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1139
1140	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1141
1142	if (rp->status)
1143		return;
1144
1145	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1146}
1147
1148static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1149{
1150	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1151
1152	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1153
1154	if (rp->status)
1155		return;
1156
1157	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1158}
1159
1160static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1161						  struct sk_buff *skb)
1162{
1163	struct hci_cp_write_le_host_supported *sent;
1164	__u8 status = *((__u8 *) skb->data);
1165
1166	BT_DBG("%s status 0x%x", hdev->name, status);
1167
1168	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1169	if (!sent)
1170		return;
1171
1172	if (!status) {
1173		if (sent->le)
1174			hdev->host_features[0] |= LMP_HOST_LE;
1175		else
1176			hdev->host_features[0] &= ~LMP_HOST_LE;
1177	}
1178
1179	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1180	    !test_bit(HCI_INIT, &hdev->flags))
1181		mgmt_le_enable_complete(hdev, sent->le, status);
1182
1183	hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1184}
1185
1186static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1187{
1188	BT_DBG("%s status 0x%x", hdev->name, status);
1189
1190	if (status) {
1191		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1192		hci_conn_check_pending(hdev);
1193		hci_dev_lock(hdev);
1194		if (test_bit(HCI_MGMT, &hdev->dev_flags))
1195			mgmt_start_discovery_failed(hdev, status);
1196		hci_dev_unlock(hdev);
1197		return;
1198	}
1199
1200	set_bit(HCI_INQUIRY, &hdev->flags);
1201
1202	hci_dev_lock(hdev);
1203	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1204	hci_dev_unlock(hdev);
1205}
1206
1207static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1208{
1209	struct hci_cp_create_conn *cp;
1210	struct hci_conn *conn;
1211
1212	BT_DBG("%s status 0x%x", hdev->name, status);
1213
1214	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1215	if (!cp)
1216		return;
1217
1218	hci_dev_lock(hdev);
1219
1220	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1221
1222	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1223
1224	if (status) {
1225		if (conn && conn->state == BT_CONNECT) {
1226			if (status != 0x0c || conn->attempt > 2) {
1227				conn->state = BT_CLOSED;
1228				hci_proto_connect_cfm(conn, status);
1229				hci_conn_del(conn);
1230			} else
1231				conn->state = BT_CONNECT2;
1232		}
1233	} else {
1234		if (!conn) {
1235			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1236			if (conn) {
1237				conn->out = true;
1238				conn->link_mode |= HCI_LM_MASTER;
1239			} else
1240				BT_ERR("No memory for new connection");
1241		}
1242	}
1243
1244	hci_dev_unlock(hdev);
1245}
1246
1247static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1248{
1249	struct hci_cp_add_sco *cp;
1250	struct hci_conn *acl, *sco;
1251	__u16 handle;
1252
1253	BT_DBG("%s status 0x%x", hdev->name, status);
1254
1255	if (!status)
1256		return;
1257
1258	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1259	if (!cp)
1260		return;
1261
1262	handle = __le16_to_cpu(cp->handle);
1263
1264	BT_DBG("%s handle %d", hdev->name, handle);
1265
1266	hci_dev_lock(hdev);
1267
1268	acl = hci_conn_hash_lookup_handle(hdev, handle);
1269	if (acl) {
1270		sco = acl->link;
1271		if (sco) {
1272			sco->state = BT_CLOSED;
1273
1274			hci_proto_connect_cfm(sco, status);
1275			hci_conn_del(sco);
1276		}
1277	}
1278
1279	hci_dev_unlock(hdev);
1280}
1281
1282static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1283{
1284	struct hci_cp_auth_requested *cp;
1285	struct hci_conn *conn;
1286
1287	BT_DBG("%s status 0x%x", hdev->name, status);
1288
1289	if (!status)
1290		return;
1291
1292	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1293	if (!cp)
1294		return;
1295
1296	hci_dev_lock(hdev);
1297
1298	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1299	if (conn) {
1300		if (conn->state == BT_CONFIG) {
1301			hci_proto_connect_cfm(conn, status);
1302			hci_conn_put(conn);
1303		}
1304	}
1305
1306	hci_dev_unlock(hdev);
1307}
1308
1309static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1310{
1311	struct hci_cp_set_conn_encrypt *cp;
1312	struct hci_conn *conn;
1313
1314	BT_DBG("%s status 0x%x", hdev->name, status);
1315
1316	if (!status)
1317		return;
1318
1319	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1320	if (!cp)
1321		return;
1322
1323	hci_dev_lock(hdev);
1324
1325	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1326	if (conn) {
1327		if (conn->state == BT_CONFIG) {
1328			hci_proto_connect_cfm(conn, status);
1329			hci_conn_put(conn);
1330		}
1331	}
1332
1333	hci_dev_unlock(hdev);
1334}
1335
1336static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1337				    struct hci_conn *conn)
1338{
1339	if (conn->state != BT_CONFIG || !conn->out)
1340		return 0;
1341
1342	if (conn->pending_sec_level == BT_SECURITY_SDP)
1343		return 0;
1344
1345	/* Only request authentication for SSP connections or non-SSP
1346	 * devices with sec_level HIGH or if MITM protection is requested */
1347	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1348	    conn->pending_sec_level != BT_SECURITY_HIGH)
1349		return 0;
1350
1351	return 1;
1352}
1353
1354static inline int hci_resolve_name(struct hci_dev *hdev,
1355				   struct inquiry_entry *e)
1356{
1357	struct hci_cp_remote_name_req cp;
1358
1359	memset(&cp, 0, sizeof(cp));
1360
1361	bacpy(&cp.bdaddr, &e->data.bdaddr);
1362	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1363	cp.pscan_mode = e->data.pscan_mode;
1364	cp.clock_offset = e->data.clock_offset;
1365
1366	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1367}
1368
1369static bool hci_resolve_next_name(struct hci_dev *hdev)
1370{
1371	struct discovery_state *discov = &hdev->discovery;
1372	struct inquiry_entry *e;
1373
1374	if (list_empty(&discov->resolve))
1375		return false;
1376
1377	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1378	if (hci_resolve_name(hdev, e) == 0) {
1379		e->name_state = NAME_PENDING;
1380		return true;
1381	}
1382
1383	return false;
1384}
1385
1386static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1387				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
1388{
1389	struct discovery_state *discov = &hdev->discovery;
1390	struct inquiry_entry *e;
1391
1392	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1393		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1394				      name_len, conn->dev_class);
1395
1396	if (discov->state == DISCOVERY_STOPPED)
1397		return;
1398
1399	if (discov->state == DISCOVERY_STOPPING)
1400		goto discov_complete;
1401
1402	if (discov->state != DISCOVERY_RESOLVING)
1403		return;
1404
1405	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1406	if (e) {
1407		e->name_state = NAME_KNOWN;
1408		list_del(&e->list);
1409		if (name)
1410			mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1411					 e->data.rssi, name, name_len);
1412	}
1413
1414	if (hci_resolve_next_name(hdev))
1415		return;
1416
1417discov_complete:
1418	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1419}
1420
1421static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1422{
1423	struct hci_cp_remote_name_req *cp;
1424	struct hci_conn *conn;
1425
1426	BT_DBG("%s status 0x%x", hdev->name, status);
1427
1428	/* If successful, wait for the name req complete event before
1429	 * checking for the need to do authentication */
1430	if (!status)
1431		return;
1432
1433	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1434	if (!cp)
1435		return;
1436
1437	hci_dev_lock(hdev);
1438
1439	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1440
1441	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1442		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1443
1444	if (!conn)
1445		goto unlock;
1446
1447	if (!hci_outgoing_auth_needed(hdev, conn))
1448		goto unlock;
1449
1450	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1451		struct hci_cp_auth_requested cp;
1452		cp.handle = __cpu_to_le16(conn->handle);
1453		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1454	}
1455
1456unlock:
1457	hci_dev_unlock(hdev);
1458}
1459
1460static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1461{
1462	struct hci_cp_read_remote_features *cp;
1463	struct hci_conn *conn;
1464
1465	BT_DBG("%s status 0x%x", hdev->name, status);
1466
1467	if (!status)
1468		return;
1469
1470	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1471	if (!cp)
1472		return;
1473
1474	hci_dev_lock(hdev);
1475
1476	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1477	if (conn) {
1478		if (conn->state == BT_CONFIG) {
1479			hci_proto_connect_cfm(conn, status);
1480			hci_conn_put(conn);
1481		}
1482	}
1483
1484	hci_dev_unlock(hdev);
1485}
1486
1487static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1488{
1489	struct hci_cp_read_remote_ext_features *cp;
1490	struct hci_conn *conn;
1491
1492	BT_DBG("%s status 0x%x", hdev->name, status);
1493
1494	if (!status)
1495		return;
1496
1497	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1498	if (!cp)
1499		return;
1500
1501	hci_dev_lock(hdev);
1502
1503	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1504	if (conn) {
1505		if (conn->state == BT_CONFIG) {
1506			hci_proto_connect_cfm(conn, status);
1507			hci_conn_put(conn);
1508		}
1509	}
1510
1511	hci_dev_unlock(hdev);
1512}
1513
1514static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1515{
1516	struct hci_cp_setup_sync_conn *cp;
1517	struct hci_conn *acl, *sco;
1518	__u16 handle;
1519
1520	BT_DBG("%s status 0x%x", hdev->name, status);
1521
1522	if (!status)
1523		return;
1524
1525	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1526	if (!cp)
1527		return;
1528
1529	handle = __le16_to_cpu(cp->handle);
1530
1531	BT_DBG("%s handle %d", hdev->name, handle);
1532
1533	hci_dev_lock(hdev);
1534
1535	acl = hci_conn_hash_lookup_handle(hdev, handle);
1536	if (acl) {
1537		sco = acl->link;
1538		if (sco) {
1539			sco->state = BT_CLOSED;
1540
1541			hci_proto_connect_cfm(sco, status);
1542			hci_conn_del(sco);
1543		}
1544	}
1545
1546	hci_dev_unlock(hdev);
1547}
1548
1549static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1550{
1551	struct hci_cp_sniff_mode *cp;
1552	struct hci_conn *conn;
1553
1554	BT_DBG("%s status 0x%x", hdev->name, status);
1555
1556	if (!status)
1557		return;
1558
1559	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1560	if (!cp)
1561		return;
1562
1563	hci_dev_lock(hdev);
1564
1565	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1566	if (conn) {
1567		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1568
1569		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1570			hci_sco_setup(conn, status);
1571	}
1572
1573	hci_dev_unlock(hdev);
1574}
1575
1576static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1577{
1578	struct hci_cp_exit_sniff_mode *cp;
1579	struct hci_conn *conn;
1580
1581	BT_DBG("%s status 0x%x", hdev->name, status);
1582
1583	if (!status)
1584		return;
1585
1586	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1587	if (!cp)
1588		return;
1589
1590	hci_dev_lock(hdev);
1591
1592	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1593	if (conn) {
1594		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1595
1596		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1597			hci_sco_setup(conn, status);
1598	}
1599
1600	hci_dev_unlock(hdev);
1601}
1602
1603static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1604{
1605	struct hci_cp_disconnect *cp;
1606	struct hci_conn *conn;
1607
1608	if (!status)
1609		return;
1610
1611	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1612	if (!cp)
1613		return;
1614
1615	hci_dev_lock(hdev);
1616
1617	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1618	if (conn)
1619		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1620				       conn->dst_type, status);
1621
1622	hci_dev_unlock(hdev);
1623}
1624
1625static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1626{
1627	struct hci_cp_le_create_conn *cp;
1628	struct hci_conn *conn;
1629
1630	BT_DBG("%s status 0x%x", hdev->name, status);
1631
1632	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1633	if (!cp)
1634		return;
1635
1636	hci_dev_lock(hdev);
1637
1638	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1639
1640	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1641	       conn);
1642
1643	if (status) {
1644		if (conn && conn->state == BT_CONNECT) {
1645			conn->state = BT_CLOSED;
1646			mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
1647					    conn->dst_type, status);
1648			hci_proto_connect_cfm(conn, status);
1649			hci_conn_del(conn);
1650		}
1651	} else {
1652		if (!conn) {
1653			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1654			if (conn) {
1655				conn->dst_type = cp->peer_addr_type;
1656				conn->out = true;
1657			} else {
1658				BT_ERR("No memory for new connection");
1659			}
1660		}
1661	}
1662
1663	hci_dev_unlock(hdev);
1664}
1665
1666static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1667{
1668	BT_DBG("%s status 0x%x", hdev->name, status);
1669}
1670
1671static inline void hci_inquiry_complete_evt(struct hci_dev *hdev,
1672					    struct sk_buff *skb)
1673{
1674	__u8 status = *((__u8 *) skb->data);
1675	struct discovery_state *discov = &hdev->discovery;
1676	struct inquiry_entry *e;
1677
1678	BT_DBG("%s status %d", hdev->name, status);
1679
1680	hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1681
1682	hci_conn_check_pending(hdev);
1683
1684	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1685		return;
1686
1687	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1688		return;
1689
1690	hci_dev_lock(hdev);
1691
1692	if (discov->state != DISCOVERY_FINDING)
1693		goto unlock;
1694
1695	if (list_empty(&discov->resolve)) {
1696		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1697		goto unlock;
1698	}
1699
1700	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1701	if (e && hci_resolve_name(hdev, e) == 0) {
1702		e->name_state = NAME_PENDING;
1703		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1704	} else {
1705		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1706	}
1707
1708unlock:
1709	hci_dev_unlock(hdev);
1710}
1711
1712static inline void hci_inquiry_result_evt(struct hci_dev *hdev,
1713					  struct sk_buff *skb)
1714{
1715	struct inquiry_data data;
1716	struct inquiry_info *info = (void *) (skb->data + 1);
1717	int num_rsp = *((__u8 *) skb->data);
1718
1719	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1720
1721	if (!num_rsp)
1722		return;
1723
1724	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1725		return;
1726
1727	hci_dev_lock(hdev);
1728
1729	for (; num_rsp; num_rsp--, info++) {
1730		bool name_known, ssp;
1731
1732		bacpy(&data.bdaddr, &info->bdaddr);
1733		data.pscan_rep_mode	= info->pscan_rep_mode;
1734		data.pscan_period_mode	= info->pscan_period_mode;
1735		data.pscan_mode		= info->pscan_mode;
1736		memcpy(data.dev_class, info->dev_class, 3);
1737		data.clock_offset	= info->clock_offset;
1738		data.rssi		= 0x00;
1739		data.ssp_mode		= 0x00;
1740
1741		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1742		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1743				  info->dev_class, 0, !name_known, ssp, NULL,
1744				  0);
1745	}
1746
1747	hci_dev_unlock(hdev);
1748}
1749
1750static inline void hci_conn_complete_evt(struct hci_dev *hdev,
1751					 struct sk_buff *skb)
1752{
1753	struct hci_ev_conn_complete *ev = (void *) skb->data;
1754	struct hci_conn *conn;
1755
1756	BT_DBG("%s", hdev->name);
1757
1758	hci_dev_lock(hdev);
1759
1760	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1761	if (!conn) {
1762		if (ev->link_type != SCO_LINK)
1763			goto unlock;
1764
1765		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1766		if (!conn)
1767			goto unlock;
1768
1769		conn->type = SCO_LINK;
1770	}
1771
1772	if (!ev->status) {
1773		conn->handle = __le16_to_cpu(ev->handle);
1774
1775		if (conn->type == ACL_LINK) {
1776			conn->state = BT_CONFIG;
1777			hci_conn_hold(conn);
1778			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1779		} else
1780			conn->state = BT_CONNECTED;
1781
1782		hci_conn_hold_device(conn);
1783		hci_conn_add_sysfs(conn);
1784
1785		if (test_bit(HCI_AUTH, &hdev->flags))
1786			conn->link_mode |= HCI_LM_AUTH;
1787
1788		if (test_bit(HCI_ENCRYPT, &hdev->flags))
1789			conn->link_mode |= HCI_LM_ENCRYPT;
1790
1791		/* Get remote features */
1792		if (conn->type == ACL_LINK) {
1793			struct hci_cp_read_remote_features cp;
1794			cp.handle = ev->handle;
1795			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1796				     sizeof(cp), &cp);
1797		}
1798
1799		/* Set packet type for incoming connection */
1800		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1801			struct hci_cp_change_conn_ptype cp;
1802			cp.handle = ev->handle;
1803			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1804			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1805				     &cp);
1806		}
1807	} else {
1808		conn->state = BT_CLOSED;
1809		if (conn->type == ACL_LINK)
1810			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1811					    conn->dst_type, ev->status);
1812	}
1813
1814	if (conn->type == ACL_LINK)
1815		hci_sco_setup(conn, ev->status);
1816
1817	if (ev->status) {
1818		hci_proto_connect_cfm(conn, ev->status);
1819		hci_conn_del(conn);
1820	} else if (ev->link_type != ACL_LINK)
1821		hci_proto_connect_cfm(conn, ev->status);
1822
1823unlock:
1824	hci_dev_unlock(hdev);
1825
1826	hci_conn_check_pending(hdev);
1827}
1828
1829static inline void hci_conn_request_evt(struct hci_dev *hdev,
1830					struct sk_buff *skb)
1831{
1832	struct hci_ev_conn_request *ev = (void *) skb->data;
1833	int mask = hdev->link_mode;
1834
1835	BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr),
1836	       ev->link_type);
1837
1838	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1839
1840	if ((mask & HCI_LM_ACCEPT) &&
1841	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1842		/* Connection accepted */
1843		struct inquiry_entry *ie;
1844		struct hci_conn *conn;
1845
1846		hci_dev_lock(hdev);
1847
1848		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1849		if (ie)
1850			memcpy(ie->data.dev_class, ev->dev_class, 3);
1851
1852		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1853		if (!conn) {
1854			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1855			if (!conn) {
1856				BT_ERR("No memory for new connection");
1857				hci_dev_unlock(hdev);
1858				return;
1859			}
1860		}
1861
1862		memcpy(conn->dev_class, ev->dev_class, 3);
1863		conn->state = BT_CONNECT;
1864
1865		hci_dev_unlock(hdev);
1866
1867		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1868			struct hci_cp_accept_conn_req cp;
1869
1870			bacpy(&cp.bdaddr, &ev->bdaddr);
1871
1872			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1873				cp.role = 0x00; /* Become master */
1874			else
1875				cp.role = 0x01; /* Remain slave */
1876
1877			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1878				     &cp);
1879		} else {
1880			struct hci_cp_accept_sync_conn_req cp;
1881
1882			bacpy(&cp.bdaddr, &ev->bdaddr);
1883			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1884
1885			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
1886			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
1887			cp.max_latency    = cpu_to_le16(0xffff);
1888			cp.content_format = cpu_to_le16(hdev->voice_setting);
1889			cp.retrans_effort = 0xff;
1890
1891			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1892				     sizeof(cp), &cp);
1893		}
1894	} else {
1895		/* Connection rejected */
1896		struct hci_cp_reject_conn_req cp;
1897
1898		bacpy(&cp.bdaddr, &ev->bdaddr);
1899		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1900		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1901	}
1902}
1903
1904static inline void hci_disconn_complete_evt(struct hci_dev *hdev,
1905					    struct sk_buff *skb)
1906{
1907	struct hci_ev_disconn_complete *ev = (void *) skb->data;
1908	struct hci_conn *conn;
1909
1910	BT_DBG("%s status %d", hdev->name, ev->status);
1911
1912	hci_dev_lock(hdev);
1913
1914	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1915	if (!conn)
1916		goto unlock;
1917
1918	if (ev->status == 0)
1919		conn->state = BT_CLOSED;
1920
1921	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1922	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1923		if (ev->status != 0)
1924			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1925					       conn->dst_type, ev->status);
1926		else
1927			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1928						 conn->dst_type);
1929	}
1930
1931	if (ev->status == 0) {
1932		if (conn->type == ACL_LINK && conn->flush_key)
1933			hci_remove_link_key(hdev, &conn->dst);
1934		hci_proto_disconn_cfm(conn, ev->reason);
1935		hci_conn_del(conn);
1936	}
1937
1938unlock:
1939	hci_dev_unlock(hdev);
1940}
1941
1942static inline void hci_auth_complete_evt(struct hci_dev *hdev,
1943					 struct sk_buff *skb)
1944{
1945	struct hci_ev_auth_complete *ev = (void *) skb->data;
1946	struct hci_conn *conn;
1947
1948	BT_DBG("%s status %d", hdev->name, ev->status);
1949
1950	hci_dev_lock(hdev);
1951
1952	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1953	if (!conn)
1954		goto unlock;
1955
1956	if (!ev->status) {
1957		if (!hci_conn_ssp_enabled(conn) &&
1958		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1959			BT_INFO("re-auth of legacy device is not possible.");
1960		} else {
1961			conn->link_mode |= HCI_LM_AUTH;
1962			conn->sec_level = conn->pending_sec_level;
1963		}
1964	} else {
1965		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1966				 ev->status);
1967	}
1968
1969	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1970	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1971
1972	if (conn->state == BT_CONFIG) {
1973		if (!ev->status && hci_conn_ssp_enabled(conn)) {
1974			struct hci_cp_set_conn_encrypt cp;
1975			cp.handle  = ev->handle;
1976			cp.encrypt = 0x01;
1977			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1978				     &cp);
1979		} else {
1980			conn->state = BT_CONNECTED;
1981			hci_proto_connect_cfm(conn, ev->status);
1982			hci_conn_put(conn);
1983		}
1984	} else {
1985		hci_auth_cfm(conn, ev->status);
1986
1987		hci_conn_hold(conn);
1988		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1989		hci_conn_put(conn);
1990	}
1991
1992	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1993		if (!ev->status) {
1994			struct hci_cp_set_conn_encrypt cp;
1995			cp.handle  = ev->handle;
1996			cp.encrypt = 0x01;
1997			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1998				     &cp);
1999		} else {
2000			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2001			hci_encrypt_cfm(conn, ev->status, 0x00);
2002		}
2003	}
2004
2005unlock:
2006	hci_dev_unlock(hdev);
2007}
2008
2009static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2010{
2011	struct hci_ev_remote_name *ev = (void *) skb->data;
2012	struct hci_conn *conn;
2013
2014	BT_DBG("%s", hdev->name);
2015
2016	hci_conn_check_pending(hdev);
2017
2018	hci_dev_lock(hdev);
2019
2020	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2021
2022	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2023		goto check_auth;
2024
2025	if (ev->status == 0)
2026		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2027				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2028	else
2029		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2030
2031check_auth:
2032	if (!conn)
2033		goto unlock;
2034
2035	if (!hci_outgoing_auth_needed(hdev, conn))
2036		goto unlock;
2037
2038	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2039		struct hci_cp_auth_requested cp;
2040		cp.handle = __cpu_to_le16(conn->handle);
2041		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2042	}
2043
2044unlock:
2045	hci_dev_unlock(hdev);
2046}
2047
2048static inline void hci_encrypt_change_evt(struct hci_dev *hdev,
2049					  struct sk_buff *skb)
2050{
2051	struct hci_ev_encrypt_change *ev = (void *) skb->data;
2052	struct hci_conn *conn;
2053
2054	BT_DBG("%s status %d", hdev->name, ev->status);
2055
2056	hci_dev_lock(hdev);
2057
2058	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2059	if (conn) {
2060		if (!ev->status) {
2061			if (ev->encrypt) {
2062				/* Encryption implies authentication */
2063				conn->link_mode |= HCI_LM_AUTH;
2064				conn->link_mode |= HCI_LM_ENCRYPT;
2065				conn->sec_level = conn->pending_sec_level;
2066			} else
2067				conn->link_mode &= ~HCI_LM_ENCRYPT;
2068		}
2069
2070		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2071
2072		if (ev->status && conn->state == BT_CONNECTED) {
2073			hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
2074			hci_conn_put(conn);
2075			goto unlock;
2076		}
2077
2078		if (conn->state == BT_CONFIG) {
2079			if (!ev->status)
2080				conn->state = BT_CONNECTED;
2081
2082			hci_proto_connect_cfm(conn, ev->status);
2083			hci_conn_put(conn);
2084		} else
2085			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2086	}
2087
2088unlock:
2089	hci_dev_unlock(hdev);
2090}
2091
2092static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2093						    struct sk_buff *skb)
2094{
2095	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2096	struct hci_conn *conn;
2097
2098	BT_DBG("%s status %d", hdev->name, ev->status);
2099
2100	hci_dev_lock(hdev);
2101
2102	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2103	if (conn) {
2104		if (!ev->status)
2105			conn->link_mode |= HCI_LM_SECURE;
2106
2107		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2108
2109		hci_key_change_cfm(conn, ev->status);
2110	}
2111
2112	hci_dev_unlock(hdev);
2113}
2114
2115static inline void hci_remote_features_evt(struct hci_dev *hdev,
2116					   struct sk_buff *skb)
2117{
2118	struct hci_ev_remote_features *ev = (void *) skb->data;
2119	struct hci_conn *conn;
2120
2121	BT_DBG("%s status %d", hdev->name, ev->status);
2122
2123	hci_dev_lock(hdev);
2124
2125	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2126	if (!conn)
2127		goto unlock;
2128
2129	if (!ev->status)
2130		memcpy(conn->features, ev->features, 8);
2131
2132	if (conn->state != BT_CONFIG)
2133		goto unlock;
2134
2135	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2136		struct hci_cp_read_remote_ext_features cp;
2137		cp.handle = ev->handle;
2138		cp.page = 0x01;
2139		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2140			     sizeof(cp), &cp);
2141		goto unlock;
2142	}
2143
2144	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2145		struct hci_cp_remote_name_req cp;
2146		memset(&cp, 0, sizeof(cp));
2147		bacpy(&cp.bdaddr, &conn->dst);
2148		cp.pscan_rep_mode = 0x02;
2149		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2150	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2151		mgmt_device_connected(hdev, &conn->dst, conn->type,
2152				      conn->dst_type, 0, NULL, 0,
2153				      conn->dev_class);
2154
2155	if (!hci_outgoing_auth_needed(hdev, conn)) {
2156		conn->state = BT_CONNECTED;
2157		hci_proto_connect_cfm(conn, ev->status);
2158		hci_conn_put(conn);
2159	}
2160
2161unlock:
2162	hci_dev_unlock(hdev);
2163}
2164
2165static inline void hci_remote_version_evt(struct hci_dev *hdev,
2166					  struct sk_buff *skb)
2167{
2168	BT_DBG("%s", hdev->name);
2169}
2170
2171static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev,
2172					      struct sk_buff *skb)
2173{
2174	BT_DBG("%s", hdev->name);
2175}
2176
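/* Command Complete: strip the event header and dispatch to the matching
 * hci_cc_* handler based on the opcode. The Num_HCI_Command_Packets value
 * re-arms the command queue, and the command timer is cancelled for every
 * opcode except NOP. */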
2177static inline void hci_cmd_complete_evt(struct hci_dev *hdev,
2178					struct sk_buff *skb)
2179{
2180	struct hci_ev_cmd_complete *ev = (void *) skb->data;
2181	__u16 opcode;
2182
2183	skb_pull(skb, sizeof(*ev));
2184
2185	opcode = __le16_to_cpu(ev->opcode);
2186
2187	switch (opcode) {
2188	case HCI_OP_INQUIRY_CANCEL:
2189		hci_cc_inquiry_cancel(hdev, skb);
2190		break;
2191
2192	case HCI_OP_PERIODIC_INQ:
2193		hci_cc_periodic_inq(hdev, skb);
2194		break;
2195
2196	case HCI_OP_EXIT_PERIODIC_INQ:
2197		hci_cc_exit_periodic_inq(hdev, skb);
2198		break;
2199
2200	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2201		hci_cc_remote_name_req_cancel(hdev, skb);
2202		break;
2203
2204	case HCI_OP_ROLE_DISCOVERY:
2205		hci_cc_role_discovery(hdev, skb);
2206		break;
2207
2208	case HCI_OP_READ_LINK_POLICY:
2209		hci_cc_read_link_policy(hdev, skb);
2210		break;
2211
2212	case HCI_OP_WRITE_LINK_POLICY:
2213		hci_cc_write_link_policy(hdev, skb);
2214		break;
2215
2216	case HCI_OP_READ_DEF_LINK_POLICY:
2217		hci_cc_read_def_link_policy(hdev, skb);
2218		break;
2219
2220	case HCI_OP_WRITE_DEF_LINK_POLICY:
2221		hci_cc_write_def_link_policy(hdev, skb);
2222		break;
2223
2224	case HCI_OP_RESET:
2225		hci_cc_reset(hdev, skb);
2226		break;
2227
2228	case HCI_OP_WRITE_LOCAL_NAME:
2229		hci_cc_write_local_name(hdev, skb);
2230		break;
2231
2232	case HCI_OP_READ_LOCAL_NAME:
2233		hci_cc_read_local_name(hdev, skb);
2234		break;
2235
2236	case HCI_OP_WRITE_AUTH_ENABLE:
2237		hci_cc_write_auth_enable(hdev, skb);
2238		break;
2239
2240	case HCI_OP_WRITE_ENCRYPT_MODE:
2241		hci_cc_write_encrypt_mode(hdev, skb);
2242		break;
2243
2244	case HCI_OP_WRITE_SCAN_ENABLE:
2245		hci_cc_write_scan_enable(hdev, skb);
2246		break;
2247
2248	case HCI_OP_READ_CLASS_OF_DEV:
2249		hci_cc_read_class_of_dev(hdev, skb);
2250		break;
2251
2252	case HCI_OP_WRITE_CLASS_OF_DEV:
2253		hci_cc_write_class_of_dev(hdev, skb);
2254		break;
2255
2256	case HCI_OP_READ_VOICE_SETTING:
2257		hci_cc_read_voice_setting(hdev, skb);
2258		break;
2259
2260	case HCI_OP_WRITE_VOICE_SETTING:
2261		hci_cc_write_voice_setting(hdev, skb);
2262		break;
2263
2264	case HCI_OP_HOST_BUFFER_SIZE:
2265		hci_cc_host_buffer_size(hdev, skb);
2266		break;
2267
2268	case HCI_OP_WRITE_SSP_MODE:
2269		hci_cc_write_ssp_mode(hdev, skb);
2270		break;
2271
2272	case HCI_OP_READ_LOCAL_VERSION:
2273		hci_cc_read_local_version(hdev, skb);
2274		break;
2275
2276	case HCI_OP_READ_LOCAL_COMMANDS:
2277		hci_cc_read_local_commands(hdev, skb);
2278		break;
2279
2280	case HCI_OP_READ_LOCAL_FEATURES:
2281		hci_cc_read_local_features(hdev, skb);
2282		break;
2283
2284	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2285		hci_cc_read_local_ext_features(hdev, skb);
2286		break;
2287
2288	case HCI_OP_READ_BUFFER_SIZE:
2289		hci_cc_read_buffer_size(hdev, skb);
2290		break;
2291
2292	case HCI_OP_READ_BD_ADDR:
2293		hci_cc_read_bd_addr(hdev, skb);
2294		break;
2295
2296	case HCI_OP_READ_DATA_BLOCK_SIZE:
2297		hci_cc_read_data_block_size(hdev, skb);
2298		break;
2299
2300	case HCI_OP_WRITE_CA_TIMEOUT:
2301		hci_cc_write_ca_timeout(hdev, skb);
2302		break;
2303
2304	case HCI_OP_READ_FLOW_CONTROL_MODE:
2305		hci_cc_read_flow_control_mode(hdev, skb);
2306		break;
2307
2308	case HCI_OP_READ_LOCAL_AMP_INFO:
2309		hci_cc_read_local_amp_info(hdev, skb);
2310		break;
2311
2312	case HCI_OP_DELETE_STORED_LINK_KEY:
2313		hci_cc_delete_stored_link_key(hdev, skb);
2314		break;
2315
2316	case HCI_OP_SET_EVENT_MASK:
2317		hci_cc_set_event_mask(hdev, skb);
2318		break;
2319
2320	case HCI_OP_WRITE_INQUIRY_MODE:
2321		hci_cc_write_inquiry_mode(hdev, skb);
2322		break;
2323
2324	case HCI_OP_READ_INQ_RSP_TX_POWER:
2325		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2326		break;
2327
2328	case HCI_OP_SET_EVENT_FLT:
2329		hci_cc_set_event_flt(hdev, skb);
2330		break;
2331
2332	case HCI_OP_PIN_CODE_REPLY:
2333		hci_cc_pin_code_reply(hdev, skb);
2334		break;
2335
2336	case HCI_OP_PIN_CODE_NEG_REPLY:
2337		hci_cc_pin_code_neg_reply(hdev, skb);
2338		break;
2339
2340	case HCI_OP_READ_LOCAL_OOB_DATA:
2341		hci_cc_read_local_oob_data_reply(hdev, skb);
2342		break;
2343
2344	case HCI_OP_LE_READ_BUFFER_SIZE:
2345		hci_cc_le_read_buffer_size(hdev, skb);
2346		break;
2347
2348	case HCI_OP_USER_CONFIRM_REPLY:
2349		hci_cc_user_confirm_reply(hdev, skb);
2350		break;
2351
2352	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2353		hci_cc_user_confirm_neg_reply(hdev, skb);
2354		break;
2355
2356	case HCI_OP_USER_PASSKEY_REPLY:
2357		hci_cc_user_passkey_reply(hdev, skb);
2358		break;
2359
2360	case HCI_OP_USER_PASSKEY_NEG_REPLY:
2361		hci_cc_user_passkey_neg_reply(hdev, skb);
2362		break;
2363
2364	case HCI_OP_LE_SET_SCAN_PARAM:
2365		hci_cc_le_set_scan_param(hdev, skb);
2366		break;
2367
2368	case HCI_OP_LE_SET_SCAN_ENABLE:
2369		hci_cc_le_set_scan_enable(hdev, skb);
2370		break;
2371
2372	case HCI_OP_LE_LTK_REPLY:
2373		hci_cc_le_ltk_reply(hdev, skb);
2374		break;
2375
2376	case HCI_OP_LE_LTK_NEG_REPLY:
2377		hci_cc_le_ltk_neg_reply(hdev, skb);
2378		break;
2379
2380	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2381		hci_cc_write_le_host_supported(hdev, skb);
2382		break;
2383
2384	default:
2385		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2386		break;
2387	}
2388
2389	if (opcode != HCI_OP_NOP)
2390		del_timer(&hdev->cmd_timer);
2391
2392	if (ev->ncmd) {
2393		atomic_set(&hdev->cmd_cnt, 1);
2394		if (!skb_queue_empty(&hdev->cmd_q))
2395			queue_work(hdev->workqueue, &hdev->cmd_work);
2396	}
2397}
2398
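/* Command Status: report the status of commands that complete
 * asynchronously (inquiry, connection setup, etc.) to the hci_cs_*
 * handlers, then re-arm the command queue unless a reset is in progress. */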
2399static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2400{
2401	struct hci_ev_cmd_status *ev = (void *) skb->data;
2402	__u16 opcode;
2403
2404	skb_pull(skb, sizeof(*ev));
2405
2406	opcode = __le16_to_cpu(ev->opcode);
2407
2408	switch (opcode) {
2409	case HCI_OP_INQUIRY:
2410		hci_cs_inquiry(hdev, ev->status);
2411		break;
2412
2413	case HCI_OP_CREATE_CONN:
2414		hci_cs_create_conn(hdev, ev->status);
2415		break;
2416
2417	case HCI_OP_ADD_SCO:
2418		hci_cs_add_sco(hdev, ev->status);
2419		break;
2420
2421	case HCI_OP_AUTH_REQUESTED:
2422		hci_cs_auth_requested(hdev, ev->status);
2423		break;
2424
2425	case HCI_OP_SET_CONN_ENCRYPT:
2426		hci_cs_set_conn_encrypt(hdev, ev->status);
2427		break;
2428
2429	case HCI_OP_REMOTE_NAME_REQ:
2430		hci_cs_remote_name_req(hdev, ev->status);
2431		break;
2432
2433	case HCI_OP_READ_REMOTE_FEATURES:
2434		hci_cs_read_remote_features(hdev, ev->status);
2435		break;
2436
2437	case HCI_OP_READ_REMOTE_EXT_FEATURES:
2438		hci_cs_read_remote_ext_features(hdev, ev->status);
2439		break;
2440
2441	case HCI_OP_SETUP_SYNC_CONN:
2442		hci_cs_setup_sync_conn(hdev, ev->status);
2443		break;
2444
2445	case HCI_OP_SNIFF_MODE:
2446		hci_cs_sniff_mode(hdev, ev->status);
2447		break;
2448
2449	case HCI_OP_EXIT_SNIFF_MODE:
2450		hci_cs_exit_sniff_mode(hdev, ev->status);
2451		break;
2452
2453	case HCI_OP_DISCONNECT:
2454		hci_cs_disconnect(hdev, ev->status);
2455		break;
2456
2457	case HCI_OP_LE_CREATE_CONN:
2458		hci_cs_le_create_conn(hdev, ev->status);
2459		break;
2460
2461	case HCI_OP_LE_START_ENC:
2462		hci_cs_le_start_enc(hdev, ev->status);
2463		break;
2464
2465	default:
2466		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2467		break;
2468	}
2469
2470	if (opcode != HCI_OP_NOP)
2471		del_timer(&hdev->cmd_timer);
2472
2473	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2474		atomic_set(&hdev->cmd_cnt, 1);
2475		if (!skb_queue_empty(&hdev->cmd_q))
2476			queue_work(hdev->workqueue, &hdev->cmd_work);
2477	}
2478}
2479
2480static inline void hci_role_change_evt(struct hci_dev *hdev,
2481				       struct sk_buff *skb)
2482{
2483	struct hci_ev_role_change *ev = (void *) skb->data;
2484	struct hci_conn *conn;
2485
2486	BT_DBG("%s status %d", hdev->name, ev->status);
2487
2488	hci_dev_lock(hdev);
2489
2490	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2491	if (conn) {
2492		if (!ev->status) {
2493			if (ev->role)
2494				conn->link_mode &= ~HCI_LM_MASTER;
2495			else
2496				conn->link_mode |= HCI_LM_MASTER;
2497		}
2498
2499		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2500
2501		hci_role_switch_cfm(conn, ev->status, ev->role);
2502	}
2503
2504	hci_dev_unlock(hdev);
2505}
2506
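/* Number of Completed Packets: packet-based flow control. Return the
 * completed-packet credits to the ACL, LE or SCO pools (capped at the
 * controller-reported maximums) and restart the TX work. LE links share
 * the ACL pool when the controller has no dedicated LE buffers. */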
2507static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev,
2508					 struct sk_buff *skb)
2509{
2510	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2511	int i;
2512
2513	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2514		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2515		return;
2516	}
2517
2518	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2519	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2520		BT_DBG("%s bad parameters", hdev->name);
2521		return;
2522	}
2523
2524	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2525
2526	for (i = 0; i < ev->num_hndl; i++) {
2527		struct hci_comp_pkts_info *info = &ev->handles[i];
2528		struct hci_conn *conn;
2529		__u16  handle, count;
2530
2531		handle = __le16_to_cpu(info->handle);
2532		count  = __le16_to_cpu(info->count);
2533
2534		conn = hci_conn_hash_lookup_handle(hdev, handle);
2535		if (!conn)
2536			continue;
2537
2538		conn->sent -= count;
2539
2540		switch (conn->type) {
2541		case ACL_LINK:
2542			hdev->acl_cnt += count;
2543			if (hdev->acl_cnt > hdev->acl_pkts)
2544				hdev->acl_cnt = hdev->acl_pkts;
2545			break;
2546
2547		case LE_LINK:
2548			if (hdev->le_pkts) {
2549				hdev->le_cnt += count;
2550				if (hdev->le_cnt > hdev->le_pkts)
2551					hdev->le_cnt = hdev->le_pkts;
2552			} else {
2553				hdev->acl_cnt += count;
2554				if (hdev->acl_cnt > hdev->acl_pkts)
2555					hdev->acl_cnt = hdev->acl_pkts;
2556			}
2557			break;
2558
2559		case SCO_LINK:
2560			hdev->sco_cnt += count;
2561			if (hdev->sco_cnt > hdev->sco_pkts)
2562				hdev->sco_cnt = hdev->sco_pkts;
2563			break;
2564
2565		default:
2566			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2567			break;
2568		}
2569	}
2570
2571	queue_work(hdev->workqueue, &hdev->tx_work);
2572}
2573
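/* Number of Completed Data Blocks: block-based flow control counterpart
 * of the packet-based handler above, returning data block credits
 * instead of packet credits. */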
2574static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2575					   struct sk_buff *skb)
2576{
2577	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2578	int i;
2579
2580	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2581		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2582		return;
2583	}
2584
2585	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2586	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2587		BT_DBG("%s bad parameters", hdev->name);
2588		return;
2589	}
2590
2591	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2592	       ev->num_hndl);
2593
2594	for (i = 0; i < ev->num_hndl; i++) {
2595		struct hci_comp_blocks_info *info = &ev->handles[i];
2596		struct hci_conn *conn;
2597		__u16  handle, block_count;
2598
2599		handle = __le16_to_cpu(info->handle);
2600		block_count = __le16_to_cpu(info->blocks);
2601
2602		conn = hci_conn_hash_lookup_handle(hdev, handle);
2603		if (!conn)
2604			continue;
2605
2606		conn->sent -= block_count;
2607
2608		switch (conn->type) {
2609		case ACL_LINK:
2610			hdev->block_cnt += block_count;
2611			if (hdev->block_cnt > hdev->num_blocks)
2612				hdev->block_cnt = hdev->num_blocks;
2613			break;
2614
2615		default:
2616			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2617			break;
2618		}
2619	}
2620
2621	queue_work(hdev->workqueue, &hdev->tx_work);
2622}
2623
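/* Mode Change: track whether the link is in active or sniff mode and, if
 * a SCO setup was waiting for this mode change, continue it now. */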
2624static inline void hci_mode_change_evt(struct hci_dev *hdev,
2625				       struct sk_buff *skb)
2626{
2627	struct hci_ev_mode_change *ev = (void *) skb->data;
2628	struct hci_conn *conn;
2629
2630	BT_DBG("%s status %d", hdev->name, ev->status);
2631
2632	hci_dev_lock(hdev);
2633
2634	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2635	if (conn) {
2636		conn->mode = ev->mode;
2637		conn->interval = __le16_to_cpu(ev->interval);
2638
2639		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2640			if (conn->mode == HCI_CM_ACTIVE)
2641				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2642			else
2643				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2644		}
2645
2646		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2647			hci_sco_setup(conn, ev->status);
2648	}
2649
2650	hci_dev_unlock(hdev);
2651}
2652
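/* PIN Code Request: send a negative reply right away if the device is not
 * pairable, otherwise let user space provide the PIN, flagging whether a
 * secure (16 digit) PIN is required. */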
2653static inline void hci_pin_code_request_evt(struct hci_dev *hdev,
2654					    struct sk_buff *skb)
2655{
2656	struct hci_ev_pin_code_req *ev = (void *) skb->data;
2657	struct hci_conn *conn;
2658
2659	BT_DBG("%s", hdev->name);
2660
2661	hci_dev_lock(hdev);
2662
2663	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2664	if (!conn)
2665		goto unlock;
2666
2667	if (conn->state == BT_CONNECTED) {
2668		hci_conn_hold(conn);
2669		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2670		hci_conn_put(conn);
2671	}
2672
2673	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2674		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2675			     sizeof(ev->bdaddr), &ev->bdaddr);
2676	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2677		u8 secure;
2678
2679		if (conn->pending_sec_level == BT_SECURITY_HIGH)
2680			secure = 1;
2681		else
2682			secure = 0;
2683
2684		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2685	}
2686
2687unlock:
2688	hci_dev_unlock(hdev);
2689}
2690
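/* Link Key Request: look up a stored link key for the peer and reply with
 * it, unless the key is too weak for the pending security level (debug
 * keys, unauthenticated keys, or combination keys created from a short
 * PIN); in that case send a negative reply. */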
2691static inline void hci_link_key_request_evt(struct hci_dev *hdev,
2692					    struct sk_buff *skb)
2693{
2694	struct hci_ev_link_key_req *ev = (void *) skb->data;
2695	struct hci_cp_link_key_reply cp;
2696	struct hci_conn *conn;
2697	struct link_key *key;
2698
2699	BT_DBG("%s", hdev->name);
2700
2701	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2702		return;
2703
2704	hci_dev_lock(hdev);
2705
2706	key = hci_find_link_key(hdev, &ev->bdaddr);
2707	if (!key) {
2708		BT_DBG("%s link key not found for %s", hdev->name,
2709		       batostr(&ev->bdaddr));
2710		goto not_found;
2711	}
2712
2713	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2714	       batostr(&ev->bdaddr));
2715
2716	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2717	    key->type == HCI_LK_DEBUG_COMBINATION) {
2718		BT_DBG("%s ignoring debug key", hdev->name);
2719		goto not_found;
2720	}
2721
2722	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2723	if (conn) {
2724		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2725		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2726			BT_DBG("%s ignoring unauthenticated key", hdev->name);
2727			goto not_found;
2728		}
2729
2730		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2731		    conn->pending_sec_level == BT_SECURITY_HIGH) {
2732			BT_DBG("%s ignoring key unauthenticated for high security",
2733			       hdev->name);
2734			goto not_found;
2735		}
2736
2737		conn->key_type = key->type;
2738		conn->pin_length = key->pin_len;
2739	}
2740
2741	bacpy(&cp.bdaddr, &ev->bdaddr);
2742	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2743
2744	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2745
2746	hci_dev_unlock(hdev);
2747
2748	return;
2749
2750not_found:
2751	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, sizeof(ev->bdaddr), &ev->bdaddr);
2752	hci_dev_unlock(hdev);
2753}
2754
2755static inline void hci_link_key_notify_evt(struct hci_dev *hdev,
2756					   struct sk_buff *skb)
2757{
2758	struct hci_ev_link_key_notify *ev = (void *) skb->data;
2759	struct hci_conn *conn;
2760	u8 pin_len = 0;
2761
2762	BT_DBG("%s", hdev->name);
2763
2764	hci_dev_lock(hdev);
2765
2766	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2767	if (conn) {
2768		hci_conn_hold(conn);
2769		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2770		pin_len = conn->pin_length;
2771
2772		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2773			conn->key_type = ev->key_type;
2774
2775		hci_conn_put(conn);
2776	}
2777
2778	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2779		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2780				 ev->key_type, pin_len);
2781
2782	hci_dev_unlock(hdev);
2783}
2784
2785static inline void hci_clock_offset_evt(struct hci_dev *hdev,
2786					struct sk_buff *skb)
2787{
2788	struct hci_ev_clock_offset *ev = (void *) skb->data;
2789	struct hci_conn *conn;
2790
2791	BT_DBG("%s status %d", hdev->name, ev->status);
2792
2793	hci_dev_lock(hdev);
2794
2795	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2796	if (conn && !ev->status) {
2797		struct inquiry_entry *ie;
2798
2799		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2800		if (ie) {
2801			ie->data.clock_offset = ev->clock_offset;
2802			ie->timestamp = jiffies;
2803		}
2804	}
2805
2806	hci_dev_unlock(hdev);
2807}
2808
2809static inline void hci_pkt_type_change_evt(struct hci_dev *hdev,
2810					   struct sk_buff *skb)
2811{
2812	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2813	struct hci_conn *conn;
2814
2815	BT_DBG("%s status %d", hdev->name, ev->status);
2816
2817	hci_dev_lock(hdev);
2818
2819	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2820	if (conn && !ev->status)
2821		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2822
2823	hci_dev_unlock(hdev);
2824}
2825
2826static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev,
2827					  struct sk_buff *skb)
2828{
2829	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2830	struct inquiry_entry *ie;
2831
2832	BT_DBG("%s", hdev->name);
2833
2834	hci_dev_lock(hdev);
2835
2836	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2837	if (ie) {
2838		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2839		ie->timestamp = jiffies;
2840	}
2841
2842	hci_dev_unlock(hdev);
2843}
2844
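/* Inquiry Result with RSSI: responses arrive in two formats that differ
 * only by an extra pscan_mode byte, so the per-response size selects the
 * right structure before the inquiry cache and the management interface
 * are updated. */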
2845static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2846						    struct sk_buff *skb)
2847{
2848	struct inquiry_data data;
2849	int num_rsp = *((__u8 *) skb->data);
2850	bool name_known, ssp;
2851
2852	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2853
2854	if (!num_rsp)
2855		return;
2856
2857	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2858		return;
2859
2860	hci_dev_lock(hdev);
2861
2862	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2863		struct inquiry_info_with_rssi_and_pscan_mode *info;
2864		info = (void *) (skb->data + 1);
2865
2866		for (; num_rsp; num_rsp--, info++) {
2867			bacpy(&data.bdaddr, &info->bdaddr);
2868			data.pscan_rep_mode	= info->pscan_rep_mode;
2869			data.pscan_period_mode	= info->pscan_period_mode;
2870			data.pscan_mode		= info->pscan_mode;
2871			memcpy(data.dev_class, info->dev_class, 3);
2872			data.clock_offset	= info->clock_offset;
2873			data.rssi		= info->rssi;
2874			data.ssp_mode		= 0x00;
2875
2876			name_known = hci_inquiry_cache_update(hdev, &data,
2877							      false, &ssp);
2878			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2879					  info->dev_class, info->rssi,
2880					  !name_known, ssp, NULL, 0);
2881		}
2882	} else {
2883		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2884
2885		for (; num_rsp; num_rsp--, info++) {
2886			bacpy(&data.bdaddr, &info->bdaddr);
2887			data.pscan_rep_mode	= info->pscan_rep_mode;
2888			data.pscan_period_mode	= info->pscan_period_mode;
2889			data.pscan_mode		= 0x00;
2890			memcpy(data.dev_class, info->dev_class, 3);
2891			data.clock_offset	= info->clock_offset;
2892			data.rssi		= info->rssi;
2893			data.ssp_mode		= 0x00;
2894			name_known = hci_inquiry_cache_update(hdev, &data,
2895							      false, &ssp);
2896			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2897					  info->dev_class, info->rssi,
2898					  !name_known, ssp, NULL, 0);
2899		}
2900	}
2901
2902	hci_dev_unlock(hdev);
2903}
2904
2905static inline void hci_remote_ext_features_evt(struct hci_dev *hdev,
2906					       struct sk_buff *skb)
2907{
2908	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2909	struct hci_conn *conn;
2910
2911	BT_DBG("%s", hdev->name);
2912
2913	hci_dev_lock(hdev);
2914
2915	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2916	if (!conn)
2917		goto unlock;
2918
2919	if (!ev->status && ev->page == 0x01) {
2920		struct inquiry_entry *ie;
2921
2922		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2923		if (ie)
2924			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2925
2926		if (ev->features[0] & LMP_HOST_SSP)
2927			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2928	}
2929
2930	if (conn->state != BT_CONFIG)
2931		goto unlock;
2932
2933	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2934		struct hci_cp_remote_name_req cp;
2935		memset(&cp, 0, sizeof(cp));
2936		bacpy(&cp.bdaddr, &conn->dst);
2937		cp.pscan_rep_mode = 0x02;
2938		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2939	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2940		mgmt_device_connected(hdev, &conn->dst, conn->type,
2941				      conn->dst_type, 0, NULL, 0,
2942				      conn->dev_class);
2943
2944	if (!hci_outgoing_auth_needed(hdev, conn)) {
2945		conn->state = BT_CONNECTED;
2946		hci_proto_connect_cfm(conn, ev->status);
2947		hci_conn_put(conn);
2948	}
2949
2950unlock:
2951	hci_dev_unlock(hdev);
2952}
2953
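/* Synchronous Connection Complete: an eSCO setup may be established as a
 * plain SCO link, in which case the existing eSCO connection object is
 * reused. A few error codes trigger a single retry with a restricted
 * packet type before the connection is closed for good. */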
2954static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2955					      struct sk_buff *skb)
2956{
2957	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2958	struct hci_conn *conn;
2959
2960	BT_DBG("%s status %d", hdev->name, ev->status);
2961
2962	hci_dev_lock(hdev);
2963
2964	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2965	if (!conn) {
2966		if (ev->link_type == ESCO_LINK)
2967			goto unlock;
2968
2969		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2970		if (!conn)
2971			goto unlock;
2972
2973		conn->type = SCO_LINK;
2974	}
2975
2976	switch (ev->status) {
2977	case 0x00:
2978		conn->handle = __le16_to_cpu(ev->handle);
2979		conn->state  = BT_CONNECTED;
2980
2981		hci_conn_hold_device(conn);
2982		hci_conn_add_sysfs(conn);
2983		break;
2984
2985	case 0x11:	/* Unsupported Feature or Parameter Value */
2986	case 0x1c:	/* SCO interval rejected */
2987	case 0x1a:	/* Unsupported Remote Feature */
2988	case 0x1f:	/* Unspecified error */
2989		if (conn->out && conn->attempt < 2) {
2990			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2991					(hdev->esco_type & EDR_ESCO_MASK);
2992			hci_setup_sync(conn, conn->link->handle);
2993			goto unlock;
2994		}
2995		/* fall through */
2996
2997	default:
2998		conn->state = BT_CLOSED;
2999		break;
3000	}
3001
3002	hci_proto_connect_cfm(conn, ev->status);
3003	if (ev->status)
3004		hci_conn_del(conn);
3005
3006unlock:
3007	hci_dev_unlock(hdev);
3008}
3009
3010static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev,
3011					     struct sk_buff *skb)
3012{
3013	BT_DBG("%s", hdev->name);
3014}
3015
3016static inline void hci_sniff_subrate_evt(struct hci_dev *hdev,
3017					 struct sk_buff *skb)
3018{
3019	struct hci_ev_sniff_subrate *ev = (void *) skb->data;
3020
3021	BT_DBG("%s status %d", hdev->name, ev->status);
3022}
3023
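/* Extended Inquiry Result: like the RSSI variant, but each response also
 * carries EIR data which may already include the complete remote name. */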
3024static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3025						   struct sk_buff *skb)
3026{
3027	struct inquiry_data data;
3028	struct extended_inquiry_info *info = (void *) (skb->data + 1);
3029	int num_rsp = *((__u8 *) skb->data);
3030	size_t eir_len;
3031
3032	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3033
3034	if (!num_rsp)
3035		return;
3036
3037	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3038		return;
3039
3040	hci_dev_lock(hdev);
3041
3042	for (; num_rsp; num_rsp--, info++) {
3043		bool name_known, ssp;
3044
3045		bacpy(&data.bdaddr, &info->bdaddr);
3046		data.pscan_rep_mode	= info->pscan_rep_mode;
3047		data.pscan_period_mode	= info->pscan_period_mode;
3048		data.pscan_mode		= 0x00;
3049		memcpy(data.dev_class, info->dev_class, 3);
3050		data.clock_offset	= info->clock_offset;
3051		data.rssi		= info->rssi;
3052		data.ssp_mode		= 0x01;
3053
3054		if (test_bit(HCI_MGMT, &hdev->dev_flags))
3055			name_known = eir_has_data_type(info->data,
3056						       sizeof(info->data),
3057						       EIR_NAME_COMPLETE);
3058		else
3059			name_known = true;
3060
3061		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3062						      &ssp);
3063		eir_len = eir_get_length(info->data, sizeof(info->data));
3064		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3065				  info->dev_class, info->rssi, !name_known,
3066				  ssp, info->data, eir_len);
3067	}
3068
3069	hci_dev_unlock(hdev);
3070}
3071
3072static inline u8 hci_get_auth_req(struct hci_conn *conn)
3073{
3074	/* If remote requests dedicated bonding follow that lead */
3075	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3076		/* Require MITM protection only if neither side is
3077		 * NoInputNoOutput, i.e. both sides can support it */
3078		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3079			return 0x02;
3080		else
3081			return 0x03;
3082	}
3083
3084	/* If remote requests no-bonding follow that lead */
3085	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3086		return conn->remote_auth | (conn->auth_type & 0x01);
3087
3088	return conn->auth_type;
3089}
3090
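/* IO Capability Request: reply with our IO capability, the negotiated
 * authentication requirements and whether OOB data is available, or send
 * a negative reply when pairing is not currently allowed. */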
3091static inline void hci_io_capa_request_evt(struct hci_dev *hdev,
3092					   struct sk_buff *skb)
3093{
3094	struct hci_ev_io_capa_request *ev = (void *) skb->data;
3095	struct hci_conn *conn;
3096
3097	BT_DBG("%s", hdev->name);
3098
3099	hci_dev_lock(hdev);
3100
3101	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3102	if (!conn)
3103		goto unlock;
3104
3105	hci_conn_hold(conn);
3106
3107	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3108		goto unlock;
3109
3110	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3111	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3112		struct hci_cp_io_capability_reply cp;
3113
3114		bacpy(&cp.bdaddr, &ev->bdaddr);
3115		/* KeyboardDisplay is not a valid IO capability for BR/EDR,
3116		 * so map it to DisplayYesNo. */
3117		cp.capability = (conn->io_capability == 0x04) ?
3118						0x01 : conn->io_capability;
3119		conn->auth_type = hci_get_auth_req(conn);
3120		cp.authentication = conn->auth_type;
3121
3122		if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
3123		    hci_find_remote_oob_data(hdev, &conn->dst))
3124			cp.oob_data = 0x01;
3125		else
3126			cp.oob_data = 0x00;
3127
3128		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3129			     sizeof(cp), &cp);
3130	} else {
3131		struct hci_cp_io_capability_neg_reply cp;
3132
3133		bacpy(&cp.bdaddr, &ev->bdaddr);
3134		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3135
3136		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3137			     sizeof(cp), &cp);
3138	}
3139
3140unlock:
3141	hci_dev_unlock(hdev);
3142}
3143
3144static inline void hci_io_capa_reply_evt(struct hci_dev *hdev,
3145					 struct sk_buff *skb)
3146{
3147	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3148	struct hci_conn *conn;
3149
3150	BT_DBG("%s", hdev->name);
3151
3152	hci_dev_lock(hdev);
3153
3154	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3155	if (!conn)
3156		goto unlock;
3157
3158	conn->remote_cap = ev->capability;
3159	conn->remote_auth = ev->authentication;
3160	if (ev->oob_data)
3161		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3162
3163unlock:
3164	hci_dev_unlock(hdev);
3165}
3166
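/* User Confirmation Request: decide whether the numeric comparison can be
 * auto-accepted (neither side requires MITM protection) or has to be
 * passed to user space via the management interface. */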
3167static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3168						struct sk_buff *skb)
3169{
3170	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3171	int loc_mitm, rem_mitm, confirm_hint = 0;
3172	struct hci_conn *conn;
3173
3174	BT_DBG("%s", hdev->name);
3175
3176	hci_dev_lock(hdev);
3177
3178	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3179		goto unlock;
3180
3181	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3182	if (!conn)
3183		goto unlock;
3184
3185	loc_mitm = (conn->auth_type & 0x01);
3186	rem_mitm = (conn->remote_auth & 0x01);
3187
3188	/* If we require MITM but the remote device can't provide that
3189	 * (it has NoInputNoOutput) then reject the confirmation
3190	 * request. The only exception is when we're dedicated bonding
3191	 * initiators (connect_cfm_cb set) since then we always have the MITM
3192	 * bit set. */
3193	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3194		BT_DBG("Rejecting request: remote device can't provide MITM");
3195		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3196			     sizeof(ev->bdaddr), &ev->bdaddr);
3197		goto unlock;
3198	}
3199
3200	/* If neither side requires MITM protection, auto-accept */
3201	if ((!loc_mitm || conn->remote_cap == 0x03) &&
3202	    (!rem_mitm || conn->io_capability == 0x03)) {
3203
3204		/* If we're not the initiators, request authorization to
3205		 * proceed from user space (mgmt_user_confirm with
3206		 * confirm_hint set to 1). */
3207		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3208			BT_DBG("Confirming auto-accept as acceptor");
3209			confirm_hint = 1;
3210			goto confirm;
3211		}
3212
3213		BT_DBG("Auto-accept of user confirmation with %ums delay",
3214		       hdev->auto_accept_delay);
3215
3216		if (hdev->auto_accept_delay > 0) {
3217			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3218			mod_timer(&conn->auto_accept_timer, jiffies + delay);
3219			goto unlock;
3220		}
3221
3222		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3223			     sizeof(ev->bdaddr), &ev->bdaddr);
3224		goto unlock;
3225	}
3226
3227confirm:
3228	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3229				  confirm_hint);
3230
3231unlock:
3232	hci_dev_unlock(hdev);
3233}
3234
3235static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3236						struct sk_buff *skb)
3237{
3238	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3239
3240	BT_DBG("%s", hdev->name);
3241
3242	hci_dev_lock(hdev);
3243
3244	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3245		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3246
3247	hci_dev_unlock(hdev);
3248}
3249
3250static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3251						struct sk_buff *skb)
3252{
3253	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3254	struct hci_conn *conn;
3255
3256	BT_DBG("%s", hdev->name);
3257
3258	hci_dev_lock(hdev);
3259
3260	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3261	if (!conn)
3262		goto unlock;
3263
3264	/* To avoid duplicate auth_failed events to user space we check
3265	 * the HCI_CONN_AUTH_PEND flag, which is set when we initiated
3266	 * the authentication. In that case a traditional Auth Complete
3267	 * event is produced as well and is already mapped to the
3268	 * mgmt_auth_failed event */
3269	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3270		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3271				 ev->status);
3272
3273	hci_conn_put(conn);
3274
3275unlock:
3276	hci_dev_unlock(hdev);
3277}
3278
3279static inline void hci_remote_host_features_evt(struct hci_dev *hdev,
3280						struct sk_buff *skb)
3281{
3282	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3283	struct inquiry_entry *ie;
3284
3285	BT_DBG("%s", hdev->name);
3286
3287	hci_dev_lock(hdev);
3288
3289	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3290	if (ie)
3291		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3292
3293	hci_dev_unlock(hdev);
3294}
3295
3296static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3297						   struct sk_buff *skb)
3298{
3299	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3300	struct oob_data *data;
3301
3302	BT_DBG("%s", hdev->name);
3303
3304	hci_dev_lock(hdev);
3305
3306	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3307		goto unlock;
3308
3309	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3310	if (data) {
3311		struct hci_cp_remote_oob_data_reply cp;
3312
3313		bacpy(&cp.bdaddr, &ev->bdaddr);
3314		memcpy(cp.hash, data->hash, sizeof(cp.hash));
3315		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3316
3317		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3318			     &cp);
3319	} else {
3320		struct hci_cp_remote_oob_data_neg_reply cp;
3321
3322		bacpy(&cp.bdaddr, &ev->bdaddr);
3323		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3324			     &cp);
3325	}
3326
3327unlock:
3328	hci_dev_unlock(hdev);
3329}
3330
3331static inline void hci_le_conn_complete_evt(struct hci_dev *hdev,
3332					    struct sk_buff *skb)
3333{
3334	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3335	struct hci_conn *conn;
3336
3337	BT_DBG("%s status %d", hdev->name, ev->status);
3338
3339	hci_dev_lock(hdev);
3340
3341	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3342	if (!conn) {
3343		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3344		if (!conn) {
3345			BT_ERR("No memory for new connection");
3346			hci_dev_unlock(hdev);
3347			return;
3348		}
3349
3350		conn->dst_type = ev->bdaddr_type;
3351	}
3352
3353	if (ev->status) {
3354		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3355				    conn->dst_type, ev->status);
3356		hci_proto_connect_cfm(conn, ev->status);
3357		conn->state = BT_CLOSED;
3358		hci_conn_del(conn);
3359		goto unlock;
3360	}
3361
3362	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3363		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3364				      conn->dst_type, 0, NULL, 0, NULL);
3365
3366	conn->sec_level = BT_SECURITY_LOW;
3367	conn->handle = __le16_to_cpu(ev->handle);
3368	conn->state = BT_CONNECTED;
3369
3370	hci_conn_hold_device(conn);
3371	hci_conn_add_sysfs(conn);
3372
3373	hci_proto_connect_cfm(conn, ev->status);
3374
3375unlock:
3376	hci_dev_unlock(hdev);
3377}
3378
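/* LE Advertising Report: every report is followed by its advertising data
 * plus a trailing RSSI byte; walk the list and report each entry to the
 * management interface as a found device. */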
3379static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3380					 struct sk_buff *skb)
3381{
3382	u8 num_reports = skb->data[0];
3383	void *ptr = &skb->data[1];
3384	s8 rssi;
3385
3386	hci_dev_lock(hdev);
3387
3388	while (num_reports--) {
3389		struct hci_ev_le_advertising_info *ev = ptr;
3390
3391		rssi = ev->data[ev->length];
3392		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3393				  NULL, rssi, 0, 1, ev->data, ev->length);
3394
3395		ptr += sizeof(*ev) + ev->length + 1;
3396	}
3397
3398	hci_dev_unlock(hdev);
3399}
3400
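/* LE Long Term Key Request: look up the LTK matching the EDiv and Rand
 * values and hand it to the controller, removing it afterwards if it was
 * only a short term key. Send a negative reply when no key is found. */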
3401static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3402					  struct sk_buff *skb)
3403{
3404	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3405	struct hci_cp_le_ltk_reply cp;
3406	struct hci_cp_le_ltk_neg_reply neg;
3407	struct hci_conn *conn;
3408	struct smp_ltk *ltk;
3409
3410	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
3411
3412	hci_dev_lock(hdev);
3413
3414	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3415	if (!conn)
3416		goto not_found;
3417
3418	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3419	if (!ltk)
3420		goto not_found;
3421
3422	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3423	cp.handle = cpu_to_le16(conn->handle);
3424
3425	if (ltk->authenticated)
3426		conn->sec_level = BT_SECURITY_HIGH;
3427
3428	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3429
3430	if (ltk->type & HCI_SMP_STK) {
3431		list_del(&ltk->list);
3432		kfree(ltk);
3433	}
3434
3435	hci_dev_unlock(hdev);
3436
3437	return;
3438
3439not_found:
3440	neg.handle = ev->handle;
3441	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3442	hci_dev_unlock(hdev);
3443}
3444
3445static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3446{
3447	struct hci_ev_le_meta *le_ev = (void *) skb->data;
3448
3449	skb_pull(skb, sizeof(*le_ev));
3450
3451	switch (le_ev->subevent) {
3452	case HCI_EV_LE_CONN_COMPLETE:
3453		hci_le_conn_complete_evt(hdev, skb);
3454		break;
3455
3456	case HCI_EV_LE_ADVERTISING_REPORT:
3457		hci_le_adv_report_evt(hdev, skb);
3458		break;
3459
3460	case HCI_EV_LE_LTK_REQ:
3461		hci_le_ltk_request_evt(hdev, skb);
3462		break;
3463
3464	default:
3465		break;
3466	}
3467}
3468
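/* Main HCI event demultiplexer: strip the event header and pass the rest
 * of the skb to the handler for the reported event code. */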
3469void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3470{
3471	struct hci_event_hdr *hdr = (void *) skb->data;
3472	__u8 event = hdr->evt;
3473
3474	skb_pull(skb, HCI_EVENT_HDR_SIZE);
3475
3476	switch (event) {
3477	case HCI_EV_INQUIRY_COMPLETE:
3478		hci_inquiry_complete_evt(hdev, skb);
3479		break;
3480
3481	case HCI_EV_INQUIRY_RESULT:
3482		hci_inquiry_result_evt(hdev, skb);
3483		break;
3484
3485	case HCI_EV_CONN_COMPLETE:
3486		hci_conn_complete_evt(hdev, skb);
3487		break;
3488
3489	case HCI_EV_CONN_REQUEST:
3490		hci_conn_request_evt(hdev, skb);
3491		break;
3492
3493	case HCI_EV_DISCONN_COMPLETE:
3494		hci_disconn_complete_evt(hdev, skb);
3495		break;
3496
3497	case HCI_EV_AUTH_COMPLETE:
3498		hci_auth_complete_evt(hdev, skb);
3499		break;
3500
3501	case HCI_EV_REMOTE_NAME:
3502		hci_remote_name_evt(hdev, skb);
3503		break;
3504
3505	case HCI_EV_ENCRYPT_CHANGE:
3506		hci_encrypt_change_evt(hdev, skb);
3507		break;
3508
3509	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3510		hci_change_link_key_complete_evt(hdev, skb);
3511		break;
3512
3513	case HCI_EV_REMOTE_FEATURES:
3514		hci_remote_features_evt(hdev, skb);
3515		break;
3516
3517	case HCI_EV_REMOTE_VERSION:
3518		hci_remote_version_evt(hdev, skb);
3519		break;
3520
3521	case HCI_EV_QOS_SETUP_COMPLETE:
3522		hci_qos_setup_complete_evt(hdev, skb);
3523		break;
3524
3525	case HCI_EV_CMD_COMPLETE:
3526		hci_cmd_complete_evt(hdev, skb);
3527		break;
3528
3529	case HCI_EV_CMD_STATUS:
3530		hci_cmd_status_evt(hdev, skb);
3531		break;
3532
3533	case HCI_EV_ROLE_CHANGE:
3534		hci_role_change_evt(hdev, skb);
3535		break;
3536
3537	case HCI_EV_NUM_COMP_PKTS:
3538		hci_num_comp_pkts_evt(hdev, skb);
3539		break;
3540
3541	case HCI_EV_MODE_CHANGE:
3542		hci_mode_change_evt(hdev, skb);
3543		break;
3544
3545	case HCI_EV_PIN_CODE_REQ:
3546		hci_pin_code_request_evt(hdev, skb);
3547		break;
3548
3549	case HCI_EV_LINK_KEY_REQ:
3550		hci_link_key_request_evt(hdev, skb);
3551		break;
3552
3553	case HCI_EV_LINK_KEY_NOTIFY:
3554		hci_link_key_notify_evt(hdev, skb);
3555		break;
3556
3557	case HCI_EV_CLOCK_OFFSET:
3558		hci_clock_offset_evt(hdev, skb);
3559		break;
3560
3561	case HCI_EV_PKT_TYPE_CHANGE:
3562		hci_pkt_type_change_evt(hdev, skb);
3563		break;
3564
3565	case HCI_EV_PSCAN_REP_MODE:
3566		hci_pscan_rep_mode_evt(hdev, skb);
3567		break;
3568
3569	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3570		hci_inquiry_result_with_rssi_evt(hdev, skb);
3571		break;
3572
3573	case HCI_EV_REMOTE_EXT_FEATURES:
3574		hci_remote_ext_features_evt(hdev, skb);
3575		break;
3576
3577	case HCI_EV_SYNC_CONN_COMPLETE:
3578		hci_sync_conn_complete_evt(hdev, skb);
3579		break;
3580
3581	case HCI_EV_SYNC_CONN_CHANGED:
3582		hci_sync_conn_changed_evt(hdev, skb);
3583		break;
3584
3585	case HCI_EV_SNIFF_SUBRATE:
3586		hci_sniff_subrate_evt(hdev, skb);
3587		break;
3588
3589	case HCI_EV_EXTENDED_INQUIRY_RESULT:
3590		hci_extended_inquiry_result_evt(hdev, skb);
3591		break;
3592
3593	case HCI_EV_IO_CAPA_REQUEST:
3594		hci_io_capa_request_evt(hdev, skb);
3595		break;
3596
3597	case HCI_EV_IO_CAPA_REPLY:
3598		hci_io_capa_reply_evt(hdev, skb);
3599		break;
3600
3601	case HCI_EV_USER_CONFIRM_REQUEST:
3602		hci_user_confirm_request_evt(hdev, skb);
3603		break;
3604
3605	case HCI_EV_USER_PASSKEY_REQUEST:
3606		hci_user_passkey_request_evt(hdev, skb);
3607		break;
3608
3609	case HCI_EV_SIMPLE_PAIR_COMPLETE:
3610		hci_simple_pair_complete_evt(hdev, skb);
3611		break;
3612
3613	case HCI_EV_REMOTE_HOST_FEATURES:
3614		hci_remote_host_features_evt(hdev, skb);
3615		break;
3616
3617	case HCI_EV_LE_META:
3618		hci_le_meta_evt(hdev, skb);
3619		break;
3620
3621	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3622		hci_remote_oob_data_request_evt(hdev, skb);
3623		break;
3624
3625	case HCI_EV_NUM_COMP_BLOCKS:
3626		hci_num_comp_blocks_evt(hdev, skb);
3627		break;
3628
3629	default:
3630		BT_DBG("%s event 0x%x", hdev->name, event);
3631		break;
3632	}
3633
3634	kfree_skb(skb);
3635	hdev->stat.evt_rx++;
3636}
3637