hci_event.c revision c9839a11c0e460a2457e7cac76650d07773e6c3b
1/*
2   BlueZ - Bluetooth protocol stack for Linux
3   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License version 2 as
9   published by the Free Software Foundation;
10
11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22   SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI event handling. */
26
27#include <linux/module.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/poll.h>
34#include <linux/fcntl.h>
35#include <linux/init.h>
36#include <linux/skbuff.h>
37#include <linux/interrupt.h>
38#include <linux/notifier.h>
39#include <net/sock.h>
40
41#include <asm/system.h>
42#include <linux/uaccess.h>
43#include <asm/unaligned.h>
44
45#include <net/bluetooth/bluetooth.h>
46#include <net/bluetooth/hci_core.h>
47
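/* When set, hci_set_le_support() below advertises LE host support
 * (and simultaneous LE + BR/EDR where the controller allows it). */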
48static bool enable_le;
49
50/* Handle HCI Event packets */
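/* Naming scheme used below: hci_cc_* handle the Command Complete
 * event of one opcode each (dispatched from hci_cmd_complete_evt),
 * hci_cs_* handle the Command Status result of a previously sent
 * command, and hci_*_evt handle asynchronous HCI events. */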
51
52static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
53{
54	__u8 status = *((__u8 *) skb->data);
55
56	BT_DBG("%s status 0x%x", hdev->name, status);
57
58	if (status) {
59		hci_dev_lock(hdev);
60		mgmt_stop_discovery_failed(hdev, status);
61		hci_dev_unlock(hdev);
62		return;
63	}
64
65	clear_bit(HCI_INQUIRY, &hdev->flags);
66
67	hci_dev_lock(hdev);
68	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
69	hci_dev_unlock(hdev);
70
71	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
72
73	hci_conn_check_pending(hdev);
74}
75
76static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
77{
78	__u8 status = *((__u8 *) skb->data);
79
80	BT_DBG("%s status 0x%x", hdev->name, status);
81
82	if (status)
83		return;
84
85	hci_conn_check_pending(hdev);
86}
87
88static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
89{
90	BT_DBG("%s", hdev->name);
91}
92
93static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94{
95	struct hci_rp_role_discovery *rp = (void *) skb->data;
96	struct hci_conn *conn;
97
98	BT_DBG("%s status 0x%x", hdev->name, rp->status);
99
100	if (rp->status)
101		return;
102
103	hci_dev_lock(hdev);
104
105	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106	if (conn) {
107		if (rp->role)
108			conn->link_mode &= ~HCI_LM_MASTER;
109		else
110			conn->link_mode |= HCI_LM_MASTER;
111	}
112
113	hci_dev_unlock(hdev);
114}
115
116static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117{
118	struct hci_rp_read_link_policy *rp = (void *) skb->data;
119	struct hci_conn *conn;
120
121	BT_DBG("%s status 0x%x", hdev->name, rp->status);
122
123	if (rp->status)
124		return;
125
126	hci_dev_lock(hdev);
127
128	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129	if (conn)
130		conn->link_policy = __le16_to_cpu(rp->policy);
131
132	hci_dev_unlock(hdev);
133}
134
135static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136{
137	struct hci_rp_write_link_policy *rp = (void *) skb->data;
138	struct hci_conn *conn;
139	void *sent;
140
141	BT_DBG("%s status 0x%x", hdev->name, rp->status);
142
143	if (rp->status)
144		return;
145
146	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147	if (!sent)
148		return;
149
150	hci_dev_lock(hdev);
151
152	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153	if (conn)
154		conn->link_policy = get_unaligned_le16(sent + 2);
155
156	hci_dev_unlock(hdev);
157}
158
159static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
160{
161	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162
163	BT_DBG("%s status 0x%x", hdev->name, rp->status);
164
165	if (rp->status)
166		return;
167
168	hdev->link_policy = __le16_to_cpu(rp->policy);
169}
170
171static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
172{
173	__u8 status = *((__u8 *) skb->data);
174	void *sent;
175
176	BT_DBG("%s status 0x%x", hdev->name, status);
177
178	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179	if (!sent)
180		return;
181
182	if (!status)
183		hdev->link_policy = get_unaligned_le16(sent);
184
185	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
186}
187
188static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
189{
190	__u8 status = *((__u8 *) skb->data);
191
192	BT_DBG("%s status 0x%x", hdev->name, status);
193
194	clear_bit(HCI_RESET, &hdev->flags);
195
196	hci_req_complete(hdev, HCI_OP_RESET, status);
197
198	/* Reset all flags, except persistent ones */
199	hdev->dev_flags &= BIT(HCI_MGMT) | BIT(HCI_SETUP) | BIT(HCI_AUTO_OFF) |
200				BIT(HCI_LINK_KEYS) | BIT(HCI_DEBUG_KEYS);
201}
202
203static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
204{
205	__u8 status = *((__u8 *) skb->data);
206	void *sent;
207
208	BT_DBG("%s status 0x%x", hdev->name, status);
209
210	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
211	if (!sent)
212		return;
213
214	hci_dev_lock(hdev);
215
216	if (test_bit(HCI_MGMT, &hdev->dev_flags))
217		mgmt_set_local_name_complete(hdev, sent, status);
218
219	if (status == 0)
220		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
221
222	hci_dev_unlock(hdev);
223}
224
225static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
226{
227	struct hci_rp_read_local_name *rp = (void *) skb->data;
228
229	BT_DBG("%s status 0x%x", hdev->name, rp->status);
230
231	if (rp->status)
232		return;
233
234	memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
235}
236
237static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
238{
239	__u8 status = *((__u8 *) skb->data);
240	void *sent;
241
242	BT_DBG("%s status 0x%x", hdev->name, status);
243
244	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
245	if (!sent)
246		return;
247
248	if (!status) {
249		__u8 param = *((__u8 *) sent);
250
251		if (param == AUTH_ENABLED)
252			set_bit(HCI_AUTH, &hdev->flags);
253		else
254			clear_bit(HCI_AUTH, &hdev->flags);
255	}
256
257	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
258}
259
260static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
261{
262	__u8 status = *((__u8 *) skb->data);
263	void *sent;
264
265	BT_DBG("%s status 0x%x", hdev->name, status);
266
267	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
268	if (!sent)
269		return;
270
271	if (!status) {
272		__u8 param = *((__u8 *) sent);
273
274		if (param)
275			set_bit(HCI_ENCRYPT, &hdev->flags);
276		else
277			clear_bit(HCI_ENCRYPT, &hdev->flags);
278	}
279
280	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
281}
282
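/* Write Scan Enable completed: SCAN_INQUIRY turns on inquiry scan
 * (discoverable) and SCAN_PAGE turns on page scan (connectable).
 * The mgmt layer is only notified when a flag actually changes, and
 * the discoverable timeout is armed when one is configured. */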
283static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
284{
285	__u8 param, status = *((__u8 *) skb->data);
286	int old_pscan, old_iscan;
287	void *sent;
288
289	BT_DBG("%s status 0x%x", hdev->name, status);
290
291	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
292	if (!sent)
293		return;
294
295	param = *((__u8 *) sent);
296
297	hci_dev_lock(hdev);
298
299	if (status != 0) {
300		mgmt_write_scan_failed(hdev, param, status);
301		hdev->discov_timeout = 0;
302		goto done;
303	}
304
305	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
306	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
307
308	if (param & SCAN_INQUIRY) {
309		set_bit(HCI_ISCAN, &hdev->flags);
310		if (!old_iscan)
311			mgmt_discoverable(hdev, 1);
312		if (hdev->discov_timeout > 0) {
313			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
314			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
315									to);
316		}
317	} else if (old_iscan)
318		mgmt_discoverable(hdev, 0);
319
320	if (param & SCAN_PAGE) {
321		set_bit(HCI_PSCAN, &hdev->flags);
322		if (!old_pscan)
323			mgmt_connectable(hdev, 1);
324	} else if (old_pscan)
325		mgmt_connectable(hdev, 0);
326
327done:
328	hci_dev_unlock(hdev);
329	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
330}
331
332static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
333{
334	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
335
336	BT_DBG("%s status 0x%x", hdev->name, rp->status);
337
338	if (rp->status)
339		return;
340
341	memcpy(hdev->dev_class, rp->dev_class, 3);
342
343	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
344		hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
345}
346
347static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
348{
349	__u8 status = *((__u8 *) skb->data);
350	void *sent;
351
352	BT_DBG("%s status 0x%x", hdev->name, status);
353
354	if (status)
355		return;
356
357	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
358	if (!sent)
359		return;
360
361	memcpy(hdev->dev_class, sent, 3);
362}
363
364static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
365{
366	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
367	__u16 setting;
368
369	BT_DBG("%s status 0x%x", hdev->name, rp->status);
370
371	if (rp->status)
372		return;
373
374	setting = __le16_to_cpu(rp->voice_setting);
375
376	if (hdev->voice_setting == setting)
377		return;
378
379	hdev->voice_setting = setting;
380
381	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
382
383	if (hdev->notify)
384		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
385}
386
387static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
388{
389	__u8 status = *((__u8 *) skb->data);
390	__u16 setting;
391	void *sent;
392
393	BT_DBG("%s status 0x%x", hdev->name, status);
394
395	if (status)
396		return;
397
398	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
399	if (!sent)
400		return;
401
402	setting = get_unaligned_le16(sent);
403
404	if (hdev->voice_setting == setting)
405		return;
406
407	hdev->voice_setting = setting;
408
409	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
410
411	if (hdev->notify)
412		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
413}
414
415static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
416{
417	__u8 status = *((__u8 *) skb->data);
418
419	BT_DBG("%s status 0x%x", hdev->name, status);
420
421	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
422}
423
424static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
425{
426	struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
427
428	BT_DBG("%s status 0x%x", hdev->name, rp->status);
429
430	if (rp->status)
431		return;
432
433	if (rp->mode)
434		set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
435	else
436		clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
437}
438
439static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
440{
441	__u8 status = *((__u8 *) skb->data);
442	void *sent;
443
444	BT_DBG("%s status 0x%x", hdev->name, status);
445
446	if (status)
447		return;
448
449	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
450	if (!sent)
451		return;
452
453	if (*((u8 *) sent))
454		set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
455	else
456		clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
457}
458
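/* Inquiry mode to configure: 0x02 = with RSSI or extended inquiry
 * result, 0x01 = with RSSI, 0x00 = standard. The manufacturer and
 * revision checks below are presumably quirks for controllers that
 * return RSSI results without advertising LMP_RSSI_INQ. */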
459static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
460{
461	if (hdev->features[6] & LMP_EXT_INQ)
462		return 2;
463
464	if (hdev->features[3] & LMP_RSSI_INQ)
465		return 1;
466
467	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
468						hdev->lmp_subver == 0x0757)
469		return 1;
470
471	if (hdev->manufacturer == 15) {
472		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
473			return 1;
474		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
475			return 1;
476		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
477			return 1;
478	}
479
480	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
481						hdev->lmp_subver == 0x1805)
482		return 1;
483
484	return 0;
485}
486
487static void hci_setup_inquiry_mode(struct hci_dev *hdev)
488{
489	u8 mode;
490
491	mode = hci_get_inquiry_mode(hdev);
492
493	hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
494}
495
496static void hci_setup_event_mask(struct hci_dev *hdev)
497{
498	/* The second byte is 0xff instead of 0x9f (two reserved bits
499	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
500	 * command otherwise */
501	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
502
503	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
504	 * any event mask for pre-1.2 devices */
505	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
506		return;
507
508	events[4] |= 0x01; /* Flow Specification Complete */
509	events[4] |= 0x02; /* Inquiry Result with RSSI */
510	events[4] |= 0x04; /* Read Remote Extended Features Complete */
511	events[5] |= 0x08; /* Synchronous Connection Complete */
512	events[5] |= 0x10; /* Synchronous Connection Changed */
513
514	if (hdev->features[3] & LMP_RSSI_INQ)
515		events[4] |= 0x04; /* Inquiry Result with RSSI */
516
517	if (hdev->features[5] & LMP_SNIFF_SUBR)
518		events[5] |= 0x20; /* Sniff Subrating */
519
520	if (hdev->features[5] & LMP_PAUSE_ENC)
521		events[5] |= 0x80; /* Encryption Key Refresh Complete */
522
523	if (hdev->features[6] & LMP_EXT_INQ)
524		events[5] |= 0x40; /* Extended Inquiry Result */
525
526	if (hdev->features[6] & LMP_NO_FLUSH)
527		events[7] |= 0x01; /* Enhanced Flush Complete */
528
529	if (hdev->features[7] & LMP_LSTO)
530		events[6] |= 0x80; /* Link Supervision Timeout Changed */
531
532	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
533		events[6] |= 0x01;	/* IO Capability Request */
534		events[6] |= 0x02;	/* IO Capability Response */
535		events[6] |= 0x04;	/* User Confirmation Request */
536		events[6] |= 0x08;	/* User Passkey Request */
537		events[6] |= 0x10;	/* Remote OOB Data Request */
538		events[6] |= 0x20;	/* Simple Pairing Complete */
539		events[7] |= 0x04;	/* User Passkey Notification */
540		events[7] |= 0x08;	/* Keypress Notification */
541		events[7] |= 0x10;	/* Remote Host Supported
542					 * Features Notification */
543	}
544
545	if (hdev->features[4] & LMP_LE)
546		events[7] |= 0x20;	/* LE Meta-Event */
547
548	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
549}
550
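/* Advertise LE host support to the controller: enabled only when the
 * enable_le flag is set, with simultaneous LE + BR/EDR following the
 * LMP_SIMUL_LE_BR feature bit. */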
551static void hci_set_le_support(struct hci_dev *hdev)
552{
553	struct hci_cp_write_le_host_supported cp;
554
555	memset(&cp, 0, sizeof(cp));
556
557	if (enable_le) {
558		cp.le = 1;
559		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
560	}
561
562	hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
563}
564
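/* Follow-up configuration run during HCI_INIT once the local version
 * is known (see hci_cc_read_local_version): event mask, SSP mode,
 * inquiry mode, inquiry response TX power, extended features page 1
 * and LE host support, depending on what the controller advertises. */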
565static void hci_setup(struct hci_dev *hdev)
566{
567	if (hdev->dev_type != HCI_BREDR)
568		return;
569
570	hci_setup_event_mask(hdev);
571
572	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
573		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
574
575	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
576		u8 mode = 0x01;
577		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
578	}
579
580	if (hdev->features[3] & LMP_RSSI_INQ)
581		hci_setup_inquiry_mode(hdev);
582
583	if (hdev->features[7] & LMP_INQ_TX_PWR)
584		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
585
586	if (hdev->features[7] & LMP_EXTFEATURES) {
587		struct hci_cp_read_local_ext_features cp;
588
589		cp.page = 0x01;
590		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
591							sizeof(cp), &cp);
592	}
593
594	if (hdev->features[4] & LMP_LE)
595		hci_set_le_support(hdev);
596}
597
598static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
599{
600	struct hci_rp_read_local_version *rp = (void *) skb->data;
601
602	BT_DBG("%s status 0x%x", hdev->name, rp->status);
603
604	if (rp->status)
605		return;
606
607	hdev->hci_ver = rp->hci_ver;
608	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
609	hdev->lmp_ver = rp->lmp_ver;
610	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
611	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
612
613	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
614					hdev->manufacturer,
615					hdev->hci_ver, hdev->hci_rev);
616
617	if (test_bit(HCI_INIT, &hdev->flags))
618		hci_setup(hdev);
619}
620
621static void hci_setup_link_policy(struct hci_dev *hdev)
622{
623	u16 link_policy = 0;
624
625	if (hdev->features[0] & LMP_RSWITCH)
626		link_policy |= HCI_LP_RSWITCH;
627	if (hdev->features[0] & LMP_HOLD)
628		link_policy |= HCI_LP_HOLD;
629	if (hdev->features[0] & LMP_SNIFF)
630		link_policy |= HCI_LP_SNIFF;
631	if (hdev->features[1] & LMP_PARK)
632		link_policy |= HCI_LP_PARK;
633
634	link_policy = cpu_to_le16(link_policy);
635	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
636					sizeof(link_policy), &link_policy);
637}
638
639static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
640{
641	struct hci_rp_read_local_commands *rp = (void *) skb->data;
642
643	BT_DBG("%s status 0x%x", hdev->name, rp->status);
644
645	if (rp->status)
646		goto done;
647
648	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
649
650	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
651		hci_setup_link_policy(hdev);
652
653done:
654	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
655}
656
657static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
658{
659	struct hci_rp_read_local_features *rp = (void *) skb->data;
660
661	BT_DBG("%s status 0x%x", hdev->name, rp->status);
662
663	if (rp->status)
664		return;
665
666	memcpy(hdev->features, rp->features, 8);
667
668	/* Adjust default settings according to features
669	 * supported by the device. */
670
671	if (hdev->features[0] & LMP_3SLOT)
672		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
673
674	if (hdev->features[0] & LMP_5SLOT)
675		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
676
677	if (hdev->features[1] & LMP_HV2) {
678		hdev->pkt_type  |= (HCI_HV2);
679		hdev->esco_type |= (ESCO_HV2);
680	}
681
682	if (hdev->features[1] & LMP_HV3) {
683		hdev->pkt_type  |= (HCI_HV3);
684		hdev->esco_type |= (ESCO_HV3);
685	}
686
687	if (hdev->features[3] & LMP_ESCO)
688		hdev->esco_type |= (ESCO_EV3);
689
690	if (hdev->features[4] & LMP_EV4)
691		hdev->esco_type |= (ESCO_EV4);
692
693	if (hdev->features[4] & LMP_EV5)
694		hdev->esco_type |= (ESCO_EV5);
695
696	if (hdev->features[5] & LMP_EDR_ESCO_2M)
697		hdev->esco_type |= (ESCO_2EV3);
698
699	if (hdev->features[5] & LMP_EDR_ESCO_3M)
700		hdev->esco_type |= (ESCO_3EV3);
701
702	if (hdev->features[5] & LMP_EDR_3S_ESCO)
703		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
704
705	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
706					hdev->features[0], hdev->features[1],
707					hdev->features[2], hdev->features[3],
708					hdev->features[4], hdev->features[5],
709					hdev->features[6], hdev->features[7]);
710}
711
712static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
713							struct sk_buff *skb)
714{
715	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
716
717	BT_DBG("%s status 0x%x", hdev->name, rp->status);
718
719	if (rp->status)
720		return;
721
722	switch (rp->page) {
723	case 0:
724		memcpy(hdev->features, rp->features, 8);
725		break;
726	case 1:
727		memcpy(hdev->host_features, rp->features, 8);
728		break;
729	}
730
731	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
732}
733
734static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
735						struct sk_buff *skb)
736{
737	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
738
739	BT_DBG("%s status 0x%x", hdev->name, rp->status);
740
741	if (rp->status)
742		return;
743
744	hdev->flow_ctl_mode = rp->mode;
745
746	hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
747}
748
749static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
750{
751	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
752
753	BT_DBG("%s status 0x%x", hdev->name, rp->status);
754
755	if (rp->status)
756		return;
757
758	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
759	hdev->sco_mtu  = rp->sco_mtu;
760	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
761	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
762
763	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
764		hdev->sco_mtu  = 64;
765		hdev->sco_pkts = 8;
766	}
767
768	hdev->acl_cnt = hdev->acl_pkts;
769	hdev->sco_cnt = hdev->sco_pkts;
770
771	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
772					hdev->acl_mtu, hdev->acl_pkts,
773					hdev->sco_mtu, hdev->sco_pkts);
774}
775
776static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
777{
778	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
779
780	BT_DBG("%s status 0x%x", hdev->name, rp->status);
781
782	if (!rp->status)
783		bacpy(&hdev->bdaddr, &rp->bdaddr);
784
785	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
786}
787
788static void hci_cc_read_data_block_size(struct hci_dev *hdev,
789							struct sk_buff *skb)
790{
791	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
792
793	BT_DBG("%s status 0x%x", hdev->name, rp->status);
794
795	if (rp->status)
796		return;
797
798	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
799	hdev->block_len = __le16_to_cpu(rp->block_len);
800	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
801
802	hdev->block_cnt = hdev->num_blocks;
803
804	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
805					hdev->block_cnt, hdev->block_len);
806
807	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
808}
809
810static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
811{
812	__u8 status = *((__u8 *) skb->data);
813
814	BT_DBG("%s status 0x%x", hdev->name, status);
815
816	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
817}
818
819static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
820		struct sk_buff *skb)
821{
822	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
823
824	BT_DBG("%s status 0x%x", hdev->name, rp->status);
825
826	if (rp->status)
827		return;
828
829	hdev->amp_status = rp->amp_status;
830	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
831	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
832	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
833	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
834	hdev->amp_type = rp->amp_type;
835	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
836	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
837	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
838	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
839
840	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
841}
842
843static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
844							struct sk_buff *skb)
845{
846	__u8 status = *((__u8 *) skb->data);
847
848	BT_DBG("%s status 0x%x", hdev->name, status);
849
850	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
851}
852
853static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
854{
855	__u8 status = *((__u8 *) skb->data);
856
857	BT_DBG("%s status 0x%x", hdev->name, status);
858
859	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
860}
861
862static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
863							struct sk_buff *skb)
864{
865	__u8 status = *((__u8 *) skb->data);
866
867	BT_DBG("%s status 0x%x", hdev->name, status);
868
869	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
870}
871
872static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
873							struct sk_buff *skb)
874{
875	__u8 status = *((__u8 *) skb->data);
876
877	BT_DBG("%s status 0x%x", hdev->name, status);
878
879	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
880}
881
882static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
883{
884	__u8 status = *((__u8 *) skb->data);
885
886	BT_DBG("%s status 0x%x", hdev->name, status);
887
888	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
889}
890
891static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
892{
893	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
894	struct hci_cp_pin_code_reply *cp;
895	struct hci_conn *conn;
896
897	BT_DBG("%s status 0x%x", hdev->name, rp->status);
898
899	hci_dev_lock(hdev);
900
901	if (test_bit(HCI_MGMT, &hdev->dev_flags))
902		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
903
904	if (rp->status != 0)
905		goto unlock;
906
907	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
908	if (!cp)
909		goto unlock;
910
911	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
912	if (conn)
913		conn->pin_length = cp->pin_len;
914
915unlock:
916	hci_dev_unlock(hdev);
917}
918
919static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
920{
921	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
922
923	BT_DBG("%s status 0x%x", hdev->name, rp->status);
924
925	hci_dev_lock(hdev);
926
927	if (test_bit(HCI_MGMT, &hdev->dev_flags))
928		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
929								rp->status);
930
931	hci_dev_unlock(hdev);
932}
933
934static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
935				       struct sk_buff *skb)
936{
937	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
938
939	BT_DBG("%s status 0x%x", hdev->name, rp->status);
940
941	if (rp->status)
942		return;
943
944	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
945	hdev->le_pkts = rp->le_max_pkt;
946
947	hdev->le_cnt = hdev->le_pkts;
948
949	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
950
951	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
952}
953
954static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
955{
956	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
957
958	BT_DBG("%s status 0x%x", hdev->name, rp->status);
959
960	hci_dev_lock(hdev);
961
962	if (test_bit(HCI_MGMT, &hdev->dev_flags))
963		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr,
964								rp->status);
965
966	hci_dev_unlock(hdev);
967}
968
969static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
970							struct sk_buff *skb)
971{
972	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
973
974	BT_DBG("%s status 0x%x", hdev->name, rp->status);
975
976	hci_dev_lock(hdev);
977
978	if (test_bit(HCI_MGMT, &hdev->dev_flags))
979		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
980								rp->status);
981
982	hci_dev_unlock(hdev);
983}
984
985static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
986{
987	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
988
989	BT_DBG("%s status 0x%x", hdev->name, rp->status);
990
991	hci_dev_lock(hdev);
992
993	if (test_bit(HCI_MGMT, &hdev->dev_flags))
994		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr,
995								rp->status);
996
997	hci_dev_unlock(hdev);
998}
999
1000static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1001							struct sk_buff *skb)
1002{
1003	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1004
1005	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1006
1007	hci_dev_lock(hdev);
1008
1009	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1010		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1011								rp->status);
1012
1013	hci_dev_unlock(hdev);
1014}
1015
1016static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1017							struct sk_buff *skb)
1018{
1019	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1020
1021	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1022
1023	hci_dev_lock(hdev);
1024	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1025						rp->randomizer, rp->status);
1026	hci_dev_unlock(hdev);
1027}
1028
1029static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1030{
1031	__u8 status = *((__u8 *) skb->data);
1032
1033	BT_DBG("%s status 0x%x", hdev->name, status);
1034}
1035
1036static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1037					struct sk_buff *skb)
1038{
1039	struct hci_cp_le_set_scan_enable *cp;
1040	__u8 status = *((__u8 *) skb->data);
1041
1042	BT_DBG("%s status 0x%x", hdev->name, status);
1043
1044	if (status)
1045		return;
1046
1047	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1048	if (!cp)
1049		return;
1050
1051	switch (cp->enable) {
1052	case LE_SCANNING_ENABLED:
1053		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1054
1055		cancel_delayed_work_sync(&hdev->adv_work);
1056
1057		hci_dev_lock(hdev);
1058		hci_adv_entries_clear(hdev);
1059		hci_dev_unlock(hdev);
1060		break;
1061
1062	case LE_SCANNING_DISABLED:
1063		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1064
1065		schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
1066		break;
1067
1068	default:
1069		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1070		break;
1071	}
1072}
1073
1074static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1075{
1076	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1077
1078	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1079
1080	if (rp->status)
1081		return;
1082
1083	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1084}
1085
1086static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1087{
1088	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1089
1090	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1091
1092	if (rp->status)
1093		return;
1094
1095	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1096}
1097
1098static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1099							struct sk_buff *skb)
1100{
1101	struct hci_cp_read_local_ext_features cp;
1102	__u8 status = *((__u8 *) skb->data);
1103
1104	BT_DBG("%s status 0x%x", hdev->name, status);
1105
1106	if (status)
1107		return;
1108
1109	cp.page = 0x01;
1110	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
1111}
1112
1113static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1114{
1115	BT_DBG("%s status 0x%x", hdev->name, status);
1116
1117	if (status) {
1118		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1119		hci_conn_check_pending(hdev);
1120		hci_dev_lock(hdev);
1121		if (test_bit(HCI_MGMT, &hdev->dev_flags))
1122			mgmt_start_discovery_failed(hdev, status);
1123		hci_dev_unlock(hdev);
1124		return;
1125	}
1126
1127	set_bit(HCI_INQUIRY, &hdev->flags);
1128
1129	hci_dev_lock(hdev);
1130	hci_discovery_set_state(hdev, DISCOVERY_INQUIRY);
1131	hci_dev_unlock(hdev);
1132}
1133
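/* Command Status for HCI_OP_CREATE_CONN: on failure drop the pending
 * connection, except for status 0x0c (Command Disallowed) with
 * attempts left, where the connection is left in BT_CONNECT2 to be
 * retried; on success make sure an outgoing ACL connection object
 * exists. */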
1134static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1135{
1136	struct hci_cp_create_conn *cp;
1137	struct hci_conn *conn;
1138
1139	BT_DBG("%s status 0x%x", hdev->name, status);
1140
1141	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1142	if (!cp)
1143		return;
1144
1145	hci_dev_lock(hdev);
1146
1147	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1148
1149	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1150
1151	if (status) {
1152		if (conn && conn->state == BT_CONNECT) {
1153			if (status != 0x0c || conn->attempt > 2) {
1154				conn->state = BT_CLOSED;
1155				hci_proto_connect_cfm(conn, status);
1156				hci_conn_del(conn);
1157			} else
1158				conn->state = BT_CONNECT2;
1159		}
1160	} else {
1161		if (!conn) {
1162			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1163			if (conn) {
1164				conn->out = true;
1165				conn->link_mode |= HCI_LM_MASTER;
1166			} else
1167				BT_ERR("No memory for new connection");
1168		}
1169	}
1170
1171	hci_dev_unlock(hdev);
1172}
1173
1174static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1175{
1176	struct hci_cp_add_sco *cp;
1177	struct hci_conn *acl, *sco;
1178	__u16 handle;
1179
1180	BT_DBG("%s status 0x%x", hdev->name, status);
1181
1182	if (!status)
1183		return;
1184
1185	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1186	if (!cp)
1187		return;
1188
1189	handle = __le16_to_cpu(cp->handle);
1190
1191	BT_DBG("%s handle %d", hdev->name, handle);
1192
1193	hci_dev_lock(hdev);
1194
1195	acl = hci_conn_hash_lookup_handle(hdev, handle);
1196	if (acl) {
1197		sco = acl->link;
1198		if (sco) {
1199			sco->state = BT_CLOSED;
1200
1201			hci_proto_connect_cfm(sco, status);
1202			hci_conn_del(sco);
1203		}
1204	}
1205
1206	hci_dev_unlock(hdev);
1207}
1208
1209static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1210{
1211	struct hci_cp_auth_requested *cp;
1212	struct hci_conn *conn;
1213
1214	BT_DBG("%s status 0x%x", hdev->name, status);
1215
1216	if (!status)
1217		return;
1218
1219	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1220	if (!cp)
1221		return;
1222
1223	hci_dev_lock(hdev);
1224
1225	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1226	if (conn) {
1227		if (conn->state == BT_CONFIG) {
1228			hci_proto_connect_cfm(conn, status);
1229			hci_conn_put(conn);
1230		}
1231	}
1232
1233	hci_dev_unlock(hdev);
1234}
1235
1236static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1237{
1238	struct hci_cp_set_conn_encrypt *cp;
1239	struct hci_conn *conn;
1240
1241	BT_DBG("%s status 0x%x", hdev->name, status);
1242
1243	if (!status)
1244		return;
1245
1246	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1247	if (!cp)
1248		return;
1249
1250	hci_dev_lock(hdev);
1251
1252	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1253	if (conn) {
1254		if (conn->state == BT_CONFIG) {
1255			hci_proto_connect_cfm(conn, status);
1256			hci_conn_put(conn);
1257		}
1258	}
1259
1260	hci_dev_unlock(hdev);
1261}
1262
1263static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1264							struct hci_conn *conn)
1265{
1266	if (conn->state != BT_CONFIG || !conn->out)
1267		return 0;
1268
1269	if (conn->pending_sec_level == BT_SECURITY_SDP)
1270		return 0;
1271
1272	/* Only request authentication for SSP connections, or for non-SSP
1273	 * devices with sec_level HIGH or when MITM protection is requested */
1274	if (!hci_conn_ssp_enabled(conn) &&
1275				conn->pending_sec_level != BT_SECURITY_HIGH &&
1276				!(conn->auth_type & 0x01))
1277		return 0;
1278
1279	return 1;
1280}
1281
1282static inline int hci_resolve_name(struct hci_dev *hdev, struct inquiry_entry *e)
1283{
1284	struct hci_cp_remote_name_req cp;
1285
1286	memset(&cp, 0, sizeof(cp));
1287
1288	bacpy(&cp.bdaddr, &e->data.bdaddr);
1289	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1290	cp.pscan_mode = e->data.pscan_mode;
1291	cp.clock_offset = e->data.clock_offset;
1292
1293	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1294}
1295
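/* Start a remote name request for the next inquiry-cache entry that
 * still needs its name resolved; returns true if a request was sent. */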
1296static bool hci_resolve_next_name(struct hci_dev *hdev)
1297{
1298	struct discovery_state *discov = &hdev->discovery;
1299	struct inquiry_entry *e;
1300
1301	if (list_empty(&discov->resolve))
1302		return false;
1303
1304	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1305	if (hci_resolve_name(hdev, e) == 0) {
1306		e->name_state = NAME_PENDING;
1307		return true;
1308	}
1309
1310	return false;
1311}
1312
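/* Handle the outcome of a remote name request (name is NULL on
 * failure): report the device as connected to mgmt if that has not
 * happened yet, mark the matching inquiry-cache entry as resolved and
 * either continue with the next pending name or finish discovery. */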
1313static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1314					bdaddr_t *bdaddr, u8 *name, u8 name_len)
1315{
1316	struct discovery_state *discov = &hdev->discovery;
1317	struct inquiry_entry *e;
1318
1319	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1320		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00,
1321					name, name_len, conn->dev_class);
1322
1323	if (discov->state == DISCOVERY_STOPPED)
1324		return;
1325
1326	if (discov->state == DISCOVERY_STOPPING)
1327		goto discov_complete;
1328
1329	if (discov->state != DISCOVERY_RESOLVING)
1330		return;
1331
1332	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1333	if (e) {
1334		e->name_state = NAME_KNOWN;
1335		list_del(&e->list);
1336		if (name)
1337			mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1338					e->data.rssi, name, name_len);
1339	}
1340
1341	if (hci_resolve_next_name(hdev))
1342		return;
1343
1344discov_complete:
1345	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1346}
1347
1348static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1349{
1350	struct hci_cp_remote_name_req *cp;
1351	struct hci_conn *conn;
1352
1353	BT_DBG("%s status 0x%x", hdev->name, status);
1354
1355	/* If successful, wait for the remote name request complete event
1356	 * before checking whether authentication is needed */
1357	if (!status)
1358		return;
1359
1360	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1361	if (!cp)
1362		return;
1363
1364	hci_dev_lock(hdev);
1365
1366	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1367
1368	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1369		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1370
1371	if (!conn)
1372		goto unlock;
1373
1374	if (!hci_outgoing_auth_needed(hdev, conn))
1375		goto unlock;
1376
1377	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1378		struct hci_cp_auth_requested cp;
1379		cp.handle = __cpu_to_le16(conn->handle);
1380		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1381	}
1382
1383unlock:
1384	hci_dev_unlock(hdev);
1385}
1386
1387static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1388{
1389	struct hci_cp_read_remote_features *cp;
1390	struct hci_conn *conn;
1391
1392	BT_DBG("%s status 0x%x", hdev->name, status);
1393
1394	if (!status)
1395		return;
1396
1397	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1398	if (!cp)
1399		return;
1400
1401	hci_dev_lock(hdev);
1402
1403	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1404	if (conn) {
1405		if (conn->state == BT_CONFIG) {
1406			hci_proto_connect_cfm(conn, status);
1407			hci_conn_put(conn);
1408		}
1409	}
1410
1411	hci_dev_unlock(hdev);
1412}
1413
1414static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1415{
1416	struct hci_cp_read_remote_ext_features *cp;
1417	struct hci_conn *conn;
1418
1419	BT_DBG("%s status 0x%x", hdev->name, status);
1420
1421	if (!status)
1422		return;
1423
1424	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1425	if (!cp)
1426		return;
1427
1428	hci_dev_lock(hdev);
1429
1430	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1431	if (conn) {
1432		if (conn->state == BT_CONFIG) {
1433			hci_proto_connect_cfm(conn, status);
1434			hci_conn_put(conn);
1435		}
1436	}
1437
1438	hci_dev_unlock(hdev);
1439}
1440
1441static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1442{
1443	struct hci_cp_setup_sync_conn *cp;
1444	struct hci_conn *acl, *sco;
1445	__u16 handle;
1446
1447	BT_DBG("%s status 0x%x", hdev->name, status);
1448
1449	if (!status)
1450		return;
1451
1452	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1453	if (!cp)
1454		return;
1455
1456	handle = __le16_to_cpu(cp->handle);
1457
1458	BT_DBG("%s handle %d", hdev->name, handle);
1459
1460	hci_dev_lock(hdev);
1461
1462	acl = hci_conn_hash_lookup_handle(hdev, handle);
1463	if (acl) {
1464		sco = acl->link;
1465		if (sco) {
1466			sco->state = BT_CLOSED;
1467
1468			hci_proto_connect_cfm(sco, status);
1469			hci_conn_del(sco);
1470		}
1471	}
1472
1473	hci_dev_unlock(hdev);
1474}
1475
1476static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1477{
1478	struct hci_cp_sniff_mode *cp;
1479	struct hci_conn *conn;
1480
1481	BT_DBG("%s status 0x%x", hdev->name, status);
1482
1483	if (!status)
1484		return;
1485
1486	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1487	if (!cp)
1488		return;
1489
1490	hci_dev_lock(hdev);
1491
1492	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1493	if (conn) {
1494		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1495
1496		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1497			hci_sco_setup(conn, status);
1498	}
1499
1500	hci_dev_unlock(hdev);
1501}
1502
1503static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1504{
1505	struct hci_cp_exit_sniff_mode *cp;
1506	struct hci_conn *conn;
1507
1508	BT_DBG("%s status 0x%x", hdev->name, status);
1509
1510	if (!status)
1511		return;
1512
1513	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1514	if (!cp)
1515		return;
1516
1517	hci_dev_lock(hdev);
1518
1519	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1520	if (conn) {
1521		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1522
1523		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1524			hci_sco_setup(conn, status);
1525	}
1526
1527	hci_dev_unlock(hdev);
1528}
1529
1530static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1531{
1532	struct hci_cp_le_create_conn *cp;
1533	struct hci_conn *conn;
1534
1535	BT_DBG("%s status 0x%x", hdev->name, status);
1536
1537	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1538	if (!cp)
1539		return;
1540
1541	hci_dev_lock(hdev);
1542
1543	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1544
1545	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1546		conn);
1547
1548	if (status) {
1549		if (conn && conn->state == BT_CONNECT) {
1550			conn->state = BT_CLOSED;
1551			hci_proto_connect_cfm(conn, status);
1552			hci_conn_del(conn);
1553		}
1554	} else {
1555		if (!conn) {
1556			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1557			if (conn) {
1558				conn->dst_type = cp->peer_addr_type;
1559				conn->out = true;
1560			} else {
1561				BT_ERR("No memory for new connection");
1562			}
1563		}
1564	}
1565
1566	hci_dev_unlock(hdev);
1567}
1568
1569static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1570{
1571	BT_DBG("%s status 0x%x", hdev->name, status);
1572}
1573
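/* Inquiry Complete: clear the inquiry state and, when mgmt-driven
 * discovery is active, either start resolving names from the inquiry
 * cache or mark the discovery as stopped. */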
1574static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1575{
1576	__u8 status = *((__u8 *) skb->data);
1577	struct discovery_state *discov = &hdev->discovery;
1578	struct inquiry_entry *e;
1579
1580	BT_DBG("%s status %d", hdev->name, status);
1581
1582	hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1583
1584	hci_conn_check_pending(hdev);
1585
1586	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1587		return;
1588
1589	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1590		return;
1591
1592	hci_dev_lock(hdev);
1593
1594	if (discov->state != DISCOVERY_INQUIRY)
1595		goto unlock;
1596
1597	if (list_empty(&discov->resolve)) {
1598		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1599		goto unlock;
1600	}
1601
1602	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1603	if (e && hci_resolve_name(hdev, e) == 0) {
1604		e->name_state = NAME_PENDING;
1605		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1606	} else {
1607		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1608	}
1609
1610unlock:
1611	hci_dev_unlock(hdev);
1612}
1613
1614static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1615{
1616	struct inquiry_data data;
1617	struct inquiry_info *info = (void *) (skb->data + 1);
1618	int num_rsp = *((__u8 *) skb->data);
1619
1620	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1621
1622	if (!num_rsp)
1623		return;
1624
1625	hci_dev_lock(hdev);
1626
1627	for (; num_rsp; num_rsp--, info++) {
1628		bool name_known;
1629
1630		bacpy(&data.bdaddr, &info->bdaddr);
1631		data.pscan_rep_mode	= info->pscan_rep_mode;
1632		data.pscan_period_mode	= info->pscan_period_mode;
1633		data.pscan_mode		= info->pscan_mode;
1634		memcpy(data.dev_class, info->dev_class, 3);
1635		data.clock_offset	= info->clock_offset;
1636		data.rssi		= 0x00;
1637		data.ssp_mode		= 0x00;
1638
1639		name_known = hci_inquiry_cache_update(hdev, &data, false);
1640		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1641					info->dev_class, 0, !name_known,
1642					NULL, 0);
1643	}
1644
1645	hci_dev_unlock(hdev);
1646}
1647
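/* Connection Complete: on success store the handle, read the remote
 * features for ACL links and, for incoming connections on pre-2.0
 * controllers, update the packet type; on failure mark the connection
 * closed and report ACL failures to the mgmt layer. */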
1648static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1649{
1650	struct hci_ev_conn_complete *ev = (void *) skb->data;
1651	struct hci_conn *conn;
1652
1653	BT_DBG("%s", hdev->name);
1654
1655	hci_dev_lock(hdev);
1656
1657	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1658	if (!conn) {
1659		if (ev->link_type != SCO_LINK)
1660			goto unlock;
1661
1662		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1663		if (!conn)
1664			goto unlock;
1665
1666		conn->type = SCO_LINK;
1667	}
1668
1669	if (!ev->status) {
1670		conn->handle = __le16_to_cpu(ev->handle);
1671
1672		if (conn->type == ACL_LINK) {
1673			conn->state = BT_CONFIG;
1674			hci_conn_hold(conn);
1675			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1676		} else
1677			conn->state = BT_CONNECTED;
1678
1679		hci_conn_hold_device(conn);
1680		hci_conn_add_sysfs(conn);
1681
1682		if (test_bit(HCI_AUTH, &hdev->flags))
1683			conn->link_mode |= HCI_LM_AUTH;
1684
1685		if (test_bit(HCI_ENCRYPT, &hdev->flags))
1686			conn->link_mode |= HCI_LM_ENCRYPT;
1687
1688		/* Get remote features */
1689		if (conn->type == ACL_LINK) {
1690			struct hci_cp_read_remote_features cp;
1691			cp.handle = ev->handle;
1692			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1693							sizeof(cp), &cp);
1694		}
1695
1696		/* Set packet type for incoming connection */
1697		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1698			struct hci_cp_change_conn_ptype cp;
1699			cp.handle = ev->handle;
1700			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1701			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
1702							sizeof(cp), &cp);
1703		}
1704	} else {
1705		conn->state = BT_CLOSED;
1706		if (conn->type == ACL_LINK)
1707			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1708						conn->dst_type, ev->status);
1709	}
1710
1711	if (conn->type == ACL_LINK)
1712		hci_sco_setup(conn, ev->status);
1713
1714	if (ev->status) {
1715		hci_proto_connect_cfm(conn, ev->status);
1716		hci_conn_del(conn);
1717	} else if (ev->link_type != ACL_LINK)
1718		hci_proto_connect_cfm(conn, ev->status);
1719
1720unlock:
1721	hci_dev_unlock(hdev);
1722
1723	hci_conn_check_pending(hdev);
1724}
1725
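/* Connection Request: accept the connection when the link mode and
 * the protocols allow it and the peer is not blacklisted, using the
 * ACL or synchronous accept command as appropriate; otherwise reject
 * the request. */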
1726static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1727{
1728	struct hci_ev_conn_request *ev = (void *) skb->data;
1729	int mask = hdev->link_mode;
1730
1731	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1732					batostr(&ev->bdaddr), ev->link_type);
1733
1734	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1735
1736	if ((mask & HCI_LM_ACCEPT) &&
1737			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1738		/* Connection accepted */
1739		struct inquiry_entry *ie;
1740		struct hci_conn *conn;
1741
1742		hci_dev_lock(hdev);
1743
1744		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1745		if (ie)
1746			memcpy(ie->data.dev_class, ev->dev_class, 3);
1747
1748		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1749		if (!conn) {
1750			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1751			if (!conn) {
1752				BT_ERR("No memory for new connection");
1753				hci_dev_unlock(hdev);
1754				return;
1755			}
1756		}
1757
1758		memcpy(conn->dev_class, ev->dev_class, 3);
1759		conn->state = BT_CONNECT;
1760
1761		hci_dev_unlock(hdev);
1762
1763		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1764			struct hci_cp_accept_conn_req cp;
1765
1766			bacpy(&cp.bdaddr, &ev->bdaddr);
1767
1768			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1769				cp.role = 0x00; /* Become master */
1770			else
1771				cp.role = 0x01; /* Remain slave */
1772
1773			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
1774							sizeof(cp), &cp);
1775		} else {
1776			struct hci_cp_accept_sync_conn_req cp;
1777
1778			bacpy(&cp.bdaddr, &ev->bdaddr);
1779			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1780
1781			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
1782			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
1783			cp.max_latency    = cpu_to_le16(0xffff);
1784			cp.content_format = cpu_to_le16(hdev->voice_setting);
1785			cp.retrans_effort = 0xff;
1786
1787			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1788							sizeof(cp), &cp);
1789		}
1790	} else {
1791		/* Connection rejected */
1792		struct hci_cp_reject_conn_req cp;
1793
1794		bacpy(&cp.bdaddr, &ev->bdaddr);
1795		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1796		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1797	}
1798}
1799
1800static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1801{
1802	struct hci_ev_disconn_complete *ev = (void *) skb->data;
1803	struct hci_conn *conn;
1804
1805	BT_DBG("%s status %d", hdev->name, ev->status);
1806
1807	hci_dev_lock(hdev);
1808
1809	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1810	if (!conn)
1811		goto unlock;
1812
1813	if (ev->status == 0)
1814		conn->state = BT_CLOSED;
1815
1816	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1817			(conn->type == ACL_LINK || conn->type == LE_LINK)) {
1818		if (ev->status != 0)
1819			mgmt_disconnect_failed(hdev, &conn->dst, ev->status);
1820		else
1821			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1822							conn->dst_type);
1823	}
1824
1825	if (ev->status == 0) {
1826		hci_proto_disconn_cfm(conn, ev->reason);
1827		hci_conn_del(conn);
1828	}
1829
1830unlock:
1831	hci_dev_unlock(hdev);
1832}
1833
1834static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1835{
1836	struct hci_ev_auth_complete *ev = (void *) skb->data;
1837	struct hci_conn *conn;
1838
1839	BT_DBG("%s status %d", hdev->name, ev->status);
1840
1841	hci_dev_lock(hdev);
1842
1843	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1844	if (!conn)
1845		goto unlock;
1846
1847	if (!ev->status) {
1848		if (!hci_conn_ssp_enabled(conn) &&
1849				test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1850			BT_INFO("re-auth of legacy device is not possible.");
1851		} else {
1852			conn->link_mode |= HCI_LM_AUTH;
1853			conn->sec_level = conn->pending_sec_level;
1854		}
1855	} else {
1856		mgmt_auth_failed(hdev, &conn->dst, ev->status);
1857	}
1858
1859	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1860	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1861
1862	if (conn->state == BT_CONFIG) {
1863		if (!ev->status && hci_conn_ssp_enabled(conn)) {
1864			struct hci_cp_set_conn_encrypt cp;
1865			cp.handle  = ev->handle;
1866			cp.encrypt = 0x01;
1867			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1868									&cp);
1869		} else {
1870			conn->state = BT_CONNECTED;
1871			hci_proto_connect_cfm(conn, ev->status);
1872			hci_conn_put(conn);
1873		}
1874	} else {
1875		hci_auth_cfm(conn, ev->status);
1876
1877		hci_conn_hold(conn);
1878		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1879		hci_conn_put(conn);
1880	}
1881
1882	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1883		if (!ev->status) {
1884			struct hci_cp_set_conn_encrypt cp;
1885			cp.handle  = ev->handle;
1886			cp.encrypt = 0x01;
1887			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1888									&cp);
1889		} else {
1890			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1891			hci_encrypt_cfm(conn, ev->status, 0x00);
1892		}
1893	}
1894
1895unlock:
1896	hci_dev_unlock(hdev);
1897}
1898
1899static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1900{
1901	struct hci_ev_remote_name *ev = (void *) skb->data;
1902	struct hci_conn *conn;
1903
1904	BT_DBG("%s", hdev->name);
1905
1906	hci_conn_check_pending(hdev);
1907
1908	hci_dev_lock(hdev);
1909
1910	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1911
1912	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1913		goto check_auth;
1914
1915	if (ev->status == 0)
1916		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1917					strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1918	else
1919		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
1920
1921check_auth:
1922	if (!conn)
1923		goto unlock;
1924
1925	if (!hci_outgoing_auth_needed(hdev, conn))
1926		goto unlock;
1927
1928	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1929		struct hci_cp_auth_requested cp;
1930		cp.handle = __cpu_to_le16(conn->handle);
1931		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1932	}
1933
1934unlock:
1935	hci_dev_unlock(hdev);
1936}
1937
1938static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1939{
1940	struct hci_ev_encrypt_change *ev = (void *) skb->data;
1941	struct hci_conn *conn;
1942
1943	BT_DBG("%s status %d", hdev->name, ev->status);
1944
1945	hci_dev_lock(hdev);
1946
1947	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1948	if (conn) {
1949		if (!ev->status) {
1950			if (ev->encrypt) {
1951				/* Encryption implies authentication */
1952				conn->link_mode |= HCI_LM_AUTH;
1953				conn->link_mode |= HCI_LM_ENCRYPT;
1954				conn->sec_level = conn->pending_sec_level;
1955			} else
1956				conn->link_mode &= ~HCI_LM_ENCRYPT;
1957		}
1958
1959		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1960
1961		if (conn->state == BT_CONFIG) {
1962			if (!ev->status)
1963				conn->state = BT_CONNECTED;
1964
1965			hci_proto_connect_cfm(conn, ev->status);
1966			hci_conn_put(conn);
1967		} else
1968			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1969	}
1970
1971	hci_dev_unlock(hdev);
1972}
1973
1974static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1975{
1976	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1977	struct hci_conn *conn;
1978
1979	BT_DBG("%s status %d", hdev->name, ev->status);
1980
1981	hci_dev_lock(hdev);
1982
1983	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1984	if (conn) {
1985		if (!ev->status)
1986			conn->link_mode |= HCI_LM_SECURE;
1987
1988		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1989
1990		hci_key_change_cfm(conn, ev->status);
1991	}
1992
1993	hci_dev_unlock(hdev);
1994}
1995
1996static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1997{
1998	struct hci_ev_remote_features *ev = (void *) skb->data;
1999	struct hci_conn *conn;
2000
2001	BT_DBG("%s status %d", hdev->name, ev->status);
2002
2003	hci_dev_lock(hdev);
2004
2005	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2006	if (!conn)
2007		goto unlock;
2008
2009	if (!ev->status)
2010		memcpy(conn->features, ev->features, 8);
2011
2012	if (conn->state != BT_CONFIG)
2013		goto unlock;
2014
2015	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2016		struct hci_cp_read_remote_ext_features cp;
2017		cp.handle = ev->handle;
2018		cp.page = 0x01;
2019		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2020							sizeof(cp), &cp);
2021		goto unlock;
2022	}
2023
2024	if (!ev->status) {
2025		struct hci_cp_remote_name_req cp;
2026		memset(&cp, 0, sizeof(cp));
2027		bacpy(&cp.bdaddr, &conn->dst);
2028		cp.pscan_rep_mode = 0x02;
2029		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2030	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2031		mgmt_device_connected(hdev, &conn->dst, conn->type,
2032						conn->dst_type, NULL, 0,
2033						conn->dev_class);
2034
2035	if (!hci_outgoing_auth_needed(hdev, conn)) {
2036		conn->state = BT_CONNECTED;
2037		hci_proto_connect_cfm(conn, ev->status);
2038		hci_conn_put(conn);
2039	}
2040
2041unlock:
2042	hci_dev_unlock(hdev);
2043}
2044
2045static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2046{
2047	BT_DBG("%s", hdev->name);
2048}
2049
2050static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2051{
2052	BT_DBG("%s", hdev->name);
2053}
2054
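/* Command Complete: strip the event header and dispatch to the
 * hci_cc_* handler that matches the opcode of the completed command. */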
2055static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2056{
2057	struct hci_ev_cmd_complete *ev = (void *) skb->data;
2058	__u16 opcode;
2059
2060	skb_pull(skb, sizeof(*ev));
2061
2062	opcode = __le16_to_cpu(ev->opcode);
2063
2064	switch (opcode) {
2065	case HCI_OP_INQUIRY_CANCEL:
2066		hci_cc_inquiry_cancel(hdev, skb);
2067		break;
2068
2069	case HCI_OP_EXIT_PERIODIC_INQ:
2070		hci_cc_exit_periodic_inq(hdev, skb);
2071		break;
2072
2073	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2074		hci_cc_remote_name_req_cancel(hdev, skb);
2075		break;
2076
2077	case HCI_OP_ROLE_DISCOVERY:
2078		hci_cc_role_discovery(hdev, skb);
2079		break;
2080
2081	case HCI_OP_READ_LINK_POLICY:
2082		hci_cc_read_link_policy(hdev, skb);
2083		break;
2084
2085	case HCI_OP_WRITE_LINK_POLICY:
2086		hci_cc_write_link_policy(hdev, skb);
2087		break;
2088
2089	case HCI_OP_READ_DEF_LINK_POLICY:
2090		hci_cc_read_def_link_policy(hdev, skb);
2091		break;
2092
2093	case HCI_OP_WRITE_DEF_LINK_POLICY:
2094		hci_cc_write_def_link_policy(hdev, skb);
2095		break;
2096
2097	case HCI_OP_RESET:
2098		hci_cc_reset(hdev, skb);
2099		break;
2100
2101	case HCI_OP_WRITE_LOCAL_NAME:
2102		hci_cc_write_local_name(hdev, skb);
2103		break;
2104
2105	case HCI_OP_READ_LOCAL_NAME:
2106		hci_cc_read_local_name(hdev, skb);
2107		break;
2108
2109	case HCI_OP_WRITE_AUTH_ENABLE:
2110		hci_cc_write_auth_enable(hdev, skb);
2111		break;
2112
2113	case HCI_OP_WRITE_ENCRYPT_MODE:
2114		hci_cc_write_encrypt_mode(hdev, skb);
2115		break;
2116
2117	case HCI_OP_WRITE_SCAN_ENABLE:
2118		hci_cc_write_scan_enable(hdev, skb);
2119		break;
2120
2121	case HCI_OP_READ_CLASS_OF_DEV:
2122		hci_cc_read_class_of_dev(hdev, skb);
2123		break;
2124
2125	case HCI_OP_WRITE_CLASS_OF_DEV:
2126		hci_cc_write_class_of_dev(hdev, skb);
2127		break;
2128
2129	case HCI_OP_READ_VOICE_SETTING:
2130		hci_cc_read_voice_setting(hdev, skb);
2131		break;
2132
2133	case HCI_OP_WRITE_VOICE_SETTING:
2134		hci_cc_write_voice_setting(hdev, skb);
2135		break;
2136
2137	case HCI_OP_HOST_BUFFER_SIZE:
2138		hci_cc_host_buffer_size(hdev, skb);
2139		break;
2140
2141	case HCI_OP_READ_SSP_MODE:
2142		hci_cc_read_ssp_mode(hdev, skb);
2143		break;
2144
2145	case HCI_OP_WRITE_SSP_MODE:
2146		hci_cc_write_ssp_mode(hdev, skb);
2147		break;
2148
2149	case HCI_OP_READ_LOCAL_VERSION:
2150		hci_cc_read_local_version(hdev, skb);
2151		break;
2152
2153	case HCI_OP_READ_LOCAL_COMMANDS:
2154		hci_cc_read_local_commands(hdev, skb);
2155		break;
2156
2157	case HCI_OP_READ_LOCAL_FEATURES:
2158		hci_cc_read_local_features(hdev, skb);
2159		break;
2160
2161	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2162		hci_cc_read_local_ext_features(hdev, skb);
2163		break;
2164
2165	case HCI_OP_READ_BUFFER_SIZE:
2166		hci_cc_read_buffer_size(hdev, skb);
2167		break;
2168
2169	case HCI_OP_READ_BD_ADDR:
2170		hci_cc_read_bd_addr(hdev, skb);
2171		break;
2172
2173	case HCI_OP_READ_DATA_BLOCK_SIZE:
2174		hci_cc_read_data_block_size(hdev, skb);
2175		break;
2176
2177	case HCI_OP_WRITE_CA_TIMEOUT:
2178		hci_cc_write_ca_timeout(hdev, skb);
2179		break;
2180
2181	case HCI_OP_READ_FLOW_CONTROL_MODE:
2182		hci_cc_read_flow_control_mode(hdev, skb);
2183		break;
2184
2185	case HCI_OP_READ_LOCAL_AMP_INFO:
2186		hci_cc_read_local_amp_info(hdev, skb);
2187		break;
2188
2189	case HCI_OP_DELETE_STORED_LINK_KEY:
2190		hci_cc_delete_stored_link_key(hdev, skb);
2191		break;
2192
2193	case HCI_OP_SET_EVENT_MASK:
2194		hci_cc_set_event_mask(hdev, skb);
2195		break;
2196
2197	case HCI_OP_WRITE_INQUIRY_MODE:
2198		hci_cc_write_inquiry_mode(hdev, skb);
2199		break;
2200
2201	case HCI_OP_READ_INQ_RSP_TX_POWER:
2202		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2203		break;
2204
2205	case HCI_OP_SET_EVENT_FLT:
2206		hci_cc_set_event_flt(hdev, skb);
2207		break;
2208
2209	case HCI_OP_PIN_CODE_REPLY:
2210		hci_cc_pin_code_reply(hdev, skb);
2211		break;
2212
2213	case HCI_OP_PIN_CODE_NEG_REPLY:
2214		hci_cc_pin_code_neg_reply(hdev, skb);
2215		break;
2216
2217	case HCI_OP_READ_LOCAL_OOB_DATA:
2218		hci_cc_read_local_oob_data_reply(hdev, skb);
2219		break;
2220
2221	case HCI_OP_LE_READ_BUFFER_SIZE:
2222		hci_cc_le_read_buffer_size(hdev, skb);
2223		break;
2224
2225	case HCI_OP_USER_CONFIRM_REPLY:
2226		hci_cc_user_confirm_reply(hdev, skb);
2227		break;
2228
2229	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2230		hci_cc_user_confirm_neg_reply(hdev, skb);
2231		break;
2232
2233	case HCI_OP_USER_PASSKEY_REPLY:
2234		hci_cc_user_passkey_reply(hdev, skb);
2235		break;
2236
2237	case HCI_OP_USER_PASSKEY_NEG_REPLY:
2238		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;
2239
2240	case HCI_OP_LE_SET_SCAN_PARAM:
2241		hci_cc_le_set_scan_param(hdev, skb);
2242		break;
2243
2244	case HCI_OP_LE_SET_SCAN_ENABLE:
2245		hci_cc_le_set_scan_enable(hdev, skb);
2246		break;
2247
2248	case HCI_OP_LE_LTK_REPLY:
2249		hci_cc_le_ltk_reply(hdev, skb);
2250		break;
2251
2252	case HCI_OP_LE_LTK_NEG_REPLY:
2253		hci_cc_le_ltk_neg_reply(hdev, skb);
2254		break;
2255
2256	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2257		hci_cc_write_le_host_supported(hdev, skb);
2258		break;
2259
2260	default:
2261		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2262		break;
2263	}
2264
2265	if (ev->opcode != HCI_OP_NOP)
2266		del_timer(&hdev->cmd_timer);
2267
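	/* The event carries the number of commands the controller can
	 * accept again; if it can take more, resume the command queue. */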
2268	if (ev->ncmd) {
2269		atomic_set(&hdev->cmd_cnt, 1);
2270		if (!skb_queue_empty(&hdev->cmd_q))
2271			queue_work(hdev->workqueue, &hdev->cmd_work);
2272	}
2273}
2274
2275static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2276{
2277	struct hci_ev_cmd_status *ev = (void *) skb->data;
2278	__u16 opcode;
2279
2280	skb_pull(skb, sizeof(*ev));
2281
2282	opcode = __le16_to_cpu(ev->opcode);
2283
2284	switch (opcode) {
2285	case HCI_OP_INQUIRY:
2286		hci_cs_inquiry(hdev, ev->status);
2287		break;
2288
2289	case HCI_OP_CREATE_CONN:
2290		hci_cs_create_conn(hdev, ev->status);
2291		break;
2292
2293	case HCI_OP_ADD_SCO:
2294		hci_cs_add_sco(hdev, ev->status);
2295		break;
2296
2297	case HCI_OP_AUTH_REQUESTED:
2298		hci_cs_auth_requested(hdev, ev->status);
2299		break;
2300
2301	case HCI_OP_SET_CONN_ENCRYPT:
2302		hci_cs_set_conn_encrypt(hdev, ev->status);
2303		break;
2304
2305	case HCI_OP_REMOTE_NAME_REQ:
2306		hci_cs_remote_name_req(hdev, ev->status);
2307		break;
2308
2309	case HCI_OP_READ_REMOTE_FEATURES:
2310		hci_cs_read_remote_features(hdev, ev->status);
2311		break;
2312
2313	case HCI_OP_READ_REMOTE_EXT_FEATURES:
2314		hci_cs_read_remote_ext_features(hdev, ev->status);
2315		break;
2316
2317	case HCI_OP_SETUP_SYNC_CONN:
2318		hci_cs_setup_sync_conn(hdev, ev->status);
2319		break;
2320
2321	case HCI_OP_SNIFF_MODE:
2322		hci_cs_sniff_mode(hdev, ev->status);
2323		break;
2324
2325	case HCI_OP_EXIT_SNIFF_MODE:
2326		hci_cs_exit_sniff_mode(hdev, ev->status);
2327		break;
2328
2329	case HCI_OP_DISCONNECT:
2330		if (ev->status != 0)
2331			mgmt_disconnect_failed(hdev, NULL, ev->status);
2332		break;
2333
2334	case HCI_OP_LE_CREATE_CONN:
2335		hci_cs_le_create_conn(hdev, ev->status);
2336		break;
2337
2338	case HCI_OP_LE_START_ENC:
2339		hci_cs_le_start_enc(hdev, ev->status);
2340		break;
2341
2342	default:
2343		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2344		break;
2345	}
2346
2347	if (ev->opcode != HCI_OP_NOP)
2348		del_timer(&hdev->cmd_timer);
2349
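	/* Same as for Command Complete, but do not resume the command
	 * queue while an HCI reset is still in progress. */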
2350	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2351		atomic_set(&hdev->cmd_cnt, 1);
2352		if (!skb_queue_empty(&hdev->cmd_q))
2353			queue_work(hdev->workqueue, &hdev->cmd_work);
2354	}
2355}
2356
2357static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2358{
2359	struct hci_ev_role_change *ev = (void *) skb->data;
2360	struct hci_conn *conn;
2361
2362	BT_DBG("%s status %d", hdev->name, ev->status);
2363
2364	hci_dev_lock(hdev);
2365
2366	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2367	if (conn) {
2368		if (!ev->status) {
2369			if (ev->role)
2370				conn->link_mode &= ~HCI_LM_MASTER;
2371			else
2372				conn->link_mode |= HCI_LM_MASTER;
2373		}
2374
2375		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2376
2377		hci_role_switch_cfm(conn, ev->status, ev->role);
2378	}
2379
2380	hci_dev_unlock(hdev);
2381}
2382
2383static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2384{
2385	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2386	int i;
2387
2388	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2389		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2390		return;
2391	}
2392
2393	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2394			ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2395		BT_DBG("%s bad parameters", hdev->name);
2396		return;
2397	}
2398
2399	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2400
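	/* Credit the completed packets back to the matching connection and
	 * to the per-link-type TX quota, capped at the controller limits. */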
2401	for (i = 0; i < ev->num_hndl; i++) {
2402		struct hci_comp_pkts_info *info = &ev->handles[i];
2403		struct hci_conn *conn;
2404		__u16  handle, count;
2405
2406		handle = __le16_to_cpu(info->handle);
2407		count  = __le16_to_cpu(info->count);
2408
2409		conn = hci_conn_hash_lookup_handle(hdev, handle);
2410		if (!conn)
2411			continue;
2412
2413		conn->sent -= count;
2414
2415		switch (conn->type) {
2416		case ACL_LINK:
2417			hdev->acl_cnt += count;
2418			if (hdev->acl_cnt > hdev->acl_pkts)
2419				hdev->acl_cnt = hdev->acl_pkts;
2420			break;
2421
2422		case LE_LINK:
2423			if (hdev->le_pkts) {
2424				hdev->le_cnt += count;
2425				if (hdev->le_cnt > hdev->le_pkts)
2426					hdev->le_cnt = hdev->le_pkts;
2427			} else {
2428				hdev->acl_cnt += count;
2429				if (hdev->acl_cnt > hdev->acl_pkts)
2430					hdev->acl_cnt = hdev->acl_pkts;
2431			}
2432			break;
2433
2434		case SCO_LINK:
2435			hdev->sco_cnt += count;
2436			if (hdev->sco_cnt > hdev->sco_pkts)
2437				hdev->sco_cnt = hdev->sco_pkts;
2438			break;
2439
2440		default:
2441			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2442			break;
2443		}
2444	}
2445
2446	queue_work(hdev->workqueue, &hdev->tx_work);
2447}
2448
2449static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2450							struct sk_buff *skb)
2451{
2452	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2453	int i;
2454
2455	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2456		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2457		return;
2458	}
2459
2460	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2461			ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2462		BT_DBG("%s bad parameters", hdev->name);
2463		return;
2464	}
2465
2466	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2467								ev->num_hndl);
2468
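	/* Same accounting as above, but in units of controller data blocks
	 * rather than packets. */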
2469	for (i = 0; i < ev->num_hndl; i++) {
2470		struct hci_comp_blocks_info *info = &ev->handles[i];
2471		struct hci_conn *conn;
2472		__u16  handle, block_count;
2473
2474		handle = __le16_to_cpu(info->handle);
2475		block_count = __le16_to_cpu(info->blocks);
2476
2477		conn = hci_conn_hash_lookup_handle(hdev, handle);
2478		if (!conn)
2479			continue;
2480
2481		conn->sent -= block_count;
2482
2483		switch (conn->type) {
2484		case ACL_LINK:
2485			hdev->block_cnt += block_count;
2486			if (hdev->block_cnt > hdev->num_blocks)
2487				hdev->block_cnt = hdev->num_blocks;
2488			break;
2489
2490		default:
2491			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2492			break;
2493		}
2494	}
2495
2496	queue_work(hdev->workqueue, &hdev->tx_work);
2497}
2498
2499static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2500{
2501	struct hci_ev_mode_change *ev = (void *) skb->data;
2502	struct hci_conn *conn;
2503
2504	BT_DBG("%s status %d", hdev->name, ev->status);
2505
2506	hci_dev_lock(hdev);
2507
2508	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2509	if (conn) {
2510		conn->mode = ev->mode;
2511		conn->interval = __le16_to_cpu(ev->interval);
2512
2513		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2514			if (conn->mode == HCI_CM_ACTIVE)
2515				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2516			else
2517				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2518		}
2519
2520		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2521			hci_sco_setup(conn, ev->status);
2522	}
2523
2524	hci_dev_unlock(hdev);
2525}
2526
2527static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2528{
2529	struct hci_ev_pin_code_req *ev = (void *) skb->data;
2530	struct hci_conn *conn;
2531
2532	BT_DBG("%s", hdev->name);
2533
2534	hci_dev_lock(hdev);
2535
2536	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2537	if (!conn)
2538		goto unlock;
2539
2540	if (conn->state == BT_CONNECTED) {
2541		hci_conn_hold(conn);
2542		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2543		hci_conn_put(conn);
2544	}
2545
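	/* If the adapter is not pairable reject the PIN request right away;
	 * otherwise let user space supply the PIN through mgmt. */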
2546	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2547		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2548					sizeof(ev->bdaddr), &ev->bdaddr);
2549	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2550		u8 secure;
2551
2552		if (conn->pending_sec_level == BT_SECURITY_HIGH)
2553			secure = 1;
2554		else
2555			secure = 0;
2556
2557		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2558	}
2559
2560unlock:
2561	hci_dev_unlock(hdev);
2562}
2563
2564static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2565{
2566	struct hci_ev_link_key_req *ev = (void *) skb->data;
2567	struct hci_cp_link_key_reply cp;
2568	struct hci_conn *conn;
2569	struct link_key *key;
2570
2571	BT_DBG("%s", hdev->name);
2572
2573	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2574		return;
2575
2576	hci_dev_lock(hdev);
2577
2578	key = hci_find_link_key(hdev, &ev->bdaddr);
2579	if (!key) {
2580		BT_DBG("%s link key not found for %s", hdev->name,
2581							batostr(&ev->bdaddr));
2582		goto not_found;
2583	}
2584
2585	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2586							batostr(&ev->bdaddr));
2587
2588	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2589				key->type == HCI_LK_DEBUG_COMBINATION) {
2590		BT_DBG("%s ignoring debug key", hdev->name);
2591		goto not_found;
2592	}
2593
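	/* Make sure the stored key is strong enough for the security level
	 * the pending connection asks for before handing it back. */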
2594	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2595	if (conn) {
2596		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2597				conn->auth_type != 0xff &&
2598				(conn->auth_type & 0x01)) {
2599			BT_DBG("%s ignoring unauthenticated key", hdev->name);
2600			goto not_found;
2601		}
2602
2603		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2604				conn->pending_sec_level == BT_SECURITY_HIGH) {
2605			BT_DBG("%s ignoring key unauthenticated for high security",
2606								hdev->name);
2607			goto not_found;
2608		}
2609
2610		conn->key_type = key->type;
2611		conn->pin_length = key->pin_len;
2612	}
2613
2614	bacpy(&cp.bdaddr, &ev->bdaddr);
2615	memcpy(cp.link_key, key->val, 16);
2616
2617	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2618
2619	hci_dev_unlock(hdev);
2620
2621	return;
2622
2623not_found:
2624	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2625	hci_dev_unlock(hdev);
2626}
2627
2628static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2629{
2630	struct hci_ev_link_key_notify *ev = (void *) skb->data;
2631	struct hci_conn *conn;
2632	u8 pin_len = 0;
2633
2634	BT_DBG("%s", hdev->name);
2635
2636	hci_dev_lock(hdev);
2637
2638	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2639	if (conn) {
2640		hci_conn_hold(conn);
2641		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2642		pin_len = conn->pin_length;
2643
2644		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2645			conn->key_type = ev->key_type;
2646
2647		hci_conn_put(conn);
2648	}
2649
2650	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2651		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2652							ev->key_type, pin_len);
2653
2654	hci_dev_unlock(hdev);
2655}
2656
2657static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2658{
2659	struct hci_ev_clock_offset *ev = (void *) skb->data;
2660	struct hci_conn *conn;
2661
2662	BT_DBG("%s status %d", hdev->name, ev->status);
2663
2664	hci_dev_lock(hdev);
2665
2666	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2667	if (conn && !ev->status) {
2668		struct inquiry_entry *ie;
2669
2670		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2671		if (ie) {
2672			ie->data.clock_offset = ev->clock_offset;
2673			ie->timestamp = jiffies;
2674		}
2675	}
2676
2677	hci_dev_unlock(hdev);
2678}
2679
2680static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2681{
2682	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2683	struct hci_conn *conn;
2684
2685	BT_DBG("%s status %d", hdev->name, ev->status);
2686
2687	hci_dev_lock(hdev);
2688
2689	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2690	if (conn && !ev->status)
2691		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2692
2693	hci_dev_unlock(hdev);
2694}
2695
2696static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2697{
2698	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2699	struct inquiry_entry *ie;
2700
2701	BT_DBG("%s", hdev->name);
2702
2703	hci_dev_lock(hdev);
2704
2705	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2706	if (ie) {
2707		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2708		ie->timestamp = jiffies;
2709	}
2710
2711	hci_dev_unlock(hdev);
2712}
2713
2714static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2715{
2716	struct inquiry_data data;
2717	int num_rsp = *((__u8 *) skb->data);
2718	bool name_known;
2719
2720	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2721
2722	if (!num_rsp)
2723		return;
2724
2725	hci_dev_lock(hdev);
2726
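	/* Responses come in two formats, with or without the page scan mode
	 * field; tell them apart by the per-response size. */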
2727	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2728		struct inquiry_info_with_rssi_and_pscan_mode *info;
2729		info = (void *) (skb->data + 1);
2730
2731		for (; num_rsp; num_rsp--, info++) {
2732			bacpy(&data.bdaddr, &info->bdaddr);
2733			data.pscan_rep_mode	= info->pscan_rep_mode;
2734			data.pscan_period_mode	= info->pscan_period_mode;
2735			data.pscan_mode		= info->pscan_mode;
2736			memcpy(data.dev_class, info->dev_class, 3);
2737			data.clock_offset	= info->clock_offset;
2738			data.rssi		= info->rssi;
2739			data.ssp_mode		= 0x00;
2740
2741			name_known = hci_inquiry_cache_update(hdev, &data,
2742								false);
2743			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2744						info->dev_class, info->rssi,
2745						!name_known, NULL, 0);
2746		}
2747	} else {
2748		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2749
2750		for (; num_rsp; num_rsp--, info++) {
2751			bacpy(&data.bdaddr, &info->bdaddr);
2752			data.pscan_rep_mode	= info->pscan_rep_mode;
2753			data.pscan_period_mode	= info->pscan_period_mode;
2754			data.pscan_mode		= 0x00;
2755			memcpy(data.dev_class, info->dev_class, 3);
2756			data.clock_offset	= info->clock_offset;
2757			data.rssi		= info->rssi;
2758			data.ssp_mode		= 0x00;
2759			name_known = hci_inquiry_cache_update(hdev, &data,
2760								false);
2761			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2762						info->dev_class, info->rssi,
2763						!name_known, NULL, 0);
2764		}
2765	}
2766
2767	hci_dev_unlock(hdev);
2768}
2769
2770static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2771{
2772	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2773	struct hci_conn *conn;
2774
2775	BT_DBG("%s", hdev->name);
2776
2777	hci_dev_lock(hdev);
2778
2779	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2780	if (!conn)
2781		goto unlock;
2782
2783	if (!ev->status && ev->page == 0x01) {
2784		struct inquiry_entry *ie;
2785
2786		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2787		if (ie)
2788			ie->data.ssp_mode = (ev->features[0] & 0x01);
2789
2790		if (ev->features[0] & 0x01)
2791			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2792	}
2793
2794	if (conn->state != BT_CONFIG)
2795		goto unlock;
2796
2797	if (!ev->status) {
2798		struct hci_cp_remote_name_req cp;
2799		memset(&cp, 0, sizeof(cp));
2800		bacpy(&cp.bdaddr, &conn->dst);
2801		cp.pscan_rep_mode = 0x02;
2802		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2803	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2804		mgmt_device_connected(hdev, &conn->dst, conn->type,
2805						conn->dst_type, NULL, 0,
2806						conn->dev_class);
2807
2808	if (!hci_outgoing_auth_needed(hdev, conn)) {
2809		conn->state = BT_CONNECTED;
2810		hci_proto_connect_cfm(conn, ev->status);
2811		hci_conn_put(conn);
2812	}
2813
2814unlock:
2815	hci_dev_unlock(hdev);
2816}
2817
2818static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2819{
2820	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2821	struct hci_conn *conn;
2822
2823	BT_DBG("%s status %d", hdev->name, ev->status);
2824
2825	hci_dev_lock(hdev);
2826
2827	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2828	if (!conn) {
2829		if (ev->link_type == ESCO_LINK)
2830			goto unlock;
2831
2832		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2833		if (!conn)
2834			goto unlock;
2835
2836		conn->type = SCO_LINK;
2837	}
2838
2839	switch (ev->status) {
2840	case 0x00:
2841		conn->handle = __le16_to_cpu(ev->handle);
2842		conn->state  = BT_CONNECTED;
2843
2844		hci_conn_hold_device(conn);
2845		hci_conn_add_sysfs(conn);
2846		break;
2847
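	/* These failures can be caused by the packet type selection; retry an
	 * outgoing connection once with adjusted packet types before closing. */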
2848	case 0x11:	/* Unsupported Feature or Parameter Value */
2849	case 0x1c:	/* SCO interval rejected */
2850	case 0x1a:	/* Unsupported Remote Feature */
2851	case 0x1f:	/* Unspecified error */
2852		if (conn->out && conn->attempt < 2) {
2853			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2854					(hdev->esco_type & EDR_ESCO_MASK);
2855			hci_setup_sync(conn, conn->link->handle);
2856			goto unlock;
2857		}
2858		/* fall through */
2859
2860	default:
2861		conn->state = BT_CLOSED;
2862		break;
2863	}
2864
2865	hci_proto_connect_cfm(conn, ev->status);
2866	if (ev->status)
2867		hci_conn_del(conn);
2868
2869unlock:
2870	hci_dev_unlock(hdev);
2871}
2872
2873static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2874{
2875	BT_DBG("%s", hdev->name);
2876}
2877
2878static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2879{
2880	struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2881
2882	BT_DBG("%s status %d", hdev->name, ev->status);
2883}
2884
2885static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2886{
2887	struct inquiry_data data;
2888	struct extended_inquiry_info *info = (void *) (skb->data + 1);
2889	int num_rsp = *((__u8 *) skb->data);
2890
2891	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2892
2893	if (!num_rsp)
2894		return;
2895
2896	hci_dev_lock(hdev);
2897
2898	for (; num_rsp; num_rsp--, info++) {
2899		bool name_known;
2900
2901		bacpy(&data.bdaddr, &info->bdaddr);
2902		data.pscan_rep_mode	= info->pscan_rep_mode;
2903		data.pscan_period_mode	= info->pscan_period_mode;
2904		data.pscan_mode		= 0x00;
2905		memcpy(data.dev_class, info->dev_class, 3);
2906		data.clock_offset	= info->clock_offset;
2907		data.rssi		= info->rssi;
2908		data.ssp_mode		= 0x01;
2909
2910		if (test_bit(HCI_MGMT, &hdev->dev_flags))
2911			name_known = eir_has_data_type(info->data,
2912							sizeof(info->data),
2913							EIR_NAME_COMPLETE);
2914		else
2915			name_known = true;
2916
2917		name_known = hci_inquiry_cache_update(hdev, &data, name_known);
2918		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2919						info->dev_class, info->rssi,
2920						!name_known, info->data,
2921						sizeof(info->data));
2922	}
2923
2924	hci_dev_unlock(hdev);
2925}
2926
2927static inline u8 hci_get_auth_req(struct hci_conn *conn)
2928{
2929	/* If remote requests dedicated bonding follow that lead */
2930	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2931		/* If both remote and local IO capabilities allow MITM
2932		 * protection then require it, otherwise don't */
2933		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2934			return 0x02;
2935		else
2936			return 0x03;
2937	}
2938
2939	/* If remote requests no-bonding follow that lead */
2940	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2941		return conn->remote_auth | (conn->auth_type & 0x01);
2942
2943	return conn->auth_type;
2944}
2945
2946static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2947{
2948	struct hci_ev_io_capa_request *ev = (void *) skb->data;
2949	struct hci_conn *conn;
2950
2951	BT_DBG("%s", hdev->name);
2952
2953	hci_dev_lock(hdev);
2954
2955	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2956	if (!conn)
2957		goto unlock;
2958
2959	hci_conn_hold(conn);
2960
2961	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2962		goto unlock;
2963
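	/* Reply with our IO capabilities only if we are pairable or the
	 * remote is merely asking for no-bonding; otherwise reject pairing. */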
2964	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
2965			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
2966		struct hci_cp_io_capability_reply cp;
2967
2968		bacpy(&cp.bdaddr, &ev->bdaddr);
2969		/* Change the IO capability from KeyboardDisplay to DisplayYesNo
2970		 * since KeyboardDisplay is not defined for BR/EDR in the spec. */
2971		cp.capability = (conn->io_capability == 0x04) ?
2972						0x01 : conn->io_capability;
2973		conn->auth_type = hci_get_auth_req(conn);
2974		cp.authentication = conn->auth_type;
2975
2976		if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
2977				hci_find_remote_oob_data(hdev, &conn->dst))
2978			cp.oob_data = 0x01;
2979		else
2980			cp.oob_data = 0x00;
2981
2982		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
2983							sizeof(cp), &cp);
2984	} else {
2985		struct hci_cp_io_capability_neg_reply cp;
2986
2987		bacpy(&cp.bdaddr, &ev->bdaddr);
2988		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
2989
2990		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
2991							sizeof(cp), &cp);
2992	}
2993
2994unlock:
2995	hci_dev_unlock(hdev);
2996}
2997
2998static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2999{
3000	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3001	struct hci_conn *conn;
3002
3003	BT_DBG("%s", hdev->name);
3004
3005	hci_dev_lock(hdev);
3006
3007	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3008	if (!conn)
3009		goto unlock;
3010
3011	conn->remote_cap = ev->capability;
3012	conn->remote_auth = ev->authentication;
3013	if (ev->oob_data)
3014		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3015
3016unlock:
3017	hci_dev_unlock(hdev);
3018}
3019
3020static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3021							struct sk_buff *skb)
3022{
3023	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3024	int loc_mitm, rem_mitm, confirm_hint = 0;
3025	struct hci_conn *conn;
3026
3027	BT_DBG("%s", hdev->name);
3028
3029	hci_dev_lock(hdev);
3030
3031	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3032		goto unlock;
3033
3034	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3035	if (!conn)
3036		goto unlock;
3037
3038	loc_mitm = (conn->auth_type & 0x01);
3039	rem_mitm = (conn->remote_auth & 0x01);
3040
3041	/* If we require MITM but the remote device can't provide that
3042	 * (it has NoInputNoOutput) then reject the confirmation
3043	 * request. The only exception is when we're dedicated bonding
3044	 * initiators (connect_cfm_cb set) since then we always have the MITM
3045	 * bit set. */
3046	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3047		BT_DBG("Rejecting request: remote device can't provide MITM");
3048		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3049					sizeof(ev->bdaddr), &ev->bdaddr);
3050		goto unlock;
3051	}
3052
3053	/* If no side requires MITM protection, auto-accept */
3054	if ((!loc_mitm || conn->remote_cap == 0x03) &&
3055				(!rem_mitm || conn->io_capability == 0x03)) {
3056
3057		/* If we're not the initiators request authorization to
3058		 * proceed from user space (mgmt_user_confirm with
3059		 * confirm_hint set to 1). */
3060		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3061			BT_DBG("Confirming auto-accept as acceptor");
3062			confirm_hint = 1;
3063			goto confirm;
3064		}
3065
3066		BT_DBG("Auto-accept of user confirmation with %ums delay",
3067						hdev->auto_accept_delay);
3068
3069		if (hdev->auto_accept_delay > 0) {
3070			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3071			mod_timer(&conn->auto_accept_timer, jiffies + delay);
3072			goto unlock;
3073		}
3074
3075		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3076						sizeof(ev->bdaddr), &ev->bdaddr);
3077		goto unlock;
3078	}
3079
3080confirm:
3081	mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey,
3082								confirm_hint);
3083
3084unlock:
3085	hci_dev_unlock(hdev);
3086}
3087
3088static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3089							struct sk_buff *skb)
3090{
3091	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3092
3093	BT_DBG("%s", hdev->name);
3094
3095	hci_dev_lock(hdev);
3096
3097	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3098		mgmt_user_passkey_request(hdev, &ev->bdaddr);
3099
3100	hci_dev_unlock(hdev);
3101}
3102
3103static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3104{
3105	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3106	struct hci_conn *conn;
3107
3108	BT_DBG("%s", hdev->name);
3109
3110	hci_dev_lock(hdev);
3111
3112	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3113	if (!conn)
3114		goto unlock;
3115
3116	/* To avoid duplicate auth_failed events to user space we check
3117	 * the HCI_CONN_AUTH_PEND flag which will be set if we
3118	 * initiated the authentication. A traditional auth_complete
3119	 * event is always produced when we are the initiator and is
3120	 * also mapped to the mgmt_auth_failed event */
3121	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3122		mgmt_auth_failed(hdev, &conn->dst, ev->status);
3123
3124	hci_conn_put(conn);
3125
3126unlock:
3127	hci_dev_unlock(hdev);
3128}
3129
3130static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3131{
3132	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3133	struct inquiry_entry *ie;
3134
3135	BT_DBG("%s", hdev->name);
3136
3137	hci_dev_lock(hdev);
3138
3139	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3140	if (ie)
3141		ie->data.ssp_mode = (ev->features[0] & 0x01);
3142
3143	hci_dev_unlock(hdev);
3144}
3145
3146static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3147							struct sk_buff *skb)
3148{
3149	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3150	struct oob_data *data;
3151
3152	BT_DBG("%s", hdev->name);
3153
3154	hci_dev_lock(hdev);
3155
3156	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3157		goto unlock;
3158
3159	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3160	if (data) {
3161		struct hci_cp_remote_oob_data_reply cp;
3162
3163		bacpy(&cp.bdaddr, &ev->bdaddr);
3164		memcpy(cp.hash, data->hash, sizeof(cp.hash));
3165		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3166
3167		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3168									&cp);
3169	} else {
3170		struct hci_cp_remote_oob_data_neg_reply cp;
3171
3172		bacpy(&cp.bdaddr, &ev->bdaddr);
3173		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3174									&cp);
3175	}
3176
3177unlock:
3178	hci_dev_unlock(hdev);
3179}
3180
3181static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3182{
3183	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3184	struct hci_conn *conn;
3185
3186	BT_DBG("%s status %d", hdev->name, ev->status);
3187
3188	hci_dev_lock(hdev);
3189
3190	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3191	if (!conn) {
3192		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3193		if (!conn) {
3194			BT_ERR("No memory for new connection");
3195			hci_dev_unlock(hdev);
3196			return;
3197		}
3198
3199		conn->dst_type = ev->bdaddr_type;
3200	}
3201
3202	if (ev->status) {
3203		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3204						conn->dst_type, ev->status);
3205		hci_proto_connect_cfm(conn, ev->status);
3206		conn->state = BT_CLOSED;
3207		hci_conn_del(conn);
3208		goto unlock;
3209	}
3210
3211	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3212		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3213						conn->dst_type, NULL, 0, 0);
3214
3215	conn->sec_level = BT_SECURITY_LOW;
3216	conn->handle = __le16_to_cpu(ev->handle);
3217	conn->state = BT_CONNECTED;
3218
3219	hci_conn_hold_device(conn);
3220	hci_conn_add_sysfs(conn);
3221
3222	hci_proto_connect_cfm(conn, ev->status);
3223
3224unlock:
3225	hci_dev_unlock(hdev);
3226}
3227
3228static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3229						struct sk_buff *skb)
3230{
3231	u8 num_reports = skb->data[0];
3232	void *ptr = &skb->data[1];
3233	s8 rssi;
3234
3235	hci_dev_lock(hdev);
3236
3237	while (num_reports--) {
3238		struct hci_ev_le_advertising_info *ev = ptr;
3239
3240		hci_add_adv_entry(hdev, ev);
3241
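		/* The RSSI byte immediately follows the advertising data. */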
3242		rssi = ev->data[ev->length];
3243		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3244					NULL, rssi, 0, ev->data, ev->length);
3245
3246		ptr += sizeof(*ev) + ev->length + 1;
3247	}
3248
3249	hci_dev_unlock(hdev);
3250}
3251
3252static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3253						struct sk_buff *skb)
3254{
3255	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3256	struct hci_cp_le_ltk_reply cp;
3257	struct hci_cp_le_ltk_neg_reply neg;
3258	struct hci_conn *conn;
3259	struct smp_ltk *ltk;
3260
3261	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
3262
3263	hci_dev_lock(hdev);
3264
3265	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3266	if (conn == NULL)
3267		goto not_found;
3268
3269	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3270	if (ltk == NULL)
3271		goto not_found;
3272
3273	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3274	cp.handle = cpu_to_le16(conn->handle);
3275
3276	if (ltk->authenticated)
3277		conn->sec_level = BT_SECURITY_HIGH;
3278
3279	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3280
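	/* A short term key is only usable for a single encryption, so drop
	 * it once it has been handed to the controller. */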
3281	if (ltk->type & HCI_SMP_STK) {
3282		list_del(&ltk->list);
3283		kfree(ltk);
3284	}
3285
3286	hci_dev_unlock(hdev);
3287
3288	return;
3289
3290not_found:
3291	neg.handle = ev->handle;
3292	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3293	hci_dev_unlock(hdev);
3294}
3295
3296static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3297{
3298	struct hci_ev_le_meta *le_ev = (void *) skb->data;
3299
3300	skb_pull(skb, sizeof(*le_ev));
3301
3302	switch (le_ev->subevent) {
3303	case HCI_EV_LE_CONN_COMPLETE:
3304		hci_le_conn_complete_evt(hdev, skb);
3305		break;
3306
3307	case HCI_EV_LE_ADVERTISING_REPORT:
3308		hci_le_adv_report_evt(hdev, skb);
3309		break;
3310
3311	case HCI_EV_LE_LTK_REQ:
3312		hci_le_ltk_request_evt(hdev, skb);
3313		break;
3314
3315	default:
3316		break;
3317	}
3318}
3319
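/* Main entry point for processing a single HCI event packet received from
 * the controller; dispatches to the handler for the event code. */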
3320void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3321{
3322	struct hci_event_hdr *hdr = (void *) skb->data;
3323	__u8 event = hdr->evt;
3324
3325	skb_pull(skb, HCI_EVENT_HDR_SIZE);
3326
3327	switch (event) {
3328	case HCI_EV_INQUIRY_COMPLETE:
3329		hci_inquiry_complete_evt(hdev, skb);
3330		break;
3331
3332	case HCI_EV_INQUIRY_RESULT:
3333		hci_inquiry_result_evt(hdev, skb);
3334		break;
3335
3336	case HCI_EV_CONN_COMPLETE:
3337		hci_conn_complete_evt(hdev, skb);
3338		break;
3339
3340	case HCI_EV_CONN_REQUEST:
3341		hci_conn_request_evt(hdev, skb);
3342		break;
3343
3344	case HCI_EV_DISCONN_COMPLETE:
3345		hci_disconn_complete_evt(hdev, skb);
3346		break;
3347
3348	case HCI_EV_AUTH_COMPLETE:
3349		hci_auth_complete_evt(hdev, skb);
3350		break;
3351
3352	case HCI_EV_REMOTE_NAME:
3353		hci_remote_name_evt(hdev, skb);
3354		break;
3355
3356	case HCI_EV_ENCRYPT_CHANGE:
3357		hci_encrypt_change_evt(hdev, skb);
3358		break;
3359
3360	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3361		hci_change_link_key_complete_evt(hdev, skb);
3362		break;
3363
3364	case HCI_EV_REMOTE_FEATURES:
3365		hci_remote_features_evt(hdev, skb);
3366		break;
3367
3368	case HCI_EV_REMOTE_VERSION:
3369		hci_remote_version_evt(hdev, skb);
3370		break;
3371
3372	case HCI_EV_QOS_SETUP_COMPLETE:
3373		hci_qos_setup_complete_evt(hdev, skb);
3374		break;
3375
3376	case HCI_EV_CMD_COMPLETE:
3377		hci_cmd_complete_evt(hdev, skb);
3378		break;
3379
3380	case HCI_EV_CMD_STATUS:
3381		hci_cmd_status_evt(hdev, skb);
3382		break;
3383
3384	case HCI_EV_ROLE_CHANGE:
3385		hci_role_change_evt(hdev, skb);
3386		break;
3387
3388	case HCI_EV_NUM_COMP_PKTS:
3389		hci_num_comp_pkts_evt(hdev, skb);
3390		break;
3391
3392	case HCI_EV_MODE_CHANGE:
3393		hci_mode_change_evt(hdev, skb);
3394		break;
3395
3396	case HCI_EV_PIN_CODE_REQ:
3397		hci_pin_code_request_evt(hdev, skb);
3398		break;
3399
3400	case HCI_EV_LINK_KEY_REQ:
3401		hci_link_key_request_evt(hdev, skb);
3402		break;
3403
3404	case HCI_EV_LINK_KEY_NOTIFY:
3405		hci_link_key_notify_evt(hdev, skb);
3406		break;
3407
3408	case HCI_EV_CLOCK_OFFSET:
3409		hci_clock_offset_evt(hdev, skb);
3410		break;
3411
3412	case HCI_EV_PKT_TYPE_CHANGE:
3413		hci_pkt_type_change_evt(hdev, skb);
3414		break;
3415
3416	case HCI_EV_PSCAN_REP_MODE:
3417		hci_pscan_rep_mode_evt(hdev, skb);
3418		break;
3419
3420	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3421		hci_inquiry_result_with_rssi_evt(hdev, skb);
3422		break;
3423
3424	case HCI_EV_REMOTE_EXT_FEATURES:
3425		hci_remote_ext_features_evt(hdev, skb);
3426		break;
3427
3428	case HCI_EV_SYNC_CONN_COMPLETE:
3429		hci_sync_conn_complete_evt(hdev, skb);
3430		break;
3431
3432	case HCI_EV_SYNC_CONN_CHANGED:
3433		hci_sync_conn_changed_evt(hdev, skb);
3434		break;
3435
3436	case HCI_EV_SNIFF_SUBRATE:
3437		hci_sniff_subrate_evt(hdev, skb);
3438		break;
3439
3440	case HCI_EV_EXTENDED_INQUIRY_RESULT:
3441		hci_extended_inquiry_result_evt(hdev, skb);
3442		break;
3443
3444	case HCI_EV_IO_CAPA_REQUEST:
3445		hci_io_capa_request_evt(hdev, skb);
3446		break;
3447
3448	case HCI_EV_IO_CAPA_REPLY:
3449		hci_io_capa_reply_evt(hdev, skb);
3450		break;
3451
3452	case HCI_EV_USER_CONFIRM_REQUEST:
3453		hci_user_confirm_request_evt(hdev, skb);
3454		break;
3455
3456	case HCI_EV_USER_PASSKEY_REQUEST:
3457		hci_user_passkey_request_evt(hdev, skb);
3458		break;
3459
3460	case HCI_EV_SIMPLE_PAIR_COMPLETE:
3461		hci_simple_pair_complete_evt(hdev, skb);
3462		break;
3463
3464	case HCI_EV_REMOTE_HOST_FEATURES:
3465		hci_remote_host_features_evt(hdev, skb);
3466		break;
3467
3468	case HCI_EV_LE_META:
3469		hci_le_meta_evt(hdev, skb);
3470		break;
3471
3472	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3473		hci_remote_oob_data_request_evt(hdev, skb);
3474		break;
3475
3476	case HCI_EV_NUM_COMP_BLOCKS:
3477		hci_num_comp_blocks_evt(hdev, skb);
3478		break;
3479
3480	default:
3481		BT_DBG("%s event 0x%x", hdev->name, event);
3482		break;
3483	}
3484
3485	kfree_skb(skb);
3486	hdev->stat.evt_rx++;
3487}
3488
3489/* Generate internal stack event */
3490void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
3491{
3492	struct hci_event_hdr *hdr;
3493	struct hci_ev_stack_internal *ev;
3494	struct sk_buff *skb;
3495
3496	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
3497	if (!skb)
3498		return;
3499
3500	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
3501	hdr->evt  = HCI_EV_STACK_INTERNAL;
3502	hdr->plen = sizeof(*ev) + dlen;
3503
3504	ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
3505	ev->type = type;
3506	memcpy(ev->data, data, dlen);
3507
3508	bt_cb(skb)->incoming = 1;
3509	__net_timestamp(skb);
3510
3511	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3512	skb->dev = (void *) hdev;
3513	hci_send_to_sock(hdev, skb, NULL);
3514	kfree_skb(skb);
3515}
3516
3517module_param(enable_le, bool, 0644);
3518MODULE_PARM_DESC(enable_le, "Enable LE support");
3519