hci_event.c revision 5e0452c00a2e4b04ec1482248c897dacf106f1df
1/*
2   BlueZ - Bluetooth protocol stack for Linux
3   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License version 2 as
9   published by the Free Software Foundation;
10
11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22   SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI event handling. */
26
27#include <linux/module.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/poll.h>
34#include <linux/fcntl.h>
35#include <linux/init.h>
36#include <linux/skbuff.h>
37#include <linux/interrupt.h>
38#include <linux/notifier.h>
39#include <net/sock.h>
40
41#include <asm/system.h>
42#include <linux/uaccess.h>
43#include <asm/unaligned.h>
44
45#include <net/bluetooth/bluetooth.h>
46#include <net/bluetooth/hci_core.h>
47
48static bool enable_le;
49
50/* Handle HCI Event packets */
51
52static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
53{
54	__u8 status = *((__u8 *) skb->data);
55
56	BT_DBG("%s status 0x%x", hdev->name, status);
57
58	if (status) {
59		hci_dev_lock(hdev);
60		mgmt_stop_discovery_failed(hdev, status);
61		hci_dev_unlock(hdev);
62		return;
63	}
64
65	clear_bit(HCI_INQUIRY, &hdev->flags);
66
67	hci_dev_lock(hdev);
68	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
69	hci_dev_unlock(hdev);
70
71	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
72
73	hci_conn_check_pending(hdev);
74}
75
76static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
77{
78	__u8 status = *((__u8 *) skb->data);
79
80	BT_DBG("%s status 0x%x", hdev->name, status);
81
82	if (status)
83		return;
84
85	hci_conn_check_pending(hdev);
86}
87
88static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
89{
90	BT_DBG("%s", hdev->name);
91}
92
93static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94{
95	struct hci_rp_role_discovery *rp = (void *) skb->data;
96	struct hci_conn *conn;
97
98	BT_DBG("%s status 0x%x", hdev->name, rp->status);
99
100	if (rp->status)
101		return;
102
103	hci_dev_lock(hdev);
104
105	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106	if (conn) {
107		if (rp->role)
108			conn->link_mode &= ~HCI_LM_MASTER;
109		else
110			conn->link_mode |= HCI_LM_MASTER;
111	}
112
113	hci_dev_unlock(hdev);
114}
115
116static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117{
118	struct hci_rp_read_link_policy *rp = (void *) skb->data;
119	struct hci_conn *conn;
120
121	BT_DBG("%s status 0x%x", hdev->name, rp->status);
122
123	if (rp->status)
124		return;
125
126	hci_dev_lock(hdev);
127
128	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129	if (conn)
130		conn->link_policy = __le16_to_cpu(rp->policy);
131
132	hci_dev_unlock(hdev);
133}
134
135static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136{
137	struct hci_rp_write_link_policy *rp = (void *) skb->data;
138	struct hci_conn *conn;
139	void *sent;
140
141	BT_DBG("%s status 0x%x", hdev->name, rp->status);
142
143	if (rp->status)
144		return;
145
146	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147	if (!sent)
148		return;
149
150	hci_dev_lock(hdev);
151
152	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153	if (conn)
154		conn->link_policy = get_unaligned_le16(sent + 2);
155
156	hci_dev_unlock(hdev);
157}
158
159static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
160{
161	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162
163	BT_DBG("%s status 0x%x", hdev->name, rp->status);
164
165	if (rp->status)
166		return;
167
168	hdev->link_policy = __le16_to_cpu(rp->policy);
169}
170
171static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
172{
173	__u8 status = *((__u8 *) skb->data);
174	void *sent;
175
176	BT_DBG("%s status 0x%x", hdev->name, status);
177
178	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179	if (!sent)
180		return;
181
182	if (!status)
183		hdev->link_policy = get_unaligned_le16(sent);
184
185	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
186}
187
188static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
189{
190	__u8 status = *((__u8 *) skb->data);
191
192	BT_DBG("%s status 0x%x", hdev->name, status);
193
194	clear_bit(HCI_RESET, &hdev->flags);
195
196	hci_req_complete(hdev, HCI_OP_RESET, status);
197
198	/* Reset all flags, except persistent ones */
199	hdev->dev_flags &= BIT(HCI_MGMT) | BIT(HCI_SETUP) | BIT(HCI_AUTO_OFF) |
200				BIT(HCI_LINK_KEYS) | BIT(HCI_DEBUG_KEYS);
201}
202
203static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
204{
205	__u8 status = *((__u8 *) skb->data);
206	void *sent;
207
208	BT_DBG("%s status 0x%x", hdev->name, status);
209
210	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
211	if (!sent)
212		return;
213
214	hci_dev_lock(hdev);
215
216	if (test_bit(HCI_MGMT, &hdev->dev_flags))
217		mgmt_set_local_name_complete(hdev, sent, status);
218
219	if (status == 0)
220		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
221
222	hci_dev_unlock(hdev);
223}
224
225static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
226{
227	struct hci_rp_read_local_name *rp = (void *) skb->data;
228
229	BT_DBG("%s status 0x%x", hdev->name, rp->status);
230
231	if (rp->status)
232		return;
233
234	memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
235}
236
237static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
238{
239	__u8 status = *((__u8 *) skb->data);
240	void *sent;
241
242	BT_DBG("%s status 0x%x", hdev->name, status);
243
244	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
245	if (!sent)
246		return;
247
248	if (!status) {
249		__u8 param = *((__u8 *) sent);
250
251		if (param == AUTH_ENABLED)
252			set_bit(HCI_AUTH, &hdev->flags);
253		else
254			clear_bit(HCI_AUTH, &hdev->flags);
255	}
256
257	if (test_bit(HCI_MGMT, &hdev->dev_flags))
258		mgmt_auth_enable_complete(hdev, status);
259
260	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
261}
262
263static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
264{
265	__u8 status = *((__u8 *) skb->data);
266	void *sent;
267
268	BT_DBG("%s status 0x%x", hdev->name, status);
269
270	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
271	if (!sent)
272		return;
273
274	if (!status) {
275		__u8 param = *((__u8 *) sent);
276
277		if (param)
278			set_bit(HCI_ENCRYPT, &hdev->flags);
279		else
280			clear_bit(HCI_ENCRYPT, &hdev->flags);
281	}
282
283	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
284}
285
286static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
287{
288	__u8 param, status = *((__u8 *) skb->data);
289	int old_pscan, old_iscan;
290	void *sent;
291
292	BT_DBG("%s status 0x%x", hdev->name, status);
293
294	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
295	if (!sent)
296		return;
297
298	param = *((__u8 *) sent);
299
300	hci_dev_lock(hdev);
301
302	if (status != 0) {
303		mgmt_write_scan_failed(hdev, param, status);
304		hdev->discov_timeout = 0;
305		goto done;
306	}
307
308	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
309	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
310
311	if (param & SCAN_INQUIRY) {
312		set_bit(HCI_ISCAN, &hdev->flags);
313		if (!old_iscan)
314			mgmt_discoverable(hdev, 1);
315		if (hdev->discov_timeout > 0) {
316			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
317			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
318									to);
319		}
320	} else if (old_iscan)
321		mgmt_discoverable(hdev, 0);
322
323	if (param & SCAN_PAGE) {
324		set_bit(HCI_PSCAN, &hdev->flags);
325		if (!old_pscan)
326			mgmt_connectable(hdev, 1);
327	} else if (old_pscan)
328		mgmt_connectable(hdev, 0);
329
330done:
331	hci_dev_unlock(hdev);
332	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
333}
334
335static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
336{
337	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
338
339	BT_DBG("%s status 0x%x", hdev->name, rp->status);
340
341	if (rp->status)
342		return;
343
344	memcpy(hdev->dev_class, rp->dev_class, 3);
345
346	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
347		hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
348}
349
350static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
351{
352	__u8 status = *((__u8 *) skb->data);
353	void *sent;
354
355	BT_DBG("%s status 0x%x", hdev->name, status);
356
357	if (status)
358		return;
359
360	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
361	if (!sent)
362		return;
363
364	memcpy(hdev->dev_class, sent, 3);
365}
366
367static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
368{
369	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
370	__u16 setting;
371
372	BT_DBG("%s status 0x%x", hdev->name, rp->status);
373
374	if (rp->status)
375		return;
376
377	setting = __le16_to_cpu(rp->voice_setting);
378
379	if (hdev->voice_setting == setting)
380		return;
381
382	hdev->voice_setting = setting;
383
384	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
385
386	if (hdev->notify)
387		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
388}
389
390static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
391{
392	__u8 status = *((__u8 *) skb->data);
393	__u16 setting;
394	void *sent;
395
396	BT_DBG("%s status 0x%x", hdev->name, status);
397
398	if (status)
399		return;
400
401	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
402	if (!sent)
403		return;
404
405	setting = get_unaligned_le16(sent);
406
407	if (hdev->voice_setting == setting)
408		return;
409
410	hdev->voice_setting = setting;
411
412	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
413
414	if (hdev->notify)
415		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
416}
417
418static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
419{
420	__u8 status = *((__u8 *) skb->data);
421
422	BT_DBG("%s status 0x%x", hdev->name, status);
423
424	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
425}
426
427static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
428{
429	struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
430
431	BT_DBG("%s status 0x%x", hdev->name, rp->status);
432
433	if (rp->status)
434		return;
435
436	if (rp->mode)
437		set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
438	else
439		clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
440}
441
442static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
443{
444	__u8 status = *((__u8 *) skb->data);
445	void *sent;
446
447	BT_DBG("%s status 0x%x", hdev->name, status);
448
449	if (status)
450		goto done;
451
452	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
453	if (!sent)
454		return;
455
456	if (*((u8 *) sent))
457		set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
458	else
459		clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
460
461done:
462	if (test_bit(HCI_MGMT, &hdev->dev_flags))
463		mgmt_ssp_enable_complete(hdev, status);
464}
465
466static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
467{
468	if (hdev->features[6] & LMP_EXT_INQ)
469		return 2;
470
471	if (hdev->features[3] & LMP_RSSI_INQ)
472		return 1;
473
474	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
475						hdev->lmp_subver == 0x0757)
476		return 1;
477
478	if (hdev->manufacturer == 15) {
479		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
480			return 1;
481		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
482			return 1;
483		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
484			return 1;
485	}
486
487	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
488						hdev->lmp_subver == 0x1805)
489		return 1;
490
491	return 0;
492}
493
494static void hci_setup_inquiry_mode(struct hci_dev *hdev)
495{
496	u8 mode;
497
498	mode = hci_get_inquiry_mode(hdev);
499
500	hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
501}
502
503static void hci_setup_event_mask(struct hci_dev *hdev)
504{
505	/* The second byte is 0xff instead of 0x9f (two reserved bits
506	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
507	 * command otherwise */
508	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
509
510	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
511	 * any event mask for pre 1.2 devices */
512	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
513		return;
514
515	events[4] |= 0x01; /* Flow Specification Complete */
516	events[4] |= 0x02; /* Inquiry Result with RSSI */
517	events[4] |= 0x04; /* Read Remote Extended Features Complete */
518	events[5] |= 0x08; /* Synchronous Connection Complete */
519	events[5] |= 0x10; /* Synchronous Connection Changed */
520
521	if (hdev->features[3] & LMP_RSSI_INQ)
522		events[4] |= 0x02; /* Inquiry Result with RSSI */
523
524	if (hdev->features[5] & LMP_SNIFF_SUBR)
525		events[5] |= 0x20; /* Sniff Subrating */
526
527	if (hdev->features[5] & LMP_PAUSE_ENC)
528		events[5] |= 0x80; /* Encryption Key Refresh Complete */
529
530	if (hdev->features[6] & LMP_EXT_INQ)
531		events[5] |= 0x40; /* Extended Inquiry Result */
532
533	if (hdev->features[6] & LMP_NO_FLUSH)
534		events[7] |= 0x01; /* Enhanced Flush Complete */
535
536	if (hdev->features[7] & LMP_LSTO)
537		events[6] |= 0x80; /* Link Supervision Timeout Changed */
538
539	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
540		events[6] |= 0x01;	/* IO Capability Request */
541		events[6] |= 0x02;	/* IO Capability Response */
542		events[6] |= 0x04;	/* User Confirmation Request */
543		events[6] |= 0x08;	/* User Passkey Request */
544		events[6] |= 0x10;	/* Remote OOB Data Request */
545		events[6] |= 0x20;	/* Simple Pairing Complete */
546		events[7] |= 0x04;	/* User Passkey Notification */
547		events[7] |= 0x08;	/* Keypress Notification */
548		events[7] |= 0x10;	/* Remote Host Supported
549					 * Features Notification */
550	}
551
552	if (hdev->features[4] & LMP_LE)
553		events[7] |= 0x20;	/* LE Meta-Event */
554
555	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
556}
557
558static void hci_set_le_support(struct hci_dev *hdev)
559{
560	struct hci_cp_write_le_host_supported cp;
561
562	memset(&cp, 0, sizeof(cp));
563
564	if (enable_le) {
565		cp.le = 1;
566		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
567	}
568
569	hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
570}
571
572static void hci_setup(struct hci_dev *hdev)
573{
574	if (hdev->dev_type != HCI_BREDR)
575		return;
576
577	hci_setup_event_mask(hdev);
578
579	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
580		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
581
582	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
583		u8 mode = 0x01;
584		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
585	}
586
587	if (hdev->features[3] & LMP_RSSI_INQ)
588		hci_setup_inquiry_mode(hdev);
589
590	if (hdev->features[7] & LMP_INQ_TX_PWR)
591		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
592
593	if (hdev->features[7] & LMP_EXTFEATURES) {
594		struct hci_cp_read_local_ext_features cp;
595
596		cp.page = 0x01;
597		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
598							sizeof(cp), &cp);
599	}
600
601	if (hdev->features[4] & LMP_LE)
602		hci_set_le_support(hdev);
603}
604
605static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
606{
607	struct hci_rp_read_local_version *rp = (void *) skb->data;
608
609	BT_DBG("%s status 0x%x", hdev->name, rp->status);
610
611	if (rp->status)
612		return;
613
614	hdev->hci_ver = rp->hci_ver;
615	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
616	hdev->lmp_ver = rp->lmp_ver;
617	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
618	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
619
620	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
621					hdev->manufacturer,
622					hdev->hci_ver, hdev->hci_rev);
623
624	if (test_bit(HCI_INIT, &hdev->flags))
625		hci_setup(hdev);
626}
627
628static void hci_setup_link_policy(struct hci_dev *hdev)
629{
630	u16 link_policy = 0;
631
632	if (hdev->features[0] & LMP_RSWITCH)
633		link_policy |= HCI_LP_RSWITCH;
634	if (hdev->features[0] & LMP_HOLD)
635		link_policy |= HCI_LP_HOLD;
636	if (hdev->features[0] & LMP_SNIFF)
637		link_policy |= HCI_LP_SNIFF;
638	if (hdev->features[1] & LMP_PARK)
639		link_policy |= HCI_LP_PARK;
640
641	link_policy = cpu_to_le16(link_policy);
642	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
643					sizeof(link_policy), &link_policy);
644}
645
646static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
647{
648	struct hci_rp_read_local_commands *rp = (void *) skb->data;
649
650	BT_DBG("%s status 0x%x", hdev->name, rp->status);
651
652	if (rp->status)
653		goto done;
654
655	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
656
657	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
658		hci_setup_link_policy(hdev);
659
660done:
661	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
662}
663
664static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
665{
666	struct hci_rp_read_local_features *rp = (void *) skb->data;
667
668	BT_DBG("%s status 0x%x", hdev->name, rp->status);
669
670	if (rp->status)
671		return;
672
673	memcpy(hdev->features, rp->features, 8);
674
675	/* Adjust default settings according to features
676	 * supported by device. */
677
678	if (hdev->features[0] & LMP_3SLOT)
679		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
680
681	if (hdev->features[0] & LMP_5SLOT)
682		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
683
684	if (hdev->features[1] & LMP_HV2) {
685		hdev->pkt_type  |= (HCI_HV2);
686		hdev->esco_type |= (ESCO_HV2);
687	}
688
689	if (hdev->features[1] & LMP_HV3) {
690		hdev->pkt_type  |= (HCI_HV3);
691		hdev->esco_type |= (ESCO_HV3);
692	}
693
694	if (hdev->features[3] & LMP_ESCO)
695		hdev->esco_type |= (ESCO_EV3);
696
697	if (hdev->features[4] & LMP_EV4)
698		hdev->esco_type |= (ESCO_EV4);
699
700	if (hdev->features[4] & LMP_EV5)
701		hdev->esco_type |= (ESCO_EV5);
702
703	if (hdev->features[5] & LMP_EDR_ESCO_2M)
704		hdev->esco_type |= (ESCO_2EV3);
705
706	if (hdev->features[5] & LMP_EDR_ESCO_3M)
707		hdev->esco_type |= (ESCO_3EV3);
708
709	if (hdev->features[5] & LMP_EDR_3S_ESCO)
710		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
711
712	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
713					hdev->features[0], hdev->features[1],
714					hdev->features[2], hdev->features[3],
715					hdev->features[4], hdev->features[5],
716					hdev->features[6], hdev->features[7]);
717}
718
719static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
720							struct sk_buff *skb)
721{
722	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
723
724	BT_DBG("%s status 0x%x", hdev->name, rp->status);
725
726	if (rp->status)
727		return;
728
729	switch (rp->page) {
730	case 0:
731		memcpy(hdev->features, rp->features, 8);
732		break;
733	case 1:
734		memcpy(hdev->host_features, rp->features, 8);
735		break;
736	}
737
738	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
739}
740
741static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
742						struct sk_buff *skb)
743{
744	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
745
746	BT_DBG("%s status 0x%x", hdev->name, rp->status);
747
748	if (rp->status)
749		return;
750
751	hdev->flow_ctl_mode = rp->mode;
752
753	hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
754}
755
756static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
757{
758	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
759
760	BT_DBG("%s status 0x%x", hdev->name, rp->status);
761
762	if (rp->status)
763		return;
764
765	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
766	hdev->sco_mtu  = rp->sco_mtu;
767	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
768	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
769
770	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
771		hdev->sco_mtu  = 64;
772		hdev->sco_pkts = 8;
773	}
774
775	hdev->acl_cnt = hdev->acl_pkts;
776	hdev->sco_cnt = hdev->sco_pkts;
777
778	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
779					hdev->acl_mtu, hdev->acl_pkts,
780					hdev->sco_mtu, hdev->sco_pkts);
781}
782
783static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
784{
785	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
786
787	BT_DBG("%s status 0x%x", hdev->name, rp->status);
788
789	if (!rp->status)
790		bacpy(&hdev->bdaddr, &rp->bdaddr);
791
792	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
793}
794
795static void hci_cc_read_data_block_size(struct hci_dev *hdev,
796							struct sk_buff *skb)
797{
798	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
799
800	BT_DBG("%s status 0x%x", hdev->name, rp->status);
801
802	if (rp->status)
803		return;
804
805	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
806	hdev->block_len = __le16_to_cpu(rp->block_len);
807	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
808
809	hdev->block_cnt = hdev->num_blocks;
810
811	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
812					hdev->block_cnt, hdev->block_len);
813
814	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
815}
816
817static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
818{
819	__u8 status = *((__u8 *) skb->data);
820
821	BT_DBG("%s status 0x%x", hdev->name, status);
822
823	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
824}
825
826static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
827		struct sk_buff *skb)
828{
829	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
830
831	BT_DBG("%s status 0x%x", hdev->name, rp->status);
832
833	if (rp->status)
834		return;
835
836	hdev->amp_status = rp->amp_status;
837	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
838	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
839	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
840	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
841	hdev->amp_type = rp->amp_type;
842	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
843	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
844	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
845	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
846
847	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
848}
849
850static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
851							struct sk_buff *skb)
852{
853	__u8 status = *((__u8 *) skb->data);
854
855	BT_DBG("%s status 0x%x", hdev->name, status);
856
857	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
858}
859
860static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
861{
862	__u8 status = *((__u8 *) skb->data);
863
864	BT_DBG("%s status 0x%x", hdev->name, status);
865
866	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
867}
868
869static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
870							struct sk_buff *skb)
871{
872	__u8 status = *((__u8 *) skb->data);
873
874	BT_DBG("%s status 0x%x", hdev->name, status);
875
876	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
877}
878
879static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
880							struct sk_buff *skb)
881{
882	__u8 status = *((__u8 *) skb->data);
883
884	BT_DBG("%s status 0x%x", hdev->name, status);
885
886	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
887}
888
889static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
890{
891	__u8 status = *((__u8 *) skb->data);
892
893	BT_DBG("%s status 0x%x", hdev->name, status);
894
895	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
896}
897
898static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
899{
900	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
901	struct hci_cp_pin_code_reply *cp;
902	struct hci_conn *conn;
903
904	BT_DBG("%s status 0x%x", hdev->name, rp->status);
905
906	hci_dev_lock(hdev);
907
908	if (test_bit(HCI_MGMT, &hdev->dev_flags))
909		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
910
911	if (rp->status != 0)
912		goto unlock;
913
914	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
915	if (!cp)
916		goto unlock;
917
918	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
919	if (conn)
920		conn->pin_length = cp->pin_len;
921
922unlock:
923	hci_dev_unlock(hdev);
924}
925
926static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
927{
928	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
929
930	BT_DBG("%s status 0x%x", hdev->name, rp->status);
931
932	hci_dev_lock(hdev);
933
934	if (test_bit(HCI_MGMT, &hdev->dev_flags))
935		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
936								rp->status);
937
938	hci_dev_unlock(hdev);
939}
940
941static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
942				       struct sk_buff *skb)
943{
944	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
945
946	BT_DBG("%s status 0x%x", hdev->name, rp->status);
947
948	if (rp->status)
949		return;
950
951	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
952	hdev->le_pkts = rp->le_max_pkt;
953
954	hdev->le_cnt = hdev->le_pkts;
955
956	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
957
958	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
959}
960
961static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
962{
963	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
964
965	BT_DBG("%s status 0x%x", hdev->name, rp->status);
966
967	hci_dev_lock(hdev);
968
969	if (test_bit(HCI_MGMT, &hdev->dev_flags))
970		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
971								0, rp->status);
972
973	hci_dev_unlock(hdev);
974}
975
976static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
977							struct sk_buff *skb)
978{
979	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
980
981	BT_DBG("%s status 0x%x", hdev->name, rp->status);
982
983	hci_dev_lock(hdev);
984
985	if (test_bit(HCI_MGMT, &hdev->dev_flags))
986		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
987								ACL_LINK, 0,
988								rp->status);
989
990	hci_dev_unlock(hdev);
991}
992
993static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
994{
995	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
996
997	BT_DBG("%s status 0x%x", hdev->name, rp->status);
998
999	hci_dev_lock(hdev);
1000
1001	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1002		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1003								0, rp->status);
1004
1005	hci_dev_unlock(hdev);
1006}
1007
1008static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1009							struct sk_buff *skb)
1010{
1011	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1012
1013	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1014
1015	hci_dev_lock(hdev);
1016
1017	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1018		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1019								ACL_LINK, 0,
1020								rp->status);
1021
1022	hci_dev_unlock(hdev);
1023}
1024
1025static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1026							struct sk_buff *skb)
1027{
1028	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1029
1030	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1031
1032	hci_dev_lock(hdev);
1033	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1034						rp->randomizer, rp->status);
1035	hci_dev_unlock(hdev);
1036}
1037
1038static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1039{
1040	__u8 status = *((__u8 *) skb->data);
1041
1042	BT_DBG("%s status 0x%x", hdev->name, status);
1043
1044	hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1045
1046	if (status) {
1047		hci_dev_lock(hdev);
1048		mgmt_start_discovery_failed(hdev, status);
1049		hci_dev_unlock(hdev);
1050		return;
1051	}
1052}
1053
1054static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1055					struct sk_buff *skb)
1056{
1057	struct hci_cp_le_set_scan_enable *cp;
1058	__u8 status = *((__u8 *) skb->data);
1059
1060	BT_DBG("%s status 0x%x", hdev->name, status);
1061
1062	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1063	if (!cp)
1064		return;
1065
1066	switch (cp->enable) {
1067	case LE_SCANNING_ENABLED:
1068		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1069
1070		if (status) {
1071			hci_dev_lock(hdev);
1072			mgmt_start_discovery_failed(hdev, status);
1073			hci_dev_unlock(hdev);
1074			return;
1075		}
1076
1077		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1078
1079		cancel_delayed_work_sync(&hdev->adv_work);
1080
1081		hci_dev_lock(hdev);
1082		hci_adv_entries_clear(hdev);
1083		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1084		hci_dev_unlock(hdev);
1085		break;
1086
1087	case LE_SCANNING_DISABLED:
1088		if (status)
1089			return;
1090
1091		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1092
1093		schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
1094
1095		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
1096			mgmt_interleaved_discovery(hdev);
1097		} else {
1098			hci_dev_lock(hdev);
1099			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1100			hci_dev_unlock(hdev);
1101		}
1102
1103		break;
1104
1105	default:
1106		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1107		break;
1108	}
1109}
1110
1111static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1112{
1113	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1114
1115	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1116
1117	if (rp->status)
1118		return;
1119
1120	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1121}
1122
1123static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1124{
1125	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1126
1127	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1128
1129	if (rp->status)
1130		return;
1131
1132	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1133}
1134
1135static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1136							struct sk_buff *skb)
1137{
1138	struct hci_cp_read_local_ext_features cp;
1139	__u8 status = *((__u8 *) skb->data);
1140
1141	BT_DBG("%s status 0x%x", hdev->name, status);
1142
1143	if (status)
1144		return;
1145
1146	cp.page = 0x01;
1147	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
1148}
1149
1150static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1151{
1152	BT_DBG("%s status 0x%x", hdev->name, status);
1153
1154	if (status) {
1155		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1156		hci_conn_check_pending(hdev);
1157		hci_dev_lock(hdev);
1158		if (test_bit(HCI_MGMT, &hdev->dev_flags))
1159			mgmt_start_discovery_failed(hdev, status);
1160		hci_dev_unlock(hdev);
1161		return;
1162	}
1163
1164	set_bit(HCI_INQUIRY, &hdev->flags);
1165
1166	hci_dev_lock(hdev);
1167	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1168	hci_dev_unlock(hdev);
1169}
1170
1171static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1172{
1173	struct hci_cp_create_conn *cp;
1174	struct hci_conn *conn;
1175
1176	BT_DBG("%s status 0x%x", hdev->name, status);
1177
1178	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1179	if (!cp)
1180		return;
1181
1182	hci_dev_lock(hdev);
1183
1184	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1185
1186	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1187
1188	if (status) {
1189		if (conn && conn->state == BT_CONNECT) {
1190			if (status != 0x0c || conn->attempt > 2) {
1191				conn->state = BT_CLOSED;
1192				hci_proto_connect_cfm(conn, status);
1193				hci_conn_del(conn);
1194			} else
1195				conn->state = BT_CONNECT2;
1196		}
1197	} else {
1198		if (!conn) {
1199			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1200			if (conn) {
1201				conn->out = true;
1202				conn->link_mode |= HCI_LM_MASTER;
1203			} else
1204				BT_ERR("No memory for new connection");
1205		}
1206	}
1207
1208	hci_dev_unlock(hdev);
1209}
1210
1211static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1212{
1213	struct hci_cp_add_sco *cp;
1214	struct hci_conn *acl, *sco;
1215	__u16 handle;
1216
1217	BT_DBG("%s status 0x%x", hdev->name, status);
1218
1219	if (!status)
1220		return;
1221
1222	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1223	if (!cp)
1224		return;
1225
1226	handle = __le16_to_cpu(cp->handle);
1227
1228	BT_DBG("%s handle %d", hdev->name, handle);
1229
1230	hci_dev_lock(hdev);
1231
1232	acl = hci_conn_hash_lookup_handle(hdev, handle);
1233	if (acl) {
1234		sco = acl->link;
1235		if (sco) {
1236			sco->state = BT_CLOSED;
1237
1238			hci_proto_connect_cfm(sco, status);
1239			hci_conn_del(sco);
1240		}
1241	}
1242
1243	hci_dev_unlock(hdev);
1244}
1245
1246static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1247{
1248	struct hci_cp_auth_requested *cp;
1249	struct hci_conn *conn;
1250
1251	BT_DBG("%s status 0x%x", hdev->name, status);
1252
1253	if (!status)
1254		return;
1255
1256	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1257	if (!cp)
1258		return;
1259
1260	hci_dev_lock(hdev);
1261
1262	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1263	if (conn) {
1264		if (conn->state == BT_CONFIG) {
1265			hci_proto_connect_cfm(conn, status);
1266			hci_conn_put(conn);
1267		}
1268	}
1269
1270	hci_dev_unlock(hdev);
1271}
1272
1273static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1274{
1275	struct hci_cp_set_conn_encrypt *cp;
1276	struct hci_conn *conn;
1277
1278	BT_DBG("%s status 0x%x", hdev->name, status);
1279
1280	if (!status)
1281		return;
1282
1283	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1284	if (!cp)
1285		return;
1286
1287	hci_dev_lock(hdev);
1288
1289	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1290	if (conn) {
1291		if (conn->state == BT_CONFIG) {
1292			hci_proto_connect_cfm(conn, status);
1293			hci_conn_put(conn);
1294		}
1295	}
1296
1297	hci_dev_unlock(hdev);
1298}
1299
1300static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1301							struct hci_conn *conn)
1302{
1303	if (conn->state != BT_CONFIG || !conn->out)
1304		return 0;
1305
1306	if (conn->pending_sec_level == BT_SECURITY_SDP)
1307		return 0;
1308
1309	/* Only request authentication for SSP connections or non-SSP
1310	 * devices with sec_level HIGH or if MITM protection is requested */
1311	if (!hci_conn_ssp_enabled(conn) &&
1312				conn->pending_sec_level != BT_SECURITY_HIGH &&
1313				!(conn->auth_type & 0x01))
1314		return 0;
1315
1316	return 1;
1317}
1318
1319static inline int hci_resolve_name(struct hci_dev *hdev, struct inquiry_entry *e)
1320{
1321	struct hci_cp_remote_name_req cp;
1322
1323	memset(&cp, 0, sizeof(cp));
1324
1325	bacpy(&cp.bdaddr, &e->data.bdaddr);
1326	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1327	cp.pscan_mode = e->data.pscan_mode;
1328	cp.clock_offset = e->data.clock_offset;
1329
1330	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1331}
1332
1333static bool hci_resolve_next_name(struct hci_dev *hdev)
1334{
1335	struct discovery_state *discov = &hdev->discovery;
1336	struct inquiry_entry *e;
1337
1338	if (list_empty(&discov->resolve))
1339		return false;
1340
1341	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1342	if (hci_resolve_name(hdev, e) == 0) {
1343		e->name_state = NAME_PENDING;
1344		return true;
1345	}
1346
1347	return false;
1348}
1349
1350static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1351					bdaddr_t *bdaddr, u8 *name, u8 name_len)
1352{
1353	struct discovery_state *discov = &hdev->discovery;
1354	struct inquiry_entry *e;
1355
1356	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1357		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00,
1358					name, name_len, conn->dev_class);
1359
1360	if (discov->state == DISCOVERY_STOPPED)
1361		return;
1362
1363	if (discov->state == DISCOVERY_STOPPING)
1364		goto discov_complete;
1365
1366	if (discov->state != DISCOVERY_RESOLVING)
1367		return;
1368
1369	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1370	if (e) {
1371		e->name_state = NAME_KNOWN;
1372		list_del(&e->list);
1373		if (name)
1374			mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1375					e->data.rssi, name, name_len);
1376	}
1377
1378	if (hci_resolve_next_name(hdev))
1379		return;
1380
1381discov_complete:
1382	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1383}
1384
1385static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1386{
1387	struct hci_cp_remote_name_req *cp;
1388	struct hci_conn *conn;
1389
1390	BT_DBG("%s status 0x%x", hdev->name, status);
1391
1392	/* If successful wait for the name req complete event before
1393	 * checking for the need to do authentication */
1394	if (!status)
1395		return;
1396
1397	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1398	if (!cp)
1399		return;
1400
1401	hci_dev_lock(hdev);
1402
1403	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1404
1405	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1406		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1407
1408	if (!conn)
1409		goto unlock;
1410
1411	if (!hci_outgoing_auth_needed(hdev, conn))
1412		goto unlock;
1413
1414	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1415		struct hci_cp_auth_requested cp;
1416		cp.handle = __cpu_to_le16(conn->handle);
1417		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1418	}
1419
1420unlock:
1421	hci_dev_unlock(hdev);
1422}
1423
1424static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1425{
1426	struct hci_cp_read_remote_features *cp;
1427	struct hci_conn *conn;
1428
1429	BT_DBG("%s status 0x%x", hdev->name, status);
1430
1431	if (!status)
1432		return;
1433
1434	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1435	if (!cp)
1436		return;
1437
1438	hci_dev_lock(hdev);
1439
1440	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1441	if (conn) {
1442		if (conn->state == BT_CONFIG) {
1443			hci_proto_connect_cfm(conn, status);
1444			hci_conn_put(conn);
1445		}
1446	}
1447
1448	hci_dev_unlock(hdev);
1449}
1450
1451static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1452{
1453	struct hci_cp_read_remote_ext_features *cp;
1454	struct hci_conn *conn;
1455
1456	BT_DBG("%s status 0x%x", hdev->name, status);
1457
1458	if (!status)
1459		return;
1460
1461	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1462	if (!cp)
1463		return;
1464
1465	hci_dev_lock(hdev);
1466
1467	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1468	if (conn) {
1469		if (conn->state == BT_CONFIG) {
1470			hci_proto_connect_cfm(conn, status);
1471			hci_conn_put(conn);
1472		}
1473	}
1474
1475	hci_dev_unlock(hdev);
1476}
1477
1478static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1479{
1480	struct hci_cp_setup_sync_conn *cp;
1481	struct hci_conn *acl, *sco;
1482	__u16 handle;
1483
1484	BT_DBG("%s status 0x%x", hdev->name, status);
1485
1486	if (!status)
1487		return;
1488
1489	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1490	if (!cp)
1491		return;
1492
1493	handle = __le16_to_cpu(cp->handle);
1494
1495	BT_DBG("%s handle %d", hdev->name, handle);
1496
1497	hci_dev_lock(hdev);
1498
1499	acl = hci_conn_hash_lookup_handle(hdev, handle);
1500	if (acl) {
1501		sco = acl->link;
1502		if (sco) {
1503			sco->state = BT_CLOSED;
1504
1505			hci_proto_connect_cfm(sco, status);
1506			hci_conn_del(sco);
1507		}
1508	}
1509
1510	hci_dev_unlock(hdev);
1511}
1512
1513static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1514{
1515	struct hci_cp_sniff_mode *cp;
1516	struct hci_conn *conn;
1517
1518	BT_DBG("%s status 0x%x", hdev->name, status);
1519
1520	if (!status)
1521		return;
1522
1523	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1524	if (!cp)
1525		return;
1526
1527	hci_dev_lock(hdev);
1528
1529	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1530	if (conn) {
1531		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1532
1533		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1534			hci_sco_setup(conn, status);
1535	}
1536
1537	hci_dev_unlock(hdev);
1538}
1539
1540static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1541{
1542	struct hci_cp_exit_sniff_mode *cp;
1543	struct hci_conn *conn;
1544
1545	BT_DBG("%s status 0x%x", hdev->name, status);
1546
1547	if (!status)
1548		return;
1549
1550	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1551	if (!cp)
1552		return;
1553
1554	hci_dev_lock(hdev);
1555
1556	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1557	if (conn) {
1558		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1559
1560		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1561			hci_sco_setup(conn, status);
1562	}
1563
1564	hci_dev_unlock(hdev);
1565}
1566
1567static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1568{
1569	struct hci_cp_disconnect *cp;
1570	struct hci_conn *conn;
1571
1572	if (!status)
1573		return;
1574
1575	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1576	if (!cp)
1577		return;
1578
1579	hci_dev_lock(hdev);
1580
1581	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1582	if (conn)
1583		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1584						conn->dst_type, status);
1585
1586	hci_dev_unlock(hdev);
1587}
1588
1589static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1590{
1591	struct hci_cp_le_create_conn *cp;
1592	struct hci_conn *conn;
1593
1594	BT_DBG("%s status 0x%x", hdev->name, status);
1595
1596	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1597	if (!cp)
1598		return;
1599
1600	hci_dev_lock(hdev);
1601
1602	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1603
1604	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1605		conn);
1606
1607	if (status) {
1608		if (conn && conn->state == BT_CONNECT) {
1609			conn->state = BT_CLOSED;
1610			hci_proto_connect_cfm(conn, status);
1611			hci_conn_del(conn);
1612		}
1613	} else {
1614		if (!conn) {
1615			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1616			if (conn) {
1617				conn->dst_type = cp->peer_addr_type;
1618				conn->out = true;
1619			} else {
1620				BT_ERR("No memory for new connection");
1621			}
1622		}
1623	}
1624
1625	hci_dev_unlock(hdev);
1626}
1627
1628static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1629{
1630	BT_DBG("%s status 0x%x", hdev->name, status);
1631}
1632
1633static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1634{
1635	__u8 status = *((__u8 *) skb->data);
1636	struct discovery_state *discov = &hdev->discovery;
1637	struct inquiry_entry *e;
1638
1639	BT_DBG("%s status %d", hdev->name, status);
1640
1641	hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1642
1643	hci_conn_check_pending(hdev);
1644
1645	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1646		return;
1647
1648	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1649		return;
1650
1651	hci_dev_lock(hdev);
1652
1653	if (discov->state != DISCOVERY_FINDING)
1654		goto unlock;
1655
1656	if (list_empty(&discov->resolve)) {
1657		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1658		goto unlock;
1659	}
1660
1661	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1662	if (e && hci_resolve_name(hdev, e) == 0) {
1663		e->name_state = NAME_PENDING;
1664		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1665	} else {
1666		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1667	}
1668
1669unlock:
1670	hci_dev_unlock(hdev);
1671}
1672
1673static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1674{
1675	struct inquiry_data data;
1676	struct inquiry_info *info = (void *) (skb->data + 1);
1677	int num_rsp = *((__u8 *) skb->data);
1678
1679	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1680
1681	if (!num_rsp)
1682		return;
1683
1684	hci_dev_lock(hdev);
1685
1686	for (; num_rsp; num_rsp--, info++) {
1687		bool name_known;
1688
1689		bacpy(&data.bdaddr, &info->bdaddr);
1690		data.pscan_rep_mode	= info->pscan_rep_mode;
1691		data.pscan_period_mode	= info->pscan_period_mode;
1692		data.pscan_mode		= info->pscan_mode;
1693		memcpy(data.dev_class, info->dev_class, 3);
1694		data.clock_offset	= info->clock_offset;
1695		data.rssi		= 0x00;
1696		data.ssp_mode		= 0x00;
1697
1698		name_known = hci_inquiry_cache_update(hdev, &data, false);
1699		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1700					info->dev_class, 0, !name_known,
1701					NULL, 0);
1702	}
1703
1704	hci_dev_unlock(hdev);
1705}
1706
1707static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1708{
1709	struct hci_ev_conn_complete *ev = (void *) skb->data;
1710	struct hci_conn *conn;
1711
1712	BT_DBG("%s", hdev->name);
1713
1714	hci_dev_lock(hdev);
1715
1716	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1717	if (!conn) {
1718		if (ev->link_type != SCO_LINK)
1719			goto unlock;
1720
1721		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1722		if (!conn)
1723			goto unlock;
1724
1725		conn->type = SCO_LINK;
1726	}
1727
1728	if (!ev->status) {
1729		conn->handle = __le16_to_cpu(ev->handle);
1730
1731		if (conn->type == ACL_LINK) {
1732			conn->state = BT_CONFIG;
1733			hci_conn_hold(conn);
1734			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1735		} else
1736			conn->state = BT_CONNECTED;
1737
1738		hci_conn_hold_device(conn);
1739		hci_conn_add_sysfs(conn);
1740
1741		if (test_bit(HCI_AUTH, &hdev->flags))
1742			conn->link_mode |= HCI_LM_AUTH;
1743
1744		if (test_bit(HCI_ENCRYPT, &hdev->flags))
1745			conn->link_mode |= HCI_LM_ENCRYPT;
1746
1747		/* Get remote features */
1748		if (conn->type == ACL_LINK) {
1749			struct hci_cp_read_remote_features cp;
1750			cp.handle = ev->handle;
1751			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1752							sizeof(cp), &cp);
1753		}
1754
1755		/* Set packet type for incoming connection */
1756		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1757			struct hci_cp_change_conn_ptype cp;
1758			cp.handle = ev->handle;
1759			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1760			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
1761							sizeof(cp), &cp);
1762		}
1763	} else {
1764		conn->state = BT_CLOSED;
1765		if (conn->type == ACL_LINK)
1766			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1767						conn->dst_type, ev->status);
1768	}
1769
1770	if (conn->type == ACL_LINK)
1771		hci_sco_setup(conn, ev->status);
1772
1773	if (ev->status) {
1774		hci_proto_connect_cfm(conn, ev->status);
1775		hci_conn_del(conn);
1776	} else if (ev->link_type != ACL_LINK)
1777		hci_proto_connect_cfm(conn, ev->status);
1778
1779unlock:
1780	hci_dev_unlock(hdev);
1781
1782	hci_conn_check_pending(hdev);
1783}
1784
1785static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1786{
1787	struct hci_ev_conn_request *ev = (void *) skb->data;
1788	int mask = hdev->link_mode;
1789
1790	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1791					batostr(&ev->bdaddr), ev->link_type);
1792
1793	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1794
1795	if ((mask & HCI_LM_ACCEPT) &&
1796			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1797		/* Connection accepted */
1798		struct inquiry_entry *ie;
1799		struct hci_conn *conn;
1800
1801		hci_dev_lock(hdev);
1802
1803		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1804		if (ie)
1805			memcpy(ie->data.dev_class, ev->dev_class, 3);
1806
1807		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1808		if (!conn) {
1809			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1810			if (!conn) {
1811				BT_ERR("No memory for new connection");
1812				hci_dev_unlock(hdev);
1813				return;
1814			}
1815		}
1816
1817		memcpy(conn->dev_class, ev->dev_class, 3);
1818		conn->state = BT_CONNECT;
1819
1820		hci_dev_unlock(hdev);
1821
1822		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1823			struct hci_cp_accept_conn_req cp;
1824
1825			bacpy(&cp.bdaddr, &ev->bdaddr);
1826
1827			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1828				cp.role = 0x00; /* Become master */
1829			else
1830				cp.role = 0x01; /* Remain slave */
1831
1832			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
1833							sizeof(cp), &cp);
1834		} else {
1835			struct hci_cp_accept_sync_conn_req cp;
1836
1837			bacpy(&cp.bdaddr, &ev->bdaddr);
1838			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1839
1840			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
1841			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
1842			cp.max_latency    = cpu_to_le16(0xffff);
1843			cp.content_format = cpu_to_le16(hdev->voice_setting);
1844			cp.retrans_effort = 0xff;
1845
1846			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1847							sizeof(cp), &cp);
1848		}
1849	} else {
1850		/* Connection rejected */
1851		struct hci_cp_reject_conn_req cp;
1852
1853		bacpy(&cp.bdaddr, &ev->bdaddr);
1854		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1855		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1856	}
1857}
1858
1859static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1860{
1861	struct hci_ev_disconn_complete *ev = (void *) skb->data;
1862	struct hci_conn *conn;
1863
1864	BT_DBG("%s status %d", hdev->name, ev->status);
1865
1866	hci_dev_lock(hdev);
1867
1868	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1869	if (!conn)
1870		goto unlock;
1871
1872	if (ev->status == 0)
1873		conn->state = BT_CLOSED;
1874
1875	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1876			(conn->type == ACL_LINK || conn->type == LE_LINK)) {
1877		if (ev->status != 0)
1878			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1879						conn->dst_type, ev->status);
1880		else
1881			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1882							conn->dst_type);
1883	}
1884
1885	if (ev->status == 0) {
1886		hci_proto_disconn_cfm(conn, ev->reason);
1887		hci_conn_del(conn);
1888	}
1889
1890unlock:
1891	hci_dev_unlock(hdev);
1892}
1893
1894static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1895{
1896	struct hci_ev_auth_complete *ev = (void *) skb->data;
1897	struct hci_conn *conn;
1898
1899	BT_DBG("%s status %d", hdev->name, ev->status);
1900
1901	hci_dev_lock(hdev);
1902
1903	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1904	if (!conn)
1905		goto unlock;
1906
1907	if (!ev->status) {
1908		if (!hci_conn_ssp_enabled(conn) &&
1909				test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1910			BT_INFO("re-auth of legacy device is not possible.");
1911		} else {
1912			conn->link_mode |= HCI_LM_AUTH;
1913			conn->sec_level = conn->pending_sec_level;
1914		}
1915	} else {
1916		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1917								ev->status);
1918	}
1919
1920	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1921	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1922
1923	if (conn->state == BT_CONFIG) {
1924		if (!ev->status && hci_conn_ssp_enabled(conn)) {
1925			struct hci_cp_set_conn_encrypt cp;
1926			cp.handle  = ev->handle;
1927			cp.encrypt = 0x01;
1928			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1929									&cp);
1930		} else {
1931			conn->state = BT_CONNECTED;
1932			hci_proto_connect_cfm(conn, ev->status);
1933			hci_conn_put(conn);
1934		}
1935	} else {
1936		hci_auth_cfm(conn, ev->status);
1937
1938		hci_conn_hold(conn);
1939		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1940		hci_conn_put(conn);
1941	}
1942
1943	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1944		if (!ev->status) {
1945			struct hci_cp_set_conn_encrypt cp;
1946			cp.handle  = ev->handle;
1947			cp.encrypt = 0x01;
1948			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1949									&cp);
1950		} else {
1951			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1952			hci_encrypt_cfm(conn, ev->status, 0x00);
1953		}
1954	}
1955
1956unlock:
1957	hci_dev_unlock(hdev);
1958}
1959
1960static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1961{
1962	struct hci_ev_remote_name *ev = (void *) skb->data;
1963	struct hci_conn *conn;
1964
1965	BT_DBG("%s", hdev->name);
1966
1967	hci_conn_check_pending(hdev);
1968
1969	hci_dev_lock(hdev);
1970
1971	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1972
1973	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1974		goto check_auth;
1975
1976	if (ev->status == 0)
1977		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1978					strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1979	else
1980		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
1981
1982check_auth:
1983	if (!conn)
1984		goto unlock;
1985
1986	if (!hci_outgoing_auth_needed(hdev, conn))
1987		goto unlock;
1988
1989	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1990		struct hci_cp_auth_requested cp;
1991		cp.handle = __cpu_to_le16(conn->handle);
1992		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1993	}
1994
1995unlock:
1996	hci_dev_unlock(hdev);
1997}
1998
1999static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2000{
2001	struct hci_ev_encrypt_change *ev = (void *) skb->data;
2002	struct hci_conn *conn;
2003
2004	BT_DBG("%s status %d", hdev->name, ev->status);
2005
2006	hci_dev_lock(hdev);
2007
2008	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2009	if (conn) {
2010		if (!ev->status) {
2011			if (ev->encrypt) {
2012				/* Encryption implies authentication */
2013				conn->link_mode |= HCI_LM_AUTH;
2014				conn->link_mode |= HCI_LM_ENCRYPT;
2015				conn->sec_level = conn->pending_sec_level;
2016			} else
2017				conn->link_mode &= ~HCI_LM_ENCRYPT;
2018		}
2019
2020		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2021
2022		if (conn->state == BT_CONFIG) {
2023			if (!ev->status)
2024				conn->state = BT_CONNECTED;
2025
2026			hci_proto_connect_cfm(conn, ev->status);
2027			hci_conn_put(conn);
2028		} else
2029			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2030	}
2031
2032	hci_dev_unlock(hdev);
2033}
2034
2035static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2036{
2037	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2038	struct hci_conn *conn;
2039
2040	BT_DBG("%s status %d", hdev->name, ev->status);
2041
2042	hci_dev_lock(hdev);
2043
2044	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2045	if (conn) {
2046		if (!ev->status)
2047			conn->link_mode |= HCI_LM_SECURE;
2048
2049		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2050
2051		hci_key_change_cfm(conn, ev->status);
2052	}
2053
2054	hci_dev_unlock(hdev);
2055}
2056
2057static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2058{
2059	struct hci_ev_remote_features *ev = (void *) skb->data;
2060	struct hci_conn *conn;
2061
2062	BT_DBG("%s status %d", hdev->name, ev->status);
2063
2064	hci_dev_lock(hdev);
2065
2066	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2067	if (!conn)
2068		goto unlock;
2069
2070	if (!ev->status)
2071		memcpy(conn->features, ev->features, 8);
2072
2073	if (conn->state != BT_CONFIG)
2074		goto unlock;
2075
2076	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2077		struct hci_cp_read_remote_ext_features cp;
2078		cp.handle = ev->handle;
2079		cp.page = 0x01;
2080		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2081							sizeof(cp), &cp);
2082		goto unlock;
2083	}
2084
2085	if (!ev->status) {
2086		struct hci_cp_remote_name_req cp;
2087		memset(&cp, 0, sizeof(cp));
2088		bacpy(&cp.bdaddr, &conn->dst);
2089		cp.pscan_rep_mode = 0x02;
2090		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2091	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2092		mgmt_device_connected(hdev, &conn->dst, conn->type,
2093						conn->dst_type, NULL, 0,
2094						conn->dev_class);
2095
2096	if (!hci_outgoing_auth_needed(hdev, conn)) {
2097		conn->state = BT_CONNECTED;
2098		hci_proto_connect_cfm(conn, ev->status);
2099		hci_conn_put(conn);
2100	}
2101
2102unlock:
2103	hci_dev_unlock(hdev);
2104}
2105
2106static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2107{
2108	BT_DBG("%s", hdev->name);
2109}
2110
2111static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2112{
2113	BT_DBG("%s", hdev->name);
2114}
2115
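/* Command Complete event: dispatch to the matching hci_cc_* handler,
 * stop the command timeout timer for real commands and restart the
 * command queue work if the controller reports free command slots. */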
2116static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2117{
2118	struct hci_ev_cmd_complete *ev = (void *) skb->data;
2119	__u16 opcode;
2120
2121	skb_pull(skb, sizeof(*ev));
2122
2123	opcode = __le16_to_cpu(ev->opcode);
2124
2125	switch (opcode) {
2126	case HCI_OP_INQUIRY_CANCEL:
2127		hci_cc_inquiry_cancel(hdev, skb);
2128		break;
2129
2130	case HCI_OP_EXIT_PERIODIC_INQ:
2131		hci_cc_exit_periodic_inq(hdev, skb);
2132		break;
2133
2134	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2135		hci_cc_remote_name_req_cancel(hdev, skb);
2136		break;
2137
2138	case HCI_OP_ROLE_DISCOVERY:
2139		hci_cc_role_discovery(hdev, skb);
2140		break;
2141
2142	case HCI_OP_READ_LINK_POLICY:
2143		hci_cc_read_link_policy(hdev, skb);
2144		break;
2145
2146	case HCI_OP_WRITE_LINK_POLICY:
2147		hci_cc_write_link_policy(hdev, skb);
2148		break;
2149
2150	case HCI_OP_READ_DEF_LINK_POLICY:
2151		hci_cc_read_def_link_policy(hdev, skb);
2152		break;
2153
2154	case HCI_OP_WRITE_DEF_LINK_POLICY:
2155		hci_cc_write_def_link_policy(hdev, skb);
2156		break;
2157
2158	case HCI_OP_RESET:
2159		hci_cc_reset(hdev, skb);
2160		break;
2161
2162	case HCI_OP_WRITE_LOCAL_NAME:
2163		hci_cc_write_local_name(hdev, skb);
2164		break;
2165
2166	case HCI_OP_READ_LOCAL_NAME:
2167		hci_cc_read_local_name(hdev, skb);
2168		break;
2169
2170	case HCI_OP_WRITE_AUTH_ENABLE:
2171		hci_cc_write_auth_enable(hdev, skb);
2172		break;
2173
2174	case HCI_OP_WRITE_ENCRYPT_MODE:
2175		hci_cc_write_encrypt_mode(hdev, skb);
2176		break;
2177
2178	case HCI_OP_WRITE_SCAN_ENABLE:
2179		hci_cc_write_scan_enable(hdev, skb);
2180		break;
2181
2182	case HCI_OP_READ_CLASS_OF_DEV:
2183		hci_cc_read_class_of_dev(hdev, skb);
2184		break;
2185
2186	case HCI_OP_WRITE_CLASS_OF_DEV:
2187		hci_cc_write_class_of_dev(hdev, skb);
2188		break;
2189
2190	case HCI_OP_READ_VOICE_SETTING:
2191		hci_cc_read_voice_setting(hdev, skb);
2192		break;
2193
2194	case HCI_OP_WRITE_VOICE_SETTING:
2195		hci_cc_write_voice_setting(hdev, skb);
2196		break;
2197
2198	case HCI_OP_HOST_BUFFER_SIZE:
2199		hci_cc_host_buffer_size(hdev, skb);
2200		break;
2201
2202	case HCI_OP_READ_SSP_MODE:
2203		hci_cc_read_ssp_mode(hdev, skb);
2204		break;
2205
2206	case HCI_OP_WRITE_SSP_MODE:
2207		hci_cc_write_ssp_mode(hdev, skb);
2208		break;
2209
2210	case HCI_OP_READ_LOCAL_VERSION:
2211		hci_cc_read_local_version(hdev, skb);
2212		break;
2213
2214	case HCI_OP_READ_LOCAL_COMMANDS:
2215		hci_cc_read_local_commands(hdev, skb);
2216		break;
2217
2218	case HCI_OP_READ_LOCAL_FEATURES:
2219		hci_cc_read_local_features(hdev, skb);
2220		break;
2221
2222	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2223		hci_cc_read_local_ext_features(hdev, skb);
2224		break;
2225
2226	case HCI_OP_READ_BUFFER_SIZE:
2227		hci_cc_read_buffer_size(hdev, skb);
2228		break;
2229
2230	case HCI_OP_READ_BD_ADDR:
2231		hci_cc_read_bd_addr(hdev, skb);
2232		break;
2233
2234	case HCI_OP_READ_DATA_BLOCK_SIZE:
2235		hci_cc_read_data_block_size(hdev, skb);
2236		break;
2237
2238	case HCI_OP_WRITE_CA_TIMEOUT:
2239		hci_cc_write_ca_timeout(hdev, skb);
2240		break;
2241
2242	case HCI_OP_READ_FLOW_CONTROL_MODE:
2243		hci_cc_read_flow_control_mode(hdev, skb);
2244		break;
2245
2246	case HCI_OP_READ_LOCAL_AMP_INFO:
2247		hci_cc_read_local_amp_info(hdev, skb);
2248		break;
2249
2250	case HCI_OP_DELETE_STORED_LINK_KEY:
2251		hci_cc_delete_stored_link_key(hdev, skb);
2252		break;
2253
2254	case HCI_OP_SET_EVENT_MASK:
2255		hci_cc_set_event_mask(hdev, skb);
2256		break;
2257
2258	case HCI_OP_WRITE_INQUIRY_MODE:
2259		hci_cc_write_inquiry_mode(hdev, skb);
2260		break;
2261
2262	case HCI_OP_READ_INQ_RSP_TX_POWER:
2263		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2264		break;
2265
2266	case HCI_OP_SET_EVENT_FLT:
2267		hci_cc_set_event_flt(hdev, skb);
2268		break;
2269
2270	case HCI_OP_PIN_CODE_REPLY:
2271		hci_cc_pin_code_reply(hdev, skb);
2272		break;
2273
2274	case HCI_OP_PIN_CODE_NEG_REPLY:
2275		hci_cc_pin_code_neg_reply(hdev, skb);
2276		break;
2277
2278	case HCI_OP_READ_LOCAL_OOB_DATA:
2279		hci_cc_read_local_oob_data_reply(hdev, skb);
2280		break;
2281
2282	case HCI_OP_LE_READ_BUFFER_SIZE:
2283		hci_cc_le_read_buffer_size(hdev, skb);
2284		break;
2285
2286	case HCI_OP_USER_CONFIRM_REPLY:
2287		hci_cc_user_confirm_reply(hdev, skb);
2288		break;
2289
2290	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2291		hci_cc_user_confirm_neg_reply(hdev, skb);
2292		break;
2293
2294	case HCI_OP_USER_PASSKEY_REPLY:
2295		hci_cc_user_passkey_reply(hdev, skb);
2296		break;
2297
2298	case HCI_OP_USER_PASSKEY_NEG_REPLY:
2299		hci_cc_user_passkey_neg_reply(hdev, skb);
2300		break;
2301	case HCI_OP_LE_SET_SCAN_PARAM:
2302		hci_cc_le_set_scan_param(hdev, skb);
2303		break;
2304
2305	case HCI_OP_LE_SET_SCAN_ENABLE:
2306		hci_cc_le_set_scan_enable(hdev, skb);
2307		break;
2308
2309	case HCI_OP_LE_LTK_REPLY:
2310		hci_cc_le_ltk_reply(hdev, skb);
2311		break;
2312
2313	case HCI_OP_LE_LTK_NEG_REPLY:
2314		hci_cc_le_ltk_neg_reply(hdev, skb);
2315		break;
2316
2317	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2318		hci_cc_write_le_host_supported(hdev, skb);
2319		break;
2320
2321	default:
2322		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2323		break;
2324	}
2325
2326	if (ev->opcode != HCI_OP_NOP)
2327		del_timer(&hdev->cmd_timer);
2328
2329	if (ev->ncmd) {
2330		atomic_set(&hdev->cmd_cnt, 1);
2331		if (!skb_queue_empty(&hdev->cmd_q))
2332			queue_work(hdev->workqueue, &hdev->cmd_work);
2333	}
2334}
2335
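/* Command Status event: dispatch to the matching hci_cs_* handler and
 * restart the command queue work if the controller reports free command
 * slots and no reset is in progress. */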
2336static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2337{
2338	struct hci_ev_cmd_status *ev = (void *) skb->data;
2339	__u16 opcode;
2340
2341	skb_pull(skb, sizeof(*ev));
2342
2343	opcode = __le16_to_cpu(ev->opcode);
2344
2345	switch (opcode) {
2346	case HCI_OP_INQUIRY:
2347		hci_cs_inquiry(hdev, ev->status);
2348		break;
2349
2350	case HCI_OP_CREATE_CONN:
2351		hci_cs_create_conn(hdev, ev->status);
2352		break;
2353
2354	case HCI_OP_ADD_SCO:
2355		hci_cs_add_sco(hdev, ev->status);
2356		break;
2357
2358	case HCI_OP_AUTH_REQUESTED:
2359		hci_cs_auth_requested(hdev, ev->status);
2360		break;
2361
2362	case HCI_OP_SET_CONN_ENCRYPT:
2363		hci_cs_set_conn_encrypt(hdev, ev->status);
2364		break;
2365
2366	case HCI_OP_REMOTE_NAME_REQ:
2367		hci_cs_remote_name_req(hdev, ev->status);
2368		break;
2369
2370	case HCI_OP_READ_REMOTE_FEATURES:
2371		hci_cs_read_remote_features(hdev, ev->status);
2372		break;
2373
2374	case HCI_OP_READ_REMOTE_EXT_FEATURES:
2375		hci_cs_read_remote_ext_features(hdev, ev->status);
2376		break;
2377
2378	case HCI_OP_SETUP_SYNC_CONN:
2379		hci_cs_setup_sync_conn(hdev, ev->status);
2380		break;
2381
2382	case HCI_OP_SNIFF_MODE:
2383		hci_cs_sniff_mode(hdev, ev->status);
2384		break;
2385
2386	case HCI_OP_EXIT_SNIFF_MODE:
2387		hci_cs_exit_sniff_mode(hdev, ev->status);
2388		break;
2389
2390	case HCI_OP_DISCONNECT:
2391		hci_cs_disconnect(hdev, ev->status);
2392		break;
2393
2394	case HCI_OP_LE_CREATE_CONN:
2395		hci_cs_le_create_conn(hdev, ev->status);
2396		break;
2397
2398	case HCI_OP_LE_START_ENC:
2399		hci_cs_le_start_enc(hdev, ev->status);
2400		break;
2401
2402	default:
2403		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2404		break;
2405	}
2406
2407	if (ev->opcode != HCI_OP_NOP)
2408		del_timer(&hdev->cmd_timer);
2409
2410	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2411		atomic_set(&hdev->cmd_cnt, 1);
2412		if (!skb_queue_empty(&hdev->cmd_q))
2413			queue_work(hdev->workqueue, &hdev->cmd_work);
2414	}
2415}
2416
2417static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2418{
2419	struct hci_ev_role_change *ev = (void *) skb->data;
2420	struct hci_conn *conn;
2421
2422	BT_DBG("%s status %d", hdev->name, ev->status);
2423
2424	hci_dev_lock(hdev);
2425
2426	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2427	if (conn) {
2428		if (!ev->status) {
2429			if (ev->role)
2430				conn->link_mode &= ~HCI_LM_MASTER;
2431			else
2432				conn->link_mode |= HCI_LM_MASTER;
2433		}
2434
2435		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2436
2437		hci_role_switch_cfm(conn, ev->status, ev->role);
2438	}
2439
2440	hci_dev_unlock(hdev);
2441}
2442
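/* Number of Completed Packets event (packet-based flow control): return
 * the completed packets to the per-link-type transmit counters and kick
 * the TX work queue. */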
2443static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2444{
2445	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2446	int i;
2447
2448	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2449		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2450		return;
2451	}
2452
2453	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2454			ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2455		BT_DBG("%s bad parameters", hdev->name);
2456		return;
2457	}
2458
2459	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2460
2461	for (i = 0; i < ev->num_hndl; i++) {
2462		struct hci_comp_pkts_info *info = &ev->handles[i];
2463		struct hci_conn *conn;
2464		__u16  handle, count;
2465
2466		handle = __le16_to_cpu(info->handle);
2467		count  = __le16_to_cpu(info->count);
2468
2469		conn = hci_conn_hash_lookup_handle(hdev, handle);
2470		if (!conn)
2471			continue;
2472
2473		conn->sent -= count;
2474
2475		switch (conn->type) {
2476		case ACL_LINK:
2477			hdev->acl_cnt += count;
2478			if (hdev->acl_cnt > hdev->acl_pkts)
2479				hdev->acl_cnt = hdev->acl_pkts;
2480			break;
2481
2482		case LE_LINK:
2483			if (hdev->le_pkts) {
2484				hdev->le_cnt += count;
2485				if (hdev->le_cnt > hdev->le_pkts)
2486					hdev->le_cnt = hdev->le_pkts;
2487			} else {
2488				hdev->acl_cnt += count;
2489				if (hdev->acl_cnt > hdev->acl_pkts)
2490					hdev->acl_cnt = hdev->acl_pkts;
2491			}
2492			break;
2493
2494		case SCO_LINK:
2495			hdev->sco_cnt += count;
2496			if (hdev->sco_cnt > hdev->sco_pkts)
2497				hdev->sco_cnt = hdev->sco_pkts;
2498			break;
2499
2500		default:
2501			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2502			break;
2503		}
2504	}
2505
2506	queue_work(hdev->workqueue, &hdev->tx_work);
2507}
2508
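/* Number of Completed Data Blocks event (block-based flow control):
 * return the completed blocks to the shared block counter and kick the
 * TX work queue. */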
2509static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2510							struct sk_buff *skb)
2511{
2512	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2513	int i;
2514
2515	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2516		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2517		return;
2518	}
2519
2520	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2521			ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2522		BT_DBG("%s bad parameters", hdev->name);
2523		return;
2524	}
2525
2526	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2527								ev->num_hndl);
2528
2529	for (i = 0; i < ev->num_hndl; i++) {
2530		struct hci_comp_blocks_info *info = &ev->handles[i];
2531		struct hci_conn *conn;
2532		__u16  handle, block_count;
2533
2534		handle = __le16_to_cpu(info->handle);
2535		block_count = __le16_to_cpu(info->blocks);
2536
2537		conn = hci_conn_hash_lookup_handle(hdev, handle);
2538		if (!conn)
2539			continue;
2540
2541		conn->sent -= block_count;
2542
2543		switch (conn->type) {
2544		case ACL_LINK:
2545			hdev->block_cnt += block_count;
2546			if (hdev->block_cnt > hdev->num_blocks)
2547				hdev->block_cnt = hdev->num_blocks;
2548			break;
2549
2550		default:
2551			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2552			break;
2553		}
2554	}
2555
2556	queue_work(hdev->workqueue, &hdev->tx_work);
2557}
2558
2559static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2560{
2561	struct hci_ev_mode_change *ev = (void *) skb->data;
2562	struct hci_conn *conn;
2563
2564	BT_DBG("%s status %d", hdev->name, ev->status);
2565
2566	hci_dev_lock(hdev);
2567
2568	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2569	if (conn) {
2570		conn->mode = ev->mode;
2571		conn->interval = __le16_to_cpu(ev->interval);
2572
2573		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2574			if (conn->mode == HCI_CM_ACTIVE)
2575				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2576			else
2577				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2578		}
2579
2580		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2581			hci_sco_setup(conn, ev->status);
2582	}
2583
2584	hci_dev_unlock(hdev);
2585}
2586
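/* PIN Code Request event: send a negative reply if the device is not
 * pairable, otherwise forward the request to user space through mgmt,
 * indicating whether a secure PIN is required. */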
2587static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2588{
2589	struct hci_ev_pin_code_req *ev = (void *) skb->data;
2590	struct hci_conn *conn;
2591
2592	BT_DBG("%s", hdev->name);
2593
2594	hci_dev_lock(hdev);
2595
2596	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2597	if (!conn)
2598		goto unlock;
2599
2600	if (conn->state == BT_CONNECTED) {
2601		hci_conn_hold(conn);
2602		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2603		hci_conn_put(conn);
2604	}
2605
2606	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2607		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2608					sizeof(ev->bdaddr), &ev->bdaddr);
2609	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2610		u8 secure;
2611
2612		if (conn->pending_sec_level == BT_SECURITY_HIGH)
2613			secure = 1;
2614		else
2615			secure = 0;
2616
2617		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2618	}
2619
2620unlock:
2621	hci_dev_unlock(hdev);
2622}
2623
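/* Link Key Request event: look up a stored key for the peer and reply
 * with it, unless the key type is unacceptable for the connection's
 * pending security level (debug keys are also ignored unless explicitly
 * enabled); otherwise send a negative reply. */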
2624static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2625{
2626	struct hci_ev_link_key_req *ev = (void *) skb->data;
2627	struct hci_cp_link_key_reply cp;
2628	struct hci_conn *conn;
2629	struct link_key *key;
2630
2631	BT_DBG("%s", hdev->name);
2632
2633	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2634		return;
2635
2636	hci_dev_lock(hdev);
2637
2638	key = hci_find_link_key(hdev, &ev->bdaddr);
2639	if (!key) {
2640		BT_DBG("%s link key not found for %s", hdev->name,
2641							batostr(&ev->bdaddr));
2642		goto not_found;
2643	}
2644
2645	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2646							batostr(&ev->bdaddr));
2647
2648	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2649				key->type == HCI_LK_DEBUG_COMBINATION) {
2650		BT_DBG("%s ignoring debug key", hdev->name);
2651		goto not_found;
2652	}
2653
2654	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2655	if (conn) {
2656		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2657				conn->auth_type != 0xff &&
2658				(conn->auth_type & 0x01)) {
2659			BT_DBG("%s ignoring unauthenticated key", hdev->name);
2660			goto not_found;
2661		}
2662
2663		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2664				conn->pending_sec_level == BT_SECURITY_HIGH) {
2665			BT_DBG("%s ignoring key unauthenticated for high security",
2666								hdev->name);
2667			goto not_found;
2668		}
2669
2670		conn->key_type = key->type;
2671		conn->pin_length = key->pin_len;
2672	}
2673
2674	bacpy(&cp.bdaddr, &ev->bdaddr);
2675	memcpy(cp.link_key, key->val, 16);
2676
2677	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2678
2679	hci_dev_unlock(hdev);
2680
2681	return;
2682
2683not_found:
2684	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2685	hci_dev_unlock(hdev);
2686}
2687
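/* Link Key Notification event: record the key type and PIN length on
 * the connection and, when link key storage is enabled, add the new key
 * to the list kept by the stack. */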
2688static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2689{
2690	struct hci_ev_link_key_notify *ev = (void *) skb->data;
2691	struct hci_conn *conn;
2692	u8 pin_len = 0;
2693
2694	BT_DBG("%s", hdev->name);
2695
2696	hci_dev_lock(hdev);
2697
2698	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2699	if (conn) {
2700		hci_conn_hold(conn);
2701		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2702		pin_len = conn->pin_length;
2703
2704		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2705			conn->key_type = ev->key_type;
2706
2707		hci_conn_put(conn);
2708	}
2709
2710	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2711		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2712							ev->key_type, pin_len);
2713
2714	hci_dev_unlock(hdev);
2715}
2716
2717static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2718{
2719	struct hci_ev_clock_offset *ev = (void *) skb->data;
2720	struct hci_conn *conn;
2721
2722	BT_DBG("%s status %d", hdev->name, ev->status);
2723
2724	hci_dev_lock(hdev);
2725
2726	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2727	if (conn && !ev->status) {
2728		struct inquiry_entry *ie;
2729
2730		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2731		if (ie) {
2732			ie->data.clock_offset = ev->clock_offset;
2733			ie->timestamp = jiffies;
2734		}
2735	}
2736
2737	hci_dev_unlock(hdev);
2738}
2739
2740static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2741{
2742	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2743	struct hci_conn *conn;
2744
2745	BT_DBG("%s status %d", hdev->name, ev->status);
2746
2747	hci_dev_lock(hdev);
2748
2749	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2750	if (conn && !ev->status)
2751		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2752
2753	hci_dev_unlock(hdev);
2754}
2755
2756static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2757{
2758	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2759	struct inquiry_entry *ie;
2760
2761	BT_DBG("%s", hdev->name);
2762
2763	hci_dev_lock(hdev);
2764
2765	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2766	if (ie) {
2767		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2768		ie->timestamp = jiffies;
2769	}
2770
2771	hci_dev_unlock(hdev);
2772}
2773
2774static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2775{
2776	struct inquiry_data data;
2777	int num_rsp = *((__u8 *) skb->data);
2778	bool name_known;
2779
2780	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2781
2782	if (!num_rsp)
2783		return;
2784
2785	hci_dev_lock(hdev);
2786
2787	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2788		struct inquiry_info_with_rssi_and_pscan_mode *info;
2789		info = (void *) (skb->data + 1);
2790
2791		for (; num_rsp; num_rsp--, info++) {
2792			bacpy(&data.bdaddr, &info->bdaddr);
2793			data.pscan_rep_mode	= info->pscan_rep_mode;
2794			data.pscan_period_mode	= info->pscan_period_mode;
2795			data.pscan_mode		= info->pscan_mode;
2796			memcpy(data.dev_class, info->dev_class, 3);
2797			data.clock_offset	= info->clock_offset;
2798			data.rssi		= info->rssi;
2799			data.ssp_mode		= 0x00;
2800
2801			name_known = hci_inquiry_cache_update(hdev, &data,
2802								false);
2803			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2804						info->dev_class, info->rssi,
2805						!name_known, NULL, 0);
2806		}
2807	} else {
2808		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2809
2810		for (; num_rsp; num_rsp--, info++) {
2811			bacpy(&data.bdaddr, &info->bdaddr);
2812			data.pscan_rep_mode	= info->pscan_rep_mode;
2813			data.pscan_period_mode	= info->pscan_period_mode;
2814			data.pscan_mode		= 0x00;
2815			memcpy(data.dev_class, info->dev_class, 3);
2816			data.clock_offset	= info->clock_offset;
2817			data.rssi		= info->rssi;
2818			data.ssp_mode		= 0x00;
2819			name_known = hci_inquiry_cache_update(hdev, &data,
2820								false);
2821			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2822						info->dev_class, info->rssi,
2823						!name_known, NULL, 0);
2824		}
2825	}
2826
2827	hci_dev_unlock(hdev);
2828}
2829
2830static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2831{
2832	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2833	struct hci_conn *conn;
2834
2835	BT_DBG("%s", hdev->name);
2836
2837	hci_dev_lock(hdev);
2838
2839	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2840	if (!conn)
2841		goto unlock;
2842
2843	if (!ev->status && ev->page == 0x01) {
2844		struct inquiry_entry *ie;
2845
2846		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2847		if (ie)
2848			ie->data.ssp_mode = (ev->features[0] & 0x01);
2849
2850		if (ev->features[0] & 0x01)
2851			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2852	}
2853
2854	if (conn->state != BT_CONFIG)
2855		goto unlock;
2856
2857	if (!ev->status) {
2858		struct hci_cp_remote_name_req cp;
2859		memset(&cp, 0, sizeof(cp));
2860		bacpy(&cp.bdaddr, &conn->dst);
2861		cp.pscan_rep_mode = 0x02;
2862		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2863	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2864		mgmt_device_connected(hdev, &conn->dst, conn->type,
2865						conn->dst_type, NULL, 0,
2866						conn->dev_class);
2867
2868	if (!hci_outgoing_auth_needed(hdev, conn)) {
2869		conn->state = BT_CONNECTED;
2870		hci_proto_connect_cfm(conn, ev->status);
2871		hci_conn_put(conn);
2872	}
2873
2874unlock:
2875	hci_dev_unlock(hdev);
2876}
2877
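/* Synchronous Connection Complete event: finish SCO/eSCO setup on
 * success; for a few well-known failure codes retry once with a less
 * demanding packet type, otherwise close the connection. */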
2878static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2879{
2880	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2881	struct hci_conn *conn;
2882
2883	BT_DBG("%s status %d", hdev->name, ev->status);
2884
2885	hci_dev_lock(hdev);
2886
2887	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2888	if (!conn) {
2889		if (ev->link_type == ESCO_LINK)
2890			goto unlock;
2891
2892		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2893		if (!conn)
2894			goto unlock;
2895
2896		conn->type = SCO_LINK;
2897	}
2898
2899	switch (ev->status) {
2900	case 0x00:
2901		conn->handle = __le16_to_cpu(ev->handle);
2902		conn->state  = BT_CONNECTED;
2903
2904		hci_conn_hold_device(conn);
2905		hci_conn_add_sysfs(conn);
2906		break;
2907
2908	case 0x11:	/* Unsupported Feature or Parameter Value */
2909	case 0x1c:	/* SCO interval rejected */
2910	case 0x1a:	/* Unsupported Remote Feature */
2911	case 0x1f:	/* Unspecified error */
2912		if (conn->out && conn->attempt < 2) {
2913			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2914					(hdev->esco_type & EDR_ESCO_MASK);
2915			hci_setup_sync(conn, conn->link->handle);
2916			goto unlock;
2917		}
2918		/* fall through */
2919
2920	default:
2921		conn->state = BT_CLOSED;
2922		break;
2923	}
2924
2925	hci_proto_connect_cfm(conn, ev->status);
2926	if (ev->status)
2927		hci_conn_del(conn);
2928
2929unlock:
2930	hci_dev_unlock(hdev);
2931}
2932
2933static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2934{
2935	BT_DBG("%s", hdev->name);
2936}
2937
2938static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2939{
2940	struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2941
2942	BT_DBG("%s status %d", hdev->name, ev->status);
2943}
2944
2945static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2946{
2947	struct inquiry_data data;
2948	struct extended_inquiry_info *info = (void *) (skb->data + 1);
2949	int num_rsp = *((__u8 *) skb->data);
2950
2951	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2952
2953	if (!num_rsp)
2954		return;
2955
2956	hci_dev_lock(hdev);
2957
2958	for (; num_rsp; num_rsp--, info++) {
2959		bool name_known;
2960
2961		bacpy(&data.bdaddr, &info->bdaddr);
2962		data.pscan_rep_mode	= info->pscan_rep_mode;
2963		data.pscan_period_mode	= info->pscan_period_mode;
2964		data.pscan_mode		= 0x00;
2965		memcpy(data.dev_class, info->dev_class, 3);
2966		data.clock_offset	= info->clock_offset;
2967		data.rssi		= info->rssi;
2968		data.ssp_mode		= 0x01;
2969
2970		if (test_bit(HCI_MGMT, &hdev->dev_flags))
2971			name_known = eir_has_data_type(info->data,
2972							sizeof(info->data),
2973							EIR_NAME_COMPLETE);
2974		else
2975			name_known = true;
2976
2977		name_known = hci_inquiry_cache_update(hdev, &data, name_known);
2978		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2979						info->dev_class, info->rssi,
2980						!name_known, info->data,
2981						sizeof(info->data));
2982	}
2983
2984	hci_dev_unlock(hdev);
2985}
2986
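/* Derive the authentication requirements to use in an IO capability
 * reply from what the remote side requested and our own settings. */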
2987static inline u8 hci_get_auth_req(struct hci_conn *conn)
2988{
2989	/* If the remote side requests dedicated bonding, follow that lead */
2990	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2991		/* If both remote and local IO capabilities allow MITM
2992		 * protection, require it; otherwise don't. */
2993		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2994			return 0x02;
2995		else
2996			return 0x03;
2997	}
2998
2999	/* If the remote side requests no-bonding, follow that lead */
3000	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3001		return conn->remote_auth | (conn->auth_type & 0x01);
3002
3003	return conn->auth_type;
3004}
3005
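/* IO Capability Request event: reply with our IO capability, the derived
 * authentication requirements and the OOB data flag, or send a negative
 * reply when pairing is not currently allowed. */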
3006static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3007{
3008	struct hci_ev_io_capa_request *ev = (void *) skb->data;
3009	struct hci_conn *conn;
3010
3011	BT_DBG("%s", hdev->name);
3012
3013	hci_dev_lock(hdev);
3014
3015	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3016	if (!conn)
3017		goto unlock;
3018
3019	hci_conn_hold(conn);
3020
3021	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3022		goto unlock;
3023
3024	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3025			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3026		struct hci_cp_io_capability_reply cp;
3027
3028		bacpy(&cp.bdaddr, &ev->bdaddr);
3029		/* Change the IO capability from KeyboardDisplay to DisplayYesNo,
3030		 * as KeyboardDisplay is not supported by the BT spec. */
3031		cp.capability = (conn->io_capability == 0x04) ?
3032						0x01 : conn->io_capability;
3033		conn->auth_type = hci_get_auth_req(conn);
3034		cp.authentication = conn->auth_type;
3035
3036		if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
3037				hci_find_remote_oob_data(hdev, &conn->dst))
3038			cp.oob_data = 0x01;
3039		else
3040			cp.oob_data = 0x00;
3041
3042		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3043							sizeof(cp), &cp);
3044	} else {
3045		struct hci_cp_io_capability_neg_reply cp;
3046
3047		bacpy(&cp.bdaddr, &ev->bdaddr);
3048		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3049
3050		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3051							sizeof(cp), &cp);
3052	}
3053
3054unlock:
3055	hci_dev_unlock(hdev);
3056}
3057
3058static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3059{
3060	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3061	struct hci_conn *conn;
3062
3063	BT_DBG("%s", hdev->name);
3064
3065	hci_dev_lock(hdev);
3066
3067	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3068	if (!conn)
3069		goto unlock;
3070
3071	conn->remote_cap = ev->capability;
3072	conn->remote_auth = ev->authentication;
3073	if (ev->oob_data)
3074		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3075
3076unlock:
3077	hci_dev_unlock(hdev);
3078}
3079
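/* User Confirmation Request event: reject the request if we require MITM
 * protection the remote side cannot provide, auto-accept (possibly after
 * a configurable delay) when neither side needs MITM and we initiated the
 * pairing, and otherwise hand the request to user space via mgmt. */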
3080static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3081							struct sk_buff *skb)
3082{
3083	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3084	int loc_mitm, rem_mitm, confirm_hint = 0;
3085	struct hci_conn *conn;
3086
3087	BT_DBG("%s", hdev->name);
3088
3089	hci_dev_lock(hdev);
3090
3091	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3092		goto unlock;
3093
3094	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3095	if (!conn)
3096		goto unlock;
3097
3098	loc_mitm = (conn->auth_type & 0x01);
3099	rem_mitm = (conn->remote_auth & 0x01);
3100
3101	/* If we require MITM but the remote device can't provide that
3102	 * (it has NoInputNoOutput) then reject the confirmation
3103	 * request. The only exception is when we're dedicated bonding
3104	 * initiators (connect_cfm_cb set) since then we always have the MITM
3105	 * bit set. */
3106	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3107		BT_DBG("Rejecting request: remote device can't provide MITM");
3108		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3109					sizeof(ev->bdaddr), &ev->bdaddr);
3110		goto unlock;
3111	}
3112
3113	/* If no side requires MITM protection; auto-accept */
3114	/* If neither side requires MITM protection, auto-accept */
3115				(!rem_mitm || conn->io_capability == 0x03)) {
3116
3117		/* If we're not the initiator, request authorization to
3118		 * proceed from user space (mgmt_user_confirm with
3119		 * confirm_hint set to 1). */
3120		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3121			BT_DBG("Confirming auto-accept as acceptor");
3122			confirm_hint = 1;
3123			goto confirm;
3124		}
3125
3126		BT_DBG("Auto-accept of user confirmation with %ums delay",
3127						hdev->auto_accept_delay);
3128
3129		if (hdev->auto_accept_delay > 0) {
3130			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3131			mod_timer(&conn->auto_accept_timer, jiffies + delay);
3132			goto unlock;
3133		}
3134
3135		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3136						sizeof(ev->bdaddr), &ev->bdaddr);
3137		goto unlock;
3138	}
3139
3140confirm:
3141	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3142								confirm_hint);
3143
3144unlock:
3145	hci_dev_unlock(hdev);
3146}
3147
3148static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3149							struct sk_buff *skb)
3150{
3151	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3152
3153	BT_DBG("%s", hdev->name);
3154
3155	hci_dev_lock(hdev);
3156
3157	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3158		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3159
3160	hci_dev_unlock(hdev);
3161}
3162
3163static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3164{
3165	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3166	struct hci_conn *conn;
3167
3168	BT_DBG("%s", hdev->name);
3169
3170	hci_dev_lock(hdev);
3171
3172	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3173	if (!conn)
3174		goto unlock;
3175
3176	/* To avoid sending duplicate auth_failed events to user space we
3177	 * check the HCI_CONN_AUTH_PEND flag, which is set when we initiated
3178	 * the authentication. In that case a traditional auth_complete event
3179	 * is produced as well and is already mapped to the mgmt_auth_failed
3180	 * event. */
3181	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3182		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3183								ev->status);
3184
3185	hci_conn_put(conn);
3186
3187unlock:
3188	hci_dev_unlock(hdev);
3189}
3190
3191static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3192{
3193	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3194	struct inquiry_entry *ie;
3195
3196	BT_DBG("%s", hdev->name);
3197
3198	hci_dev_lock(hdev);
3199
3200	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3201	if (ie)
3202		ie->data.ssp_mode = (ev->features[0] & 0x01);
3203
3204	hci_dev_unlock(hdev);
3205}
3206
3207static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3208							struct sk_buff *skb)
3209{
3210	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3211	struct oob_data *data;
3212
3213	BT_DBG("%s", hdev->name);
3214
3215	hci_dev_lock(hdev);
3216
3217	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3218		goto unlock;
3219
3220	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3221	if (data) {
3222		struct hci_cp_remote_oob_data_reply cp;
3223
3224		bacpy(&cp.bdaddr, &ev->bdaddr);
3225		memcpy(cp.hash, data->hash, sizeof(cp.hash));
3226		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3227
3228		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3229									&cp);
3230	} else {
3231		struct hci_cp_remote_oob_data_neg_reply cp;
3232
3233		bacpy(&cp.bdaddr, &ev->bdaddr);
3234		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3235									&cp);
3236	}
3237
3238unlock:
3239	hci_dev_unlock(hdev);
3240}
3241
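/* LE Connection Complete event: create or look up the LE connection,
 * report the result to mgmt and, on success, move the connection to
 * BT_CONNECTED before notifying the upper layers. */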
3242static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3243{
3244	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3245	struct hci_conn *conn;
3246
3247	BT_DBG("%s status %d", hdev->name, ev->status);
3248
3249	hci_dev_lock(hdev);
3250
3251	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3252	if (!conn) {
3253		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3254		if (!conn) {
3255			BT_ERR("No memory for new connection");
3256			hci_dev_unlock(hdev);
3257			return;
3258		}
3259
3260		conn->dst_type = ev->bdaddr_type;
3261	}
3262
3263	if (ev->status) {
3264		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3265						conn->dst_type, ev->status);
3266		hci_proto_connect_cfm(conn, ev->status);
3267		conn->state = BT_CLOSED;
3268		hci_conn_del(conn);
3269		goto unlock;
3270	}
3271
3272	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3273		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3274						conn->dst_type, NULL, 0, 0);
3275
3276	conn->sec_level = BT_SECURITY_LOW;
3277	conn->handle = __le16_to_cpu(ev->handle);
3278	conn->state = BT_CONNECTED;
3279
3280	hci_conn_hold_device(conn);
3281	hci_conn_add_sysfs(conn);
3282
3283	hci_proto_connect_cfm(conn, ev->status);
3284
3285unlock:
3286	hci_dev_unlock(hdev);
3287}
3288
3289static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3290						struct sk_buff *skb)
3291{
3292	u8 num_reports = skb->data[0];
3293	void *ptr = &skb->data[1];
3294	s8 rssi;
3295
3296	hci_dev_lock(hdev);
3297
3298	while (num_reports--) {
3299		struct hci_ev_le_advertising_info *ev = ptr;
3300
3301		hci_add_adv_entry(hdev, ev);
3302
3303		rssi = ev->data[ev->length];
3304		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3305					NULL, rssi, 0, ev->data, ev->length);
3306
3307		ptr += sizeof(*ev) + ev->length + 1;
3308	}
3309
3310	hci_dev_unlock(hdev);
3311}
3312
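/* LE Long Term Key Request event: reply with a stored LTK matching the
 * EDiv/Rand values if one exists (an STK is removed after this single
 * use), otherwise send a negative reply. */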
3313static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3314						struct sk_buff *skb)
3315{
3316	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3317	struct hci_cp_le_ltk_reply cp;
3318	struct hci_cp_le_ltk_neg_reply neg;
3319	struct hci_conn *conn;
3320	struct smp_ltk *ltk;
3321
3322	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
3323
3324	hci_dev_lock(hdev);
3325
3326	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3327	if (conn == NULL)
3328		goto not_found;
3329
3330	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3331	if (ltk == NULL)
3332		goto not_found;
3333
3334	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3335	cp.handle = cpu_to_le16(conn->handle);
3336
3337	if (ltk->authenticated)
3338		conn->sec_level = BT_SECURITY_HIGH;
3339
3340	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3341
3342	if (ltk->type & HCI_SMP_STK) {
3343		list_del(&ltk->list);
3344		kfree(ltk);
3345	}
3346
3347	hci_dev_unlock(hdev);
3348
3349	return;
3350
3351not_found:
3352	neg.handle = ev->handle;
3353	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3354	hci_dev_unlock(hdev);
3355}
3356
3357static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3358{
3359	struct hci_ev_le_meta *le_ev = (void *) skb->data;
3360
3361	skb_pull(skb, sizeof(*le_ev));
3362
3363	switch (le_ev->subevent) {
3364	case HCI_EV_LE_CONN_COMPLETE:
3365		hci_le_conn_complete_evt(hdev, skb);
3366		break;
3367
3368	case HCI_EV_LE_ADVERTISING_REPORT:
3369		hci_le_adv_report_evt(hdev, skb);
3370		break;
3371
3372	case HCI_EV_LE_LTK_REQ:
3373		hci_le_ltk_request_evt(hdev, skb);
3374		break;
3375
3376	default:
3377		break;
3378	}
3379}
3380
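/* Main HCI event demultiplexer: strip the event header and hand the
 * payload to the handler for the indicated event code. */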
3381void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3382{
3383	struct hci_event_hdr *hdr = (void *) skb->data;
3384	__u8 event = hdr->evt;
3385
3386	skb_pull(skb, HCI_EVENT_HDR_SIZE);
3387
3388	switch (event) {
3389	case HCI_EV_INQUIRY_COMPLETE:
3390		hci_inquiry_complete_evt(hdev, skb);
3391		break;
3392
3393	case HCI_EV_INQUIRY_RESULT:
3394		hci_inquiry_result_evt(hdev, skb);
3395		break;
3396
3397	case HCI_EV_CONN_COMPLETE:
3398		hci_conn_complete_evt(hdev, skb);
3399		break;
3400
3401	case HCI_EV_CONN_REQUEST:
3402		hci_conn_request_evt(hdev, skb);
3403		break;
3404
3405	case HCI_EV_DISCONN_COMPLETE:
3406		hci_disconn_complete_evt(hdev, skb);
3407		break;
3408
3409	case HCI_EV_AUTH_COMPLETE:
3410		hci_auth_complete_evt(hdev, skb);
3411		break;
3412
3413	case HCI_EV_REMOTE_NAME:
3414		hci_remote_name_evt(hdev, skb);
3415		break;
3416
3417	case HCI_EV_ENCRYPT_CHANGE:
3418		hci_encrypt_change_evt(hdev, skb);
3419		break;
3420
3421	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3422		hci_change_link_key_complete_evt(hdev, skb);
3423		break;
3424
3425	case HCI_EV_REMOTE_FEATURES:
3426		hci_remote_features_evt(hdev, skb);
3427		break;
3428
3429	case HCI_EV_REMOTE_VERSION:
3430		hci_remote_version_evt(hdev, skb);
3431		break;
3432
3433	case HCI_EV_QOS_SETUP_COMPLETE:
3434		hci_qos_setup_complete_evt(hdev, skb);
3435		break;
3436
3437	case HCI_EV_CMD_COMPLETE:
3438		hci_cmd_complete_evt(hdev, skb);
3439		break;
3440
3441	case HCI_EV_CMD_STATUS:
3442		hci_cmd_status_evt(hdev, skb);
3443		break;
3444
3445	case HCI_EV_ROLE_CHANGE:
3446		hci_role_change_evt(hdev, skb);
3447		break;
3448
3449	case HCI_EV_NUM_COMP_PKTS:
3450		hci_num_comp_pkts_evt(hdev, skb);
3451		break;
3452
3453	case HCI_EV_MODE_CHANGE:
3454		hci_mode_change_evt(hdev, skb);
3455		break;
3456
3457	case HCI_EV_PIN_CODE_REQ:
3458		hci_pin_code_request_evt(hdev, skb);
3459		break;
3460
3461	case HCI_EV_LINK_KEY_REQ:
3462		hci_link_key_request_evt(hdev, skb);
3463		break;
3464
3465	case HCI_EV_LINK_KEY_NOTIFY:
3466		hci_link_key_notify_evt(hdev, skb);
3467		break;
3468
3469	case HCI_EV_CLOCK_OFFSET:
3470		hci_clock_offset_evt(hdev, skb);
3471		break;
3472
3473	case HCI_EV_PKT_TYPE_CHANGE:
3474		hci_pkt_type_change_evt(hdev, skb);
3475		break;
3476
3477	case HCI_EV_PSCAN_REP_MODE:
3478		hci_pscan_rep_mode_evt(hdev, skb);
3479		break;
3480
3481	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3482		hci_inquiry_result_with_rssi_evt(hdev, skb);
3483		break;
3484
3485	case HCI_EV_REMOTE_EXT_FEATURES:
3486		hci_remote_ext_features_evt(hdev, skb);
3487		break;
3488
3489	case HCI_EV_SYNC_CONN_COMPLETE:
3490		hci_sync_conn_complete_evt(hdev, skb);
3491		break;
3492
3493	case HCI_EV_SYNC_CONN_CHANGED:
3494		hci_sync_conn_changed_evt(hdev, skb);
3495		break;
3496
3497	case HCI_EV_SNIFF_SUBRATE:
3498		hci_sniff_subrate_evt(hdev, skb);
3499		break;
3500
3501	case HCI_EV_EXTENDED_INQUIRY_RESULT:
3502		hci_extended_inquiry_result_evt(hdev, skb);
3503		break;
3504
3505	case HCI_EV_IO_CAPA_REQUEST:
3506		hci_io_capa_request_evt(hdev, skb);
3507		break;
3508
3509	case HCI_EV_IO_CAPA_REPLY:
3510		hci_io_capa_reply_evt(hdev, skb);
3511		break;
3512
3513	case HCI_EV_USER_CONFIRM_REQUEST:
3514		hci_user_confirm_request_evt(hdev, skb);
3515		break;
3516
3517	case HCI_EV_USER_PASSKEY_REQUEST:
3518		hci_user_passkey_request_evt(hdev, skb);
3519		break;
3520
3521	case HCI_EV_SIMPLE_PAIR_COMPLETE:
3522		hci_simple_pair_complete_evt(hdev, skb);
3523		break;
3524
3525	case HCI_EV_REMOTE_HOST_FEATURES:
3526		hci_remote_host_features_evt(hdev, skb);
3527		break;
3528
3529	case HCI_EV_LE_META:
3530		hci_le_meta_evt(hdev, skb);
3531		break;
3532
3533	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3534		hci_remote_oob_data_request_evt(hdev, skb);
3535		break;
3536
3537	case HCI_EV_NUM_COMP_BLOCKS:
3538		hci_num_comp_blocks_evt(hdev, skb);
3539		break;
3540
3541	default:
3542		BT_DBG("%s event 0x%x", hdev->name, event);
3543		break;
3544	}
3545
3546	kfree_skb(skb);
3547	hdev->stat.evt_rx++;
3548}
3549
3550/* Generate internal stack event */
3551void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
3552{
3553	struct hci_event_hdr *hdr;
3554	struct hci_ev_stack_internal *ev;
3555	struct sk_buff *skb;
3556
3557	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
3558	if (!skb)
3559		return;
3560
3561	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
3562	hdr->evt  = HCI_EV_STACK_INTERNAL;
3563	hdr->plen = sizeof(*ev) + dlen;
3564
3565	ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
3566	ev->type = type;
3567	memcpy(ev->data, data, dlen);
3568
3569	bt_cb(skb)->incoming = 1;
3570	__net_timestamp(skb);
3571
3572	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3573	skb->dev = (void *) hdev;
3574	hci_send_to_sock(hdev, skb, NULL);
3575	kfree_skb(skb);
3576}
3577
3578module_param(enable_le, bool, 0644);
3579MODULE_PARM_DESC(enable_le, "Enable LE support");
3580