/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/export.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"
#include "a2mp.h"

struct sco_param {
	u16 pkt_type;
	u16 max_latency;
	u8  retrans_effort;
};

static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
};

static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
};

static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
};

static void hci_le_create_connection_cancel(struct hci_conn *conn)
{
	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

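/* Initiate an outgoing BR/EDR ACL connection with the Create Connection
 * command, seeding page scan parameters from the inquiry cache when a
 * recent entry is available.
 */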
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			cp.clock_offset   = ie->data.clock_offset |
					    cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		if (ie->data.ssp_mode > 0)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

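/* Cancel an outgoing ACL connection attempt. Controllers older than
 * Bluetooth 1.2 do not support the Create Connection Cancel command.
 */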
static void hci_acl_create_connection_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("hcon %p", conn);

	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

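/* Reject an incoming synchronous connection request, indicating
 * limited resources.
 */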
static void hci_reject_sco(struct hci_conn *conn)
{
	struct hci_cp_reject_sync_conn_req cp;

	cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
	bacpy(&cp.bdaddr, &conn->dst);

	hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
}

int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("hcon %p", conn);

	/* When we are master of an established connection and it enters
	 * the disconnect timeout, then go ahead and try to read the
	 * current clock offset.  Processing of the result is done
	 * within the event handling and hci_clock_offset_evt function.
	 */
	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) {
		struct hci_dev *hdev = conn->hdev;
		struct hci_cp_read_clock_offset clkoff_cp;

		clkoff_cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
			     &clkoff_cp);
	}

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

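/* Disconnect the physical link of an AMP connection. */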
static void hci_amp_disconn(struct hci_conn *conn)
{
	struct hci_cp_disconn_phy_link cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
	cp.reason = hci_proto_disconn_ind(conn);
	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
		     sizeof(cp), &cp);
}

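/* Set up a legacy SCO connection on top of an existing ACL link using
 * the Add SCO Connection command.
 */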
static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

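/* Set up a synchronous (eSCO/SCO) connection on top of an existing ACL
 * link. The parameter set is picked from the requested air mode and the
 * current attempt, so that each retry falls back to a less demanding
 * configuration. Returns false when no parameter set is left to try or
 * the command could not be sent.
 */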
bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		if (lmp_esco_capable(conn->link)) {
			if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}

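/* Request an update of the LE connection parameters and mirror the new
 * values into any stored connection parameters for this device.
 * Returns 0x01 if stored parameters were found, 0x00 otherwise.
 */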
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}

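/* Start LE link layer encryption using the given EDIV, Rand and
 * Long Term Key.
 */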
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16])
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.rand = rand;
	cp.ediv = ediv;
	memcpy(cp.ltk, ltk, sizeof(cp.ltk));

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	if (!sco)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}

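/* Disconnect timeout handler: once the last reference to a connection
 * has been dropped this either cancels a pending connection attempt,
 * rejects an incoming SCO/eSCO request or disconnects the established
 * link, depending on the connection state.
 */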
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher than 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->out) {
			if (conn->type == ACL_LINK)
				hci_acl_create_connection_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_create_connection_cancel(conn);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			hci_reject_sco(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		if (conn->type == AMP_LINK) {
			hci_amp_disconn(conn);
		} else {
			__u8 reason = hci_proto_disconn_ind(conn);
			hci_disconnect(conn, reason);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

/* Enter sniff mode */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

static void hci_conn_auto_accept(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     auto_accept_work.work);

	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}

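/* Timeout handler for pending LE connection attempts: cancel the
 * connection attempt or, when directed advertising was used, disable
 * advertising and report the failure.
 */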
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
		u8 enable = 0x00;
		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			     &enable);
		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		return;
	}

	hci_le_create_connection_cancel(conn);
}

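/* Allocate and initialize a new hci_conn object, add it to the
 * controller's connection hash and register it with sysfs.
 */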
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	hci_conn_init_sysfs(conn);

	return conn;
}

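/* Tear down a connection object: cancel its delayed work, return any
 * unacknowledged packet credits, flush its channels and queued data,
 * and release its controller and connection references.
 */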
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work_sync(&conn->le_conn_timeout);

		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_drop(acl);
		}
	}

	hci_chan_list_flush(conn);

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	skb_queue_purge(&conn->data_q);

	hci_conn_del_sysfs(conn);

	hci_dev_put(hdev);

	hci_conn_put(conn);

	return 0;
}

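/* Find a usable BR/EDR controller for reaching the given destination:
 * match on the source address when one is given, otherwise pick the
 * first controller whose address differs from the destination.
 */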
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
		    d->dev_type != HCI_BREDR)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* This function requires the caller holds hdev->lock */
void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params && params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	conn->state = BT_CLOSED;

	mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
			    status);

	hci_proto_connect_cfm(conn, status);

	hci_conn_del(conn);

	/* Since we may have temporarily stopped the background scanning in
	 * favor of connection establishment, we should restart it.
	 */
	hci_update_background_scan(hdev);

	/* Re-enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	mgmt_reenable_advertising(hdev);
}

static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_conn *conn;

	if (status == 0)
		return;

	BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
	       status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn)
		goto done;

	hci_le_conn_failed(conn, status);

done:
	hci_dev_unlock(hdev);
}

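/* Append an LE Create Connection command for this connection to the
 * given request, using the connection parameters already set up in
 * the hci_conn.
 */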
static void hci_req_add_le_create_conn(struct hci_request *req,
				       struct hci_conn *conn)
{
	struct hci_cp_le_create_conn cp;
	struct hci_dev *hdev = conn->hdev;
	u8 own_addr_type;

	memset(&cp, 0, sizeof(cp));

	/* Update random address, but set require_privacy to false so
	 * that we never connect with an unresolvable address.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.own_address_type = own_addr_type;
	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);

	conn->state = BT_CONNECT;
}

static void hci_req_directed_advertising(struct hci_request *req,
					 struct hci_conn *conn)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type;
	u8 enable;

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.type = LE_ADV_DIRECT_IND;
	cp.own_address_type = own_addr_type;
	cp.direct_addr_type = conn->dst_type;
	bacpy(&cp.direct_addr, &conn->dst);
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	enable = 0x01;
	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);

	conn->state = BT_CONNECT;
}

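/* Look up or create an LE connection to the given destination. Only
 * one LE connection attempt may be pending at a time. Depending on the
 * requested role this either queues an LE Create Connection command or
 * starts directed advertising and waits for the remote device to
 * connect.
 */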
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, u8 sec_level, u16 conn_timeout,
				u8 role)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	struct hci_request req;
	int err;

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
	if (conn) {
		conn->pending_sec_level = sec_level;
		goto done;
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		return ERR_PTR(-EBUSY);

	/* When given an identity address with existing identity
	 * resolving key, the connection needs to be established
	 * to a resolvable random address.
	 *
	 * This uses the cached random resolvable address from
	 * a previous scan. When no cached address is available,
	 * try connecting to the identity address instead.
	 *
	 * Storing the resolvable random address is required here
	 * to handle connection failures. The address will later
	 * be resolved back into the original identity address
	 * from the connect request.
	 */
	irk = hci_find_irk_by_addr(hdev, dst, dst_type);
	if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
		dst = &irk->rpa;
		dst_type = ADDR_LE_DEV_RANDOM;
	}

	conn = hci_conn_add(hdev, LE_LINK, dst, role);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;

	hci_req_init(&req, hdev);

	/* Disable advertising if we're active. For master role
	 * connections most controllers will refuse to connect if
	 * advertising is enabled, and for slave role connections we
	 * anyway have to disable it in order to start directed
	 * advertising.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
		u8 enable = 0x00;
		hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			    &enable);
	}

	/* If requested to connect as slave use directed advertising */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* If we're active scanning most controllers are unable
		 * to initiate advertising. Simply reject the attempt.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
		    hdev->le_scan_type == LE_SCAN_ACTIVE) {
			skb_queue_purge(&req.cmd_q);
			hci_conn_del(conn);
			return ERR_PTR(-EBUSY);
		}

		hci_req_directed_advertising(&req, conn);
		goto create_conn;
	}

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		conn->le_conn_min_interval = params->conn_min_interval;
		conn->le_conn_max_interval = params->conn_max_interval;
		conn->le_conn_latency = params->conn_latency;
		conn->le_supv_timeout = params->supervision_timeout;
	} else {
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
		conn->le_conn_latency = hdev->le_conn_latency;
		conn->le_supv_timeout = hdev->le_supv_timeout;
	}

	/* If controller is scanning, we stop it since some controllers are
	 * not able to scan and connect at the same time. Also set the
	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
	 * handler for scan disabling knows to set the correct discovery
	 * state.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
		hci_req_add_le_scan_disable(&req);
		set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
	}

	hci_req_add_le_create_conn(&req, conn);

create_conn:
	err = hci_req_run(&req, create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

done:
	hci_conn_hold(conn);
	return conn;
}

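/* Look up or create an ACL connection to the given destination and,
 * if the link is not yet established, start the connection procedure.
 */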
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type)
{
	struct hci_conn *acl;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return ERR_PTR(-EOPNOTSUPP);

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}

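/* Look up or create a SCO/eSCO connection to the given destination,
 * setting up the underlying ACL link first when necessary. The
 * synchronous connection itself is only set up once the ACL link is
 * connected and not in the middle of a mode change.
 */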
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	sco->setting = setting;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	/* In Secure Connections Only mode, it is required that Secure
	 * Connections is used and the link is encrypted with AES-CCM
	 * using a P-256 authenticated combination key.
	 */
	if (test_bit(HCI_SC_ONLY, &conn->hdev->dev_flags)) {
		if (!hci_conn_sc_enabled(conn) ||
		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
			return 0;
	}

	if (hci_conn_ssp_enabled(conn) &&
	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return 0;

	return 1;
}

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For SDP we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non-2.1 devices and low security levels we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security levels 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key always has sufficient security for security
	   levels 1 and 2. The high security level additionally requires
	   that the combination key was generated using the maximum PIN
	   code length (16). This applies to pre-2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
	BT_DBG("hcon %p", conn);

	/* Accept if non-secure or higher security level is required */
	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
		return 1;

	/* Accept if secure or higher security level is already present */
	if (conn->sec_level == BT_SECURITY_HIGH ||
	    conn->sec_level == BT_SECURITY_FIPS)
		return 1;

	/* Reject not secure link */
	return 0;
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
			     sizeof(cp), &cp);
	}

	return 0;
}

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("hcon %p", conn);

	if (role == conn->role)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_create_connection(conn);

	hci_dev_unlock(hdev);
}

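/* Translate the connection role and flags into the legacy HCI_LM_*
 * link mode bits reported through the ioctl interface.
 */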
static u32 get_link_mode(struct hci_conn *conn)
{
	u32 link_mode = 0;

	if (conn->role == HCI_ROLE_MASTER)
		link_mode |= HCI_LM_MASTER;

	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		link_mode |= HCI_LM_ENCRYPT;

	if (test_bit(HCI_CONN_AUTH, &conn->flags))
		link_mode |= HCI_LM_AUTH;

	if (test_bit(HCI_CONN_SECURE, &conn->flags))
		link_mode |= HCI_LM_SECURE;

	if (test_bit(HCI_CONN_FIPS, &conn->flags))
		link_mode |= HCI_LM_FIPS;

	return link_mode;
}

int hci_get_conn_list(void __user *arg)
{
	struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = get_link_mode(c);
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = get_link_mode(conn);
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}

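/* Allocate a new hci_chan for the given connection and link it into
 * the connection's channel list. Returns NULL if the connection is
 * being dropped or on allocation failure.
 */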
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
		BT_DBG("Refusing to create new hci_chan");
		return NULL;
	}

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->conn = hci_conn_get(conn);
	skb_queue_head_init(&chan->data_q);
	chan->state = BT_CONNECTED;

	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}

void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	synchronize_rcu();

	/* Prevent new hci_chan's from being created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}

void hci_chan_list_flush(struct hci_conn *conn)
{
	struct hci_chan *chan, *n;

	BT_DBG("hcon %p", conn);

	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
		hci_chan_del(chan);
}

static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
						 __u16 handle)
{
	struct hci_chan *hchan;

	list_for_each_entry(hchan, &hcon->chan_list, list) {
		if (hchan->handle == handle)
			return hchan;
	}

	return NULL;
}

struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *hcon;
	struct hci_chan *hchan = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(hcon, &h->list, list) {
		hchan = __hci_chan_lookup_handle(hcon, handle);
		if (hchan)
			break;
	}

	rcu_read_unlock();

	return hchan;
}