mgmt.c revision e546099c3194ef6ecf46a8a50414005c29a46bc4
1/*
2   BlueZ - Bluetooth protocol stack for Linux
3
4   Copyright (C) 2010  Nokia Corporation
5   Copyright (C) 2011-2012 Intel Corporation
6
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License version 2 as
9   published by the Free Software Foundation;
10
11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22   SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI Management interface */
26
27#include <linux/module.h>
28#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
32#include <net/bluetooth/mgmt.h>
33
34#include "smp.h"
35
36#define MGMT_VERSION	1
37#define MGMT_REVISION	4
38
39static const u16 mgmt_commands[] = {
40	MGMT_OP_READ_INDEX_LIST,
41	MGMT_OP_READ_INFO,
42	MGMT_OP_SET_POWERED,
43	MGMT_OP_SET_DISCOVERABLE,
44	MGMT_OP_SET_CONNECTABLE,
45	MGMT_OP_SET_FAST_CONNECTABLE,
46	MGMT_OP_SET_PAIRABLE,
47	MGMT_OP_SET_LINK_SECURITY,
48	MGMT_OP_SET_SSP,
49	MGMT_OP_SET_HS,
50	MGMT_OP_SET_LE,
51	MGMT_OP_SET_DEV_CLASS,
52	MGMT_OP_SET_LOCAL_NAME,
53	MGMT_OP_ADD_UUID,
54	MGMT_OP_REMOVE_UUID,
55	MGMT_OP_LOAD_LINK_KEYS,
56	MGMT_OP_LOAD_LONG_TERM_KEYS,
57	MGMT_OP_DISCONNECT,
58	MGMT_OP_GET_CONNECTIONS,
59	MGMT_OP_PIN_CODE_REPLY,
60	MGMT_OP_PIN_CODE_NEG_REPLY,
61	MGMT_OP_SET_IO_CAPABILITY,
62	MGMT_OP_PAIR_DEVICE,
63	MGMT_OP_CANCEL_PAIR_DEVICE,
64	MGMT_OP_UNPAIR_DEVICE,
65	MGMT_OP_USER_CONFIRM_REPLY,
66	MGMT_OP_USER_CONFIRM_NEG_REPLY,
67	MGMT_OP_USER_PASSKEY_REPLY,
68	MGMT_OP_USER_PASSKEY_NEG_REPLY,
69	MGMT_OP_READ_LOCAL_OOB_DATA,
70	MGMT_OP_ADD_REMOTE_OOB_DATA,
71	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
72	MGMT_OP_START_DISCOVERY,
73	MGMT_OP_STOP_DISCOVERY,
74	MGMT_OP_CONFIRM_NAME,
75	MGMT_OP_BLOCK_DEVICE,
76	MGMT_OP_UNBLOCK_DEVICE,
77	MGMT_OP_SET_DEVICE_ID,
78	MGMT_OP_SET_ADVERTISING,
79	MGMT_OP_SET_BREDR,
80	MGMT_OP_SET_STATIC_ADDRESS,
81	MGMT_OP_SET_SCAN_PARAMS,
82};
83
84static const u16 mgmt_events[] = {
85	MGMT_EV_CONTROLLER_ERROR,
86	MGMT_EV_INDEX_ADDED,
87	MGMT_EV_INDEX_REMOVED,
88	MGMT_EV_NEW_SETTINGS,
89	MGMT_EV_CLASS_OF_DEV_CHANGED,
90	MGMT_EV_LOCAL_NAME_CHANGED,
91	MGMT_EV_NEW_LINK_KEY,
92	MGMT_EV_NEW_LONG_TERM_KEY,
93	MGMT_EV_DEVICE_CONNECTED,
94	MGMT_EV_DEVICE_DISCONNECTED,
95	MGMT_EV_CONNECT_FAILED,
96	MGMT_EV_PIN_CODE_REQUEST,
97	MGMT_EV_USER_CONFIRM_REQUEST,
98	MGMT_EV_USER_PASSKEY_REQUEST,
99	MGMT_EV_AUTH_FAILED,
100	MGMT_EV_DEVICE_FOUND,
101	MGMT_EV_DISCOVERING,
102	MGMT_EV_DEVICE_BLOCKED,
103	MGMT_EV_DEVICE_UNBLOCKED,
104	MGMT_EV_DEVICE_UNPAIRED,
105	MGMT_EV_PASSKEY_NOTIFY,
106};
107
108#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
109
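/* A controller only counts as powered here once HCI_UP is set and the
 * HCI_AUTO_OFF phase is no longer active.
 */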
110#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
111				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
112
113struct pending_cmd {
114	struct list_head list;
115	u16 opcode;
116	int index;
117	void *param;
118	struct sock *sk;
119	void *user_data;
120};
121
122/* HCI to MGMT error code conversion table */
123static u8 mgmt_status_table[] = {
124	MGMT_STATUS_SUCCESS,
125	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
126	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
127	MGMT_STATUS_FAILED,		/* Hardware Failure */
128	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
129	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
130	MGMT_STATUS_NOT_PAIRED,		/* PIN or Key Missing */
131	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
132	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
133	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
134	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
135	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
136	MGMT_STATUS_BUSY,		/* Command Disallowed */
137	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
138	MGMT_STATUS_REJECTED,		/* Rejected Security */
139	MGMT_STATUS_REJECTED,		/* Rejected Personal */
140	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
141	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
142	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
143	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
144	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
145	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
146	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
147	MGMT_STATUS_BUSY,		/* Repeated Attempts */
148	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
149	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
150	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
151	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
152	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
153	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
154	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
155	MGMT_STATUS_FAILED,		/* Unspecified Error */
156	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
157	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
158	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
159	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
160	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
161	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
162	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
163	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
164	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
165	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
166	MGMT_STATUS_FAILED,		/* Transaction Collision */
167	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
168	MGMT_STATUS_REJECTED,		/* QoS Rejected */
169	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
170	MGMT_STATUS_REJECTED,		/* Insufficient Security */
171	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
172	MGMT_STATUS_BUSY,		/* Role Switch Pending */
173	MGMT_STATUS_FAILED,		/* Slot Violation */
174	MGMT_STATUS_FAILED,		/* Role Switch Failed */
175	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
176	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
177	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
178	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
179	MGMT_STATUS_BUSY,		/* Controller Busy */
180	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
181	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
182	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
183	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
184	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
185};
186
187static u8 mgmt_status(u8 hci_status)
188{
189	if (hci_status < ARRAY_SIZE(mgmt_status_table))
190		return mgmt_status_table[hci_status];
191
192	return MGMT_STATUS_FAILED;
193}
194
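/* Send a Command Status event for the given command back to the mgmt socket. */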
195static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
196{
197	struct sk_buff *skb;
198	struct mgmt_hdr *hdr;
199	struct mgmt_ev_cmd_status *ev;
200	int err;
201
202	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
203
204	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
205	if (!skb)
206		return -ENOMEM;
207
208	hdr = (void *) skb_put(skb, sizeof(*hdr));
209
210	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
211	hdr->index = cpu_to_le16(index);
212	hdr->len = cpu_to_le16(sizeof(*ev));
213
214	ev = (void *) skb_put(skb, sizeof(*ev));
215	ev->status = status;
216	ev->opcode = cpu_to_le16(cmd);
217
218	err = sock_queue_rcv_skb(sk, skb);
219	if (err < 0)
220		kfree_skb(skb);
221
222	return err;
223}
224
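/* Send a Command Complete event, optionally carrying return parameters,
 * back to the mgmt socket.
 */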
225static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
226			void *rp, size_t rp_len)
227{
228	struct sk_buff *skb;
229	struct mgmt_hdr *hdr;
230	struct mgmt_ev_cmd_complete *ev;
231	int err;
232
233	BT_DBG("sock %p", sk);
234
235	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
236	if (!skb)
237		return -ENOMEM;
238
239	hdr = (void *) skb_put(skb, sizeof(*hdr));
240
241	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
242	hdr->index = cpu_to_le16(index);
243	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
244
245	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
246	ev->opcode = cpu_to_le16(cmd);
247	ev->status = status;
248
249	if (rp)
250		memcpy(ev->data, rp, rp_len);
251
252	err = sock_queue_rcv_skb(sk, skb);
253	if (err < 0)
254		kfree_skb(skb);
255
256	return err;
257}
258
259static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
260			u16 data_len)
261{
262	struct mgmt_rp_read_version rp;
263
264	BT_DBG("sock %p", sk);
265
266	rp.version = MGMT_VERSION;
267	rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
268
269	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
270			    sizeof(rp));
271}
272
273static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
274			 u16 data_len)
275{
276	struct mgmt_rp_read_commands *rp;
277	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
278	const u16 num_events = ARRAY_SIZE(mgmt_events);
279	__le16 *opcode;
280	size_t rp_size;
281	int i, err;
282
283	BT_DBG("sock %p", sk);
284
285	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
286
287	rp = kmalloc(rp_size, GFP_KERNEL);
288	if (!rp)
289		return -ENOMEM;
290
291	rp->num_commands = __constant_cpu_to_le16(num_commands);
292	rp->num_events = __constant_cpu_to_le16(num_events);
293
294	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
295		put_unaligned_le16(mgmt_commands[i], opcode);
296
297	for (i = 0; i < num_events; i++, opcode++)
298		put_unaligned_le16(mgmt_events[i], opcode);
299
300	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
301			   rp_size);
302	kfree(rp);
303
304	return err;
305}
306
307static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
308			   u16 data_len)
309{
310	struct mgmt_rp_read_index_list *rp;
311	struct hci_dev *d;
312	size_t rp_len;
313	u16 count;
314	int err;
315
316	BT_DBG("sock %p", sk);
317
318	read_lock(&hci_dev_list_lock);
319
320	count = 0;
321	list_for_each_entry(d, &hci_dev_list, list) {
322		if (d->dev_type == HCI_BREDR)
323			count++;
324	}
325
326	rp_len = sizeof(*rp) + (2 * count);
327	rp = kmalloc(rp_len, GFP_ATOMIC);
328	if (!rp) {
329		read_unlock(&hci_dev_list_lock);
330		return -ENOMEM;
331	}
332
333	count = 0;
334	list_for_each_entry(d, &hci_dev_list, list) {
335		if (test_bit(HCI_SETUP, &d->dev_flags))
336			continue;
337
338		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
339			continue;
340
341		if (d->dev_type == HCI_BREDR) {
342			rp->index[count++] = cpu_to_le16(d->id);
343			BT_DBG("Added hci%u", d->id);
344		}
345	}
346
347	rp->num_controllers = cpu_to_le16(count);
348	rp_len = sizeof(*rp) + (2 * count);
349
350	read_unlock(&hci_dev_list_lock);
351
352	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
353			   rp_len);
354
355	kfree(rp);
356
357	return err;
358}
359
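/* Build the bitmask of settings the controller supports based on its
 * BR/EDR, SSP and LE capabilities.
 */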
360static u32 get_supported_settings(struct hci_dev *hdev)
361{
362	u32 settings = 0;
363
364	settings |= MGMT_SETTING_POWERED;
365	settings |= MGMT_SETTING_PAIRABLE;
366
367	if (lmp_bredr_capable(hdev)) {
368		settings |= MGMT_SETTING_CONNECTABLE;
369		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
370			settings |= MGMT_SETTING_FAST_CONNECTABLE;
371		settings |= MGMT_SETTING_DISCOVERABLE;
372		settings |= MGMT_SETTING_BREDR;
373		settings |= MGMT_SETTING_LINK_SECURITY;
374
375		if (lmp_ssp_capable(hdev)) {
376			settings |= MGMT_SETTING_SSP;
377			settings |= MGMT_SETTING_HS;
378		}
379	}
380
381	if (lmp_le_capable(hdev)) {
382		settings |= MGMT_SETTING_LE;
383		settings |= MGMT_SETTING_ADVERTISING;
384	}
385
386	return settings;
387}
388
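/* Build the bitmask of settings that are currently enabled. */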
389static u32 get_current_settings(struct hci_dev *hdev)
390{
391	u32 settings = 0;
392
393	if (hdev_is_powered(hdev))
394		settings |= MGMT_SETTING_POWERED;
395
396	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
397		settings |= MGMT_SETTING_CONNECTABLE;
398
399	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
400		settings |= MGMT_SETTING_FAST_CONNECTABLE;
401
402	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
403		settings |= MGMT_SETTING_DISCOVERABLE;
404
405	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
406		settings |= MGMT_SETTING_PAIRABLE;
407
408	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
409		settings |= MGMT_SETTING_BREDR;
410
411	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
412		settings |= MGMT_SETTING_LE;
413
414	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
415		settings |= MGMT_SETTING_LINK_SECURITY;
416
417	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
418		settings |= MGMT_SETTING_SSP;
419
420	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
421		settings |= MGMT_SETTING_HS;
422
423	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
424		settings |= MGMT_SETTING_ADVERTISING;
425
426	return settings;
427}
428
429#define PNP_INFO_SVCLASS_ID		0x1200
430
431static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
432{
433	u8 *ptr = data, *uuids_start = NULL;
434	struct bt_uuid *uuid;
435
436	if (len < 4)
437		return ptr;
438
439	list_for_each_entry(uuid, &hdev->uuids, list) {
440		u16 uuid16;
441
442		if (uuid->size != 16)
443			continue;
444
445		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
446		if (uuid16 < 0x1100)
447			continue;
448
449		if (uuid16 == PNP_INFO_SVCLASS_ID)
450			continue;
451
452		if (!uuids_start) {
453			uuids_start = ptr;
454			uuids_start[0] = 1;
455			uuids_start[1] = EIR_UUID16_ALL;
456			ptr += 2;
457		}
458
459		/* Stop if not enough space to put next UUID */
460		if ((ptr - data) + sizeof(u16) > len) {
461			uuids_start[1] = EIR_UUID16_SOME;
462			break;
463		}
464
465		*ptr++ = (uuid16 & 0x00ff);
466		*ptr++ = (uuid16 & 0xff00) >> 8;
467		uuids_start[0] += sizeof(uuid16);
468	}
469
470	return ptr;
471}
472
473static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
474{
475	u8 *ptr = data, *uuids_start = NULL;
476	struct bt_uuid *uuid;
477
478	if (len < 6)
479		return ptr;
480
481	list_for_each_entry(uuid, &hdev->uuids, list) {
482		if (uuid->size != 32)
483			continue;
484
485		if (!uuids_start) {
486			uuids_start = ptr;
487			uuids_start[0] = 1;
488			uuids_start[1] = EIR_UUID32_ALL;
489			ptr += 2;
490		}
491
492		/* Stop if not enough space to put next UUID */
493		if ((ptr - data) + sizeof(u32) > len) {
494			uuids_start[1] = EIR_UUID32_SOME;
495			break;
496		}
497
498		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
499		ptr += sizeof(u32);
500		uuids_start[0] += sizeof(u32);
501	}
502
503	return ptr;
504}
505
506static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
507{
508	u8 *ptr = data, *uuids_start = NULL;
509	struct bt_uuid *uuid;
510
511	if (len < 18)
512		return ptr;
513
514	list_for_each_entry(uuid, &hdev->uuids, list) {
515		if (uuid->size != 128)
516			continue;
517
518		if (!uuids_start) {
519			uuids_start = ptr;
520			uuids_start[0] = 1;
521			uuids_start[1] = EIR_UUID128_ALL;
522			ptr += 2;
523		}
524
525		/* Stop if not enough space to put next UUID */
526		if ((ptr - data) + 16 > len) {
527			uuids_start[1] = EIR_UUID128_SOME;
528			break;
529		}
530
531		memcpy(ptr, uuid->uuid, 16);
532		ptr += 16;
533		uuids_start[0] += 16;
534	}
535
536	return ptr;
537}
538
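/* Assemble LE advertising data (flags, TX power and local name) into ptr
 * and return the number of bytes used.
 */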
539static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
540{
541	u8 ad_len = 0, flags = 0;
542	size_t name_len;
543
544	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
545		flags |= LE_AD_GENERAL;
546
547	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
548		if (lmp_le_br_capable(hdev))
549			flags |= LE_AD_SIM_LE_BREDR_CTRL;
550		if (lmp_host_le_br_capable(hdev))
551			flags |= LE_AD_SIM_LE_BREDR_HOST;
552	} else {
553		flags |= LE_AD_NO_BREDR;
554	}
555
556	if (flags) {
557		BT_DBG("adv flags 0x%02x", flags);
558
559		ptr[0] = 2;
560		ptr[1] = EIR_FLAGS;
561		ptr[2] = flags;
562
563		ad_len += 3;
564		ptr += 3;
565	}
566
567	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
568		ptr[0] = 2;
569		ptr[1] = EIR_TX_POWER;
570		ptr[2] = (u8) hdev->adv_tx_power;
571
572		ad_len += 3;
573		ptr += 3;
574	}
575
576	name_len = strlen(hdev->dev_name);
577	if (name_len > 0) {
578		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
579
580		if (name_len > max_len) {
581			name_len = max_len;
582			ptr[1] = EIR_NAME_SHORT;
583		} else
584			ptr[1] = EIR_NAME_COMPLETE;
585
586		ptr[0] = name_len + 1;
587
588		memcpy(ptr + 2, hdev->dev_name, name_len);
589
590		ad_len += (name_len + 2);
591		ptr += (name_len + 2);
592	}
593
594	return ad_len;
595}
596
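/* Queue an LE Set Advertising Data command if the data has changed. */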
597static void update_ad(struct hci_request *req)
598{
599	struct hci_dev *hdev = req->hdev;
600	struct hci_cp_le_set_adv_data cp;
601	u8 len;
602
603	if (!lmp_le_capable(hdev))
604		return;
605
606	memset(&cp, 0, sizeof(cp));
607
608	len = create_ad(hdev, cp.data);
609
610	if (hdev->adv_data_len == len &&
611	    memcmp(cp.data, hdev->adv_data, len) == 0)
612		return;
613
614	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
615	hdev->adv_data_len = len;
616
617	cp.length = len;
618
619	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
620}
621
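/* Assemble extended inquiry response data: local name, TX power,
 * Device ID and the 16/32/128-bit service class UUID lists.
 */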
622static void create_eir(struct hci_dev *hdev, u8 *data)
623{
624	u8 *ptr = data;
625	size_t name_len;
626
627	name_len = strlen(hdev->dev_name);
628
629	if (name_len > 0) {
630		/* EIR Data type */
631		if (name_len > 48) {
632			name_len = 48;
633			ptr[1] = EIR_NAME_SHORT;
634		} else
635			ptr[1] = EIR_NAME_COMPLETE;
636
637		/* EIR Data length */
638		ptr[0] = name_len + 1;
639
640		memcpy(ptr + 2, hdev->dev_name, name_len);
641
642		ptr += (name_len + 2);
643	}
644
645	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
646		ptr[0] = 2;
647		ptr[1] = EIR_TX_POWER;
648		ptr[2] = (u8) hdev->inq_tx_power;
649
650		ptr += 3;
651	}
652
653	if (hdev->devid_source > 0) {
654		ptr[0] = 9;
655		ptr[1] = EIR_DEVICE_ID;
656
657		put_unaligned_le16(hdev->devid_source, ptr + 2);
658		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
659		put_unaligned_le16(hdev->devid_product, ptr + 6);
660		put_unaligned_le16(hdev->devid_version, ptr + 8);
661
662		ptr += 10;
663	}
664
665	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
666	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
667	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
668}
669
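/* Queue a Write Extended Inquiry Response command, but only when powered,
 * EIR is supported, SSP is enabled, the service cache is inactive and the
 * data actually changed.
 */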
670static void update_eir(struct hci_request *req)
671{
672	struct hci_dev *hdev = req->hdev;
673	struct hci_cp_write_eir cp;
674
675	if (!hdev_is_powered(hdev))
676		return;
677
678	if (!lmp_ext_inq_capable(hdev))
679		return;
680
681	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
682		return;
683
684	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
685		return;
686
687	memset(&cp, 0, sizeof(cp));
688
689	create_eir(hdev, cp.data);
690
691	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
692		return;
693
694	memcpy(hdev->eir, cp.data, sizeof(cp.data));
695
696	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
697}
698
699static u8 get_service_classes(struct hci_dev *hdev)
700{
701	struct bt_uuid *uuid;
702	u8 val = 0;
703
704	list_for_each_entry(uuid, &hdev->uuids, list)
705		val |= uuid->svc_hint;
706
707	return val;
708}
709
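/* Queue a Write Class of Device command reflecting the major/minor class,
 * the service class bits and the limited discoverable flag.
 */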
710static void update_class(struct hci_request *req)
711{
712	struct hci_dev *hdev = req->hdev;
713	u8 cod[3];
714
715	BT_DBG("%s", hdev->name);
716
717	if (!hdev_is_powered(hdev))
718		return;
719
720	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
721		return;
722
723	cod[0] = hdev->minor_class;
724	cod[1] = hdev->major_class;
725	cod[2] = get_service_classes(hdev);
726
727	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
728		cod[1] |= 0x20;
729
730	if (memcmp(cod, hdev->dev_class, 3) == 0)
731		return;
732
733	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
734}
735
736static void service_cache_off(struct work_struct *work)
737{
738	struct hci_dev *hdev = container_of(work, struct hci_dev,
739					    service_cache.work);
740	struct hci_request req;
741
742	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
743		return;
744
745	hci_req_init(&req, hdev);
746
747	hci_dev_lock(hdev);
748
749	update_eir(&req);
750	update_class(&req);
751
752	hci_dev_unlock(hdev);
753
754	hci_req_run(&req, NULL);
755}
756
757static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
758{
759	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
760		return;
761
762	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
763
764	/* Non-mgmt controlled devices get this bit set
765	 * implicitly so that pairing works for them. However,
766	 * for mgmt we require user-space to explicitly enable
767	 * it.
768	 */
769	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
770}
771
772static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
773				void *data, u16 data_len)
774{
775	struct mgmt_rp_read_info rp;
776
777	BT_DBG("sock %p %s", sk, hdev->name);
778
779	hci_dev_lock(hdev);
780
781	memset(&rp, 0, sizeof(rp));
782
783	bacpy(&rp.bdaddr, &hdev->bdaddr);
784
785	rp.version = hdev->hci_ver;
786	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
787
788	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
789	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
790
791	memcpy(rp.dev_class, hdev->dev_class, 3);
792
793	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
794	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
795
796	hci_dev_unlock(hdev);
797
798	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
799			    sizeof(rp));
800}
801
802static void mgmt_pending_free(struct pending_cmd *cmd)
803{
804	sock_put(cmd->sk);
805	kfree(cmd->param);
806	kfree(cmd);
807}
808
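/* Allocate a pending command entry, copy its parameters, take a reference
 * on the socket and add it to the device's mgmt_pending list.
 */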
809static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
810					    struct hci_dev *hdev, void *data,
811					    u16 len)
812{
813	struct pending_cmd *cmd;
814
815	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
816	if (!cmd)
817		return NULL;
818
819	cmd->opcode = opcode;
820	cmd->index = hdev->id;
821
822	cmd->param = kmalloc(len, GFP_KERNEL);
823	if (!cmd->param) {
824		kfree(cmd);
825		return NULL;
826	}
827
828	if (data)
829		memcpy(cmd->param, data, len);
830
831	cmd->sk = sk;
832	sock_hold(sk);
833
834	list_add(&cmd->list, &hdev->mgmt_pending);
835
836	return cmd;
837}
838
839static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
840				 void (*cb)(struct pending_cmd *cmd,
841					    void *data),
842				 void *data)
843{
844	struct pending_cmd *cmd, *tmp;
845
846	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
847		if (opcode > 0 && cmd->opcode != opcode)
848			continue;
849
850		cb(cmd, data);
851	}
852}
853
854static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
855{
856	struct pending_cmd *cmd;
857
858	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
859		if (cmd->opcode == opcode)
860			return cmd;
861	}
862
863	return NULL;
864}
865
866static void mgmt_pending_remove(struct pending_cmd *cmd)
867{
868	list_del(&cmd->list);
869	mgmt_pending_free(cmd);
870}
871
872static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
873{
874	__le32 settings = cpu_to_le32(get_current_settings(hdev));
875
876	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
877			    sizeof(settings));
878}
879
880static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
881		       u16 len)
882{
883	struct mgmt_mode *cp = data;
884	struct pending_cmd *cmd;
885	int err;
886
887	BT_DBG("request for %s", hdev->name);
888
889	if (cp->val != 0x00 && cp->val != 0x01)
890		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
891				  MGMT_STATUS_INVALID_PARAMS);
892
893	hci_dev_lock(hdev);
894
895	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
896		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
897				 MGMT_STATUS_BUSY);
898		goto failed;
899	}
900
901	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
902		cancel_delayed_work(&hdev->power_off);
903
904		if (cp->val) {
905			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
906					 data, len);
907			err = mgmt_powered(hdev, 1);
908			goto failed;
909		}
910	}
911
912	if (!!cp->val == hdev_is_powered(hdev)) {
913		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
914		goto failed;
915	}
916
917	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
918	if (!cmd) {
919		err = -ENOMEM;
920		goto failed;
921	}
922
923	if (cp->val)
924		queue_work(hdev->req_workqueue, &hdev->power_on);
925	else
926		queue_work(hdev->req_workqueue, &hdev->power_off.work);
927
928	err = 0;
929
930failed:
931	hci_dev_unlock(hdev);
932	return err;
933}
934
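/* Broadcast a mgmt event to all open control sockets except skip_sk. */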
935static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
936		      struct sock *skip_sk)
937{
938	struct sk_buff *skb;
939	struct mgmt_hdr *hdr;
940
941	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
942	if (!skb)
943		return -ENOMEM;
944
945	hdr = (void *) skb_put(skb, sizeof(*hdr));
946	hdr->opcode = cpu_to_le16(event);
947	if (hdev)
948		hdr->index = cpu_to_le16(hdev->id);
949	else
950		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
951	hdr->len = cpu_to_le16(data_len);
952
953	if (data)
954		memcpy(skb_put(skb, data_len), data, data_len);
955
956	/* Time stamp */
957	__net_timestamp(skb);
958
959	hci_send_to_control(skb, skip_sk);
960	kfree_skb(skb);
961
962	return 0;
963}
964
965static int new_settings(struct hci_dev *hdev, struct sock *skip)
966{
967	__le32 ev;
968
969	ev = cpu_to_le32(get_current_settings(hdev));
970
971	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
972}
973
974struct cmd_lookup {
975	struct sock *sk;
976	struct hci_dev *hdev;
977	u8 mgmt_status;
978};
979
980static void settings_rsp(struct pending_cmd *cmd, void *data)
981{
982	struct cmd_lookup *match = data;
983
984	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
985
986	list_del(&cmd->list);
987
988	if (match->sk == NULL) {
989		match->sk = cmd->sk;
990		sock_hold(match->sk);
991	}
992
993	mgmt_pending_free(cmd);
994}
995
996static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
997{
998	u8 *status = data;
999
1000	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1001	mgmt_pending_remove(cmd);
1002}
1003
1004static u8 mgmt_bredr_support(struct hci_dev *hdev)
1005{
1006	if (!lmp_bredr_capable(hdev))
1007		return MGMT_STATUS_NOT_SUPPORTED;
1008	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1009		return MGMT_STATUS_REJECTED;
1010	else
1011		return MGMT_STATUS_SUCCESS;
1012}
1013
1014static u8 mgmt_le_support(struct hci_dev *hdev)
1015{
1016	if (!lmp_le_capable(hdev))
1017		return MGMT_STATUS_NOT_SUPPORTED;
1018	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1019		return MGMT_STATUS_REJECTED;
1020	else
1021		return MGMT_STATUS_SUCCESS;
1022}
1023
1024static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1025{
1026	struct pending_cmd *cmd;
1027	struct mgmt_mode *cp;
1028	struct hci_request req;
1029	bool changed;
1030
1031	BT_DBG("status 0x%02x", status);
1032
1033	hci_dev_lock(hdev);
1034
1035	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1036	if (!cmd)
1037		goto unlock;
1038
1039	if (status) {
1040		u8 mgmt_err = mgmt_status(status);
1041		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1042		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1043		goto remove_cmd;
1044	}
1045
1046	cp = cmd->param;
1047	if (cp->val) {
1048		changed = !test_and_set_bit(HCI_DISCOVERABLE,
1049					    &hdev->dev_flags);
1050
1051		if (hdev->discov_timeout > 0) {
1052			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1053			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1054					   to);
1055		}
1056	} else {
1057		changed = test_and_clear_bit(HCI_DISCOVERABLE,
1058					     &hdev->dev_flags);
1059	}
1060
1061	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1062
1063	if (changed)
1064		new_settings(hdev, cmd->sk);
1065
1066	/* When the discoverable mode gets changed, make sure
1067	 * that the class of device has the limited discoverable
1068	 * bit correctly set.
1069	 */
1070	hci_req_init(&req, hdev);
1071	update_class(&req);
1072	hci_req_run(&req, NULL);
1073
1074remove_cmd:
1075	mgmt_pending_remove(cmd);
1076
1077unlock:
1078	hci_dev_unlock(hdev);
1079}
1080
1081static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1082			    u16 len)
1083{
1084	struct mgmt_cp_set_discoverable *cp = data;
1085	struct pending_cmd *cmd;
1086	struct hci_request req;
1087	u16 timeout;
1088	u8 scan, status;
1089	int err;
1090
1091	BT_DBG("request for %s", hdev->name);
1092
1093	status = mgmt_bredr_support(hdev);
1094	if (status)
1095		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1096				  status);
1097
1098	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1099		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1100				  MGMT_STATUS_INVALID_PARAMS);
1101
1102	timeout = __le16_to_cpu(cp->timeout);
1103
1104	/* Disabling discoverable requires that no timeout is set,
1105	 * and enabling limited discoverable requires a timeout.
1106	 */
1107	if ((cp->val == 0x00 && timeout > 0) ||
1108	    (cp->val == 0x02 && timeout == 0))
1109		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1110				  MGMT_STATUS_INVALID_PARAMS);
1111
1112	hci_dev_lock(hdev);
1113
1114	if (!hdev_is_powered(hdev) && timeout > 0) {
1115		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1116				 MGMT_STATUS_NOT_POWERED);
1117		goto failed;
1118	}
1119
1120	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1121	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1122		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1123				 MGMT_STATUS_BUSY);
1124		goto failed;
1125	}
1126
1127	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1128		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1129				 MGMT_STATUS_REJECTED);
1130		goto failed;
1131	}
1132
1133	if (!hdev_is_powered(hdev)) {
1134		bool changed = false;
1135
1136		/* Setting limited discoverable when powered off is
1137		 * not a valid operation since it requires a timeout,
1138		 * so there is no need to check HCI_LIMITED_DISCOVERABLE.
1139		 */
1140		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1141			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1142			changed = true;
1143		}
1144
1145		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1146		if (err < 0)
1147			goto failed;
1148
1149		if (changed)
1150			err = new_settings(hdev, sk);
1151
1152		goto failed;
1153	}
1154
1155	/* If the current mode is the same, just update the timeout
1156	 * value with the new one. If only the timeout gets updated,
1157	 * no HCI transactions are needed.
1158	 */
1159	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1160	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1161					  &hdev->dev_flags)) {
1162		cancel_delayed_work(&hdev->discov_off);
1163		hdev->discov_timeout = timeout;
1164
1165		if (cp->val && hdev->discov_timeout > 0) {
1166			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1167			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1168					   to);
1169		}
1170
1171		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1172		goto failed;
1173	}
1174
1175	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1176	if (!cmd) {
1177		err = -ENOMEM;
1178		goto failed;
1179	}
1180
1181	/* Cancel any discoverable timeout that might still be
1182	 * active and store the new timeout value. The arming of
1183	 * the timeout happens in the complete handler.
1184	 */
1185	cancel_delayed_work(&hdev->discov_off);
1186	hdev->discov_timeout = timeout;
1187
1188	hci_req_init(&req, hdev);
1189
1190	scan = SCAN_PAGE;
1191
1192	if (cp->val) {
1193		struct hci_cp_write_current_iac_lap hci_cp;
1194
1195		if (cp->val == 0x02) {
1196			/* Limited discoverable mode */
1197			set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1198
1199			hci_cp.num_iac = 2;
1200			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
1201			hci_cp.iac_lap[1] = 0x8b;
1202			hci_cp.iac_lap[2] = 0x9e;
1203			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
1204			hci_cp.iac_lap[4] = 0x8b;
1205			hci_cp.iac_lap[5] = 0x9e;
1206		} else {
1207			/* General discoverable mode */
1208			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1209
1210			hci_cp.num_iac = 1;
1211			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
1212			hci_cp.iac_lap[1] = 0x8b;
1213			hci_cp.iac_lap[2] = 0x9e;
1214		}
1215
1216		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1217			    (hci_cp.num_iac * 3) + 1, &hci_cp);
1218
1219		scan |= SCAN_INQUIRY;
1220	} else {
1221		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1222	}
1223
1224	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1225
1226	err = hci_req_run(&req, set_discoverable_complete);
1227	if (err < 0)
1228		mgmt_pending_remove(cmd);
1229
1230failed:
1231	hci_dev_unlock(hdev);
1232	return err;
1233}
1234
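/* Switch page scanning between fast connectable (interlaced scan with a
 * 160 msec interval) and the standard defaults, queueing commands only for
 * parameters that actually change.
 */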
1235static void write_fast_connectable(struct hci_request *req, bool enable)
1236{
1237	struct hci_dev *hdev = req->hdev;
1238	struct hci_cp_write_page_scan_activity acp;
1239	u8 type;
1240
1241	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1242		return;
1243
1244	if (enable) {
1245		type = PAGE_SCAN_TYPE_INTERLACED;
1246
1247		/* 160 msec page scan interval */
1248		acp.interval = __constant_cpu_to_le16(0x0100);
1249	} else {
1250		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
1251
1252		/* default 1.28 sec page scan */
1253		acp.interval = __constant_cpu_to_le16(0x0800);
1254	}
1255
1256	acp.window = __constant_cpu_to_le16(0x0012);
1257
1258	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1259	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
1260		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1261			    sizeof(acp), &acp);
1262
1263	if (hdev->page_scan_type != type)
1264		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1265}
1266
1267static u8 get_adv_type(struct hci_dev *hdev)
1268{
1269	struct pending_cmd *cmd;
1270	bool connectable;
1271
1272	/* If there's a pending mgmt command, the flag will not yet have
1273	 * its final value, so check for this first.
1274	 */
1275	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1276	if (cmd) {
1277		struct mgmt_mode *cp = cmd->param;
1278		connectable = !!cp->val;
1279	} else {
1280		connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1281	}
1282
1283	return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1284}
1285
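/* Queue commands to set the LE advertising parameters and enable
 * advertising.
 */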
1286static void enable_advertising(struct hci_request *req)
1287{
1288	struct hci_dev *hdev = req->hdev;
1289	struct hci_cp_le_set_adv_param cp;
1290	u8 enable = 0x01;
1291
1292	memset(&cp, 0, sizeof(cp));
1293	cp.min_interval = __constant_cpu_to_le16(0x0800);
1294	cp.max_interval = __constant_cpu_to_le16(0x0800);
1295	cp.type = get_adv_type(hdev);
1296	if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1297		cp.own_address_type = ADDR_LE_DEV_PUBLIC;
1298	else
1299		cp.own_address_type = ADDR_LE_DEV_RANDOM;
1300	cp.channel_map = 0x07;
1301
1302	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1303
1304	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1305}
1306
1307static void disable_advertising(struct hci_request *req)
1308{
1309	u8 enable = 0x00;
1310
1311	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1312}
1313
1314static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1315{
1316	struct pending_cmd *cmd;
1317	struct mgmt_mode *cp;
1318	bool changed;
1319
1320	BT_DBG("status 0x%02x", status);
1321
1322	hci_dev_lock(hdev);
1323
1324	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1325	if (!cmd)
1326		goto unlock;
1327
1328	if (status) {
1329		u8 mgmt_err = mgmt_status(status);
1330		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1331		goto remove_cmd;
1332	}
1333
1334	cp = cmd->param;
1335	if (cp->val)
1336		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1337	else
1338		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1339
1340	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1341
1342	if (changed)
1343		new_settings(hdev, cmd->sk);
1344
1345remove_cmd:
1346	mgmt_pending_remove(cmd);
1347
1348unlock:
1349	hci_dev_unlock(hdev);
1350}
1351
1352static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1353			   u16 len)
1354{
1355	struct mgmt_mode *cp = data;
1356	struct pending_cmd *cmd;
1357	struct hci_request req;
1358	u8 scan;
1359	int err;
1360
1361	BT_DBG("request for %s", hdev->name);
1362
1363	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1364	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1365		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1366				  MGMT_STATUS_REJECTED);
1367
1368	if (cp->val != 0x00 && cp->val != 0x01)
1369		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1370				  MGMT_STATUS_INVALID_PARAMS);
1371
1372	hci_dev_lock(hdev);
1373
1374	if (!hdev_is_powered(hdev)) {
1375		bool changed = false;
1376
1377		if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1378			changed = true;
1379
1380		if (cp->val) {
1381			set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1382		} else {
1383			clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1384			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1385		}
1386
1387		err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1388		if (err < 0)
1389			goto failed;
1390
1391		if (changed)
1392			err = new_settings(hdev, sk);
1393
1394		goto failed;
1395	}
1396
1397	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1398	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1399		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1400				 MGMT_STATUS_BUSY);
1401		goto failed;
1402	}
1403
1404	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1405	if (!cmd) {
1406		err = -ENOMEM;
1407		goto failed;
1408	}
1409
1410	hci_req_init(&req, hdev);
1411
1412	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
1413	    cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1414		if (cp->val) {
1415			scan = SCAN_PAGE;
1416		} else {
1417			scan = 0;
1418
1419			if (test_bit(HCI_ISCAN, &hdev->flags) &&
1420			    hdev->discov_timeout > 0)
1421				cancel_delayed_work(&hdev->discov_off);
1422		}
1423
1424		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1425	}
1426
1427	/* If we're going from non-connectable to connectable or
1428	 * vice-versa when fast connectable is enabled, ensure that
1429	 * connectable gets disabled. write_fast_connectable won't do
1430	 * anything if the page scan parameters are already what they
1431	 * should be.
1432	 */
1433	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1434		write_fast_connectable(&req, false);
1435
1436	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1437	    hci_conn_num(hdev, LE_LINK) == 0) {
1438		disable_advertising(&req);
1439		enable_advertising(&req);
1440	}
1441
1442	err = hci_req_run(&req, set_connectable_complete);
1443	if (err < 0) {
1444		mgmt_pending_remove(cmd);
1445		if (err == -ENODATA)
1446			err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE,
1447						hdev);
1448		goto failed;
1449	}
1450
1451failed:
1452	hci_dev_unlock(hdev);
1453	return err;
1454}
1455
1456static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1457			u16 len)
1458{
1459	struct mgmt_mode *cp = data;
1460	bool changed;
1461	int err;
1462
1463	BT_DBG("request for %s", hdev->name);
1464
1465	if (cp->val != 0x00 && cp->val != 0x01)
1466		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1467				  MGMT_STATUS_INVALID_PARAMS);
1468
1469	hci_dev_lock(hdev);
1470
1471	if (cp->val)
1472		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1473	else
1474		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1475
1476	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1477	if (err < 0)
1478		goto unlock;
1479
1480	if (changed)
1481		err = new_settings(hdev, sk);
1482
1483unlock:
1484	hci_dev_unlock(hdev);
1485	return err;
1486}
1487
1488static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1489			     u16 len)
1490{
1491	struct mgmt_mode *cp = data;
1492	struct pending_cmd *cmd;
1493	u8 val, status;
1494	int err;
1495
1496	BT_DBG("request for %s", hdev->name);
1497
1498	status = mgmt_bredr_support(hdev);
1499	if (status)
1500		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1501				  status);
1502
1503	if (cp->val != 0x00 && cp->val != 0x01)
1504		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1505				  MGMT_STATUS_INVALID_PARAMS);
1506
1507	hci_dev_lock(hdev);
1508
1509	if (!hdev_is_powered(hdev)) {
1510		bool changed = false;
1511
1512		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1513					  &hdev->dev_flags)) {
1514			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1515			changed = true;
1516		}
1517
1518		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1519		if (err < 0)
1520			goto failed;
1521
1522		if (changed)
1523			err = new_settings(hdev, sk);
1524
1525		goto failed;
1526	}
1527
1528	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1529		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1530				 MGMT_STATUS_BUSY);
1531		goto failed;
1532	}
1533
1534	val = !!cp->val;
1535
1536	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1537		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1538		goto failed;
1539	}
1540
1541	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1542	if (!cmd) {
1543		err = -ENOMEM;
1544		goto failed;
1545	}
1546
1547	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1548	if (err < 0) {
1549		mgmt_pending_remove(cmd);
1550		goto failed;
1551	}
1552
1553failed:
1554	hci_dev_unlock(hdev);
1555	return err;
1556}
1557
1558static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1559{
1560	struct mgmt_mode *cp = data;
1561	struct pending_cmd *cmd;
1562	u8 status;
1563	int err;
1564
1565	BT_DBG("request for %s", hdev->name);
1566
1567	status = mgmt_bredr_support(hdev);
1568	if (status)
1569		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1570
1571	if (!lmp_ssp_capable(hdev))
1572		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1573				  MGMT_STATUS_NOT_SUPPORTED);
1574
1575	if (cp->val != 0x00 && cp->val != 0x01)
1576		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1577				  MGMT_STATUS_INVALID_PARAMS);
1578
1579	hci_dev_lock(hdev);
1580
1581	if (!hdev_is_powered(hdev)) {
1582		bool changed;
1583
1584		if (cp->val) {
1585			changed = !test_and_set_bit(HCI_SSP_ENABLED,
1586						    &hdev->dev_flags);
1587		} else {
1588			changed = test_and_clear_bit(HCI_SSP_ENABLED,
1589						     &hdev->dev_flags);
1590			if (!changed)
1591				changed = test_and_clear_bit(HCI_HS_ENABLED,
1592							     &hdev->dev_flags);
1593			else
1594				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1595		}
1596
1597		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1598		if (err < 0)
1599			goto failed;
1600
1601		if (changed)
1602			err = new_settings(hdev, sk);
1603
1604		goto failed;
1605	}
1606
1607	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1608	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1609		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1610				 MGMT_STATUS_BUSY);
1611		goto failed;
1612	}
1613
1614	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1615		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1616		goto failed;
1617	}
1618
1619	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1620	if (!cmd) {
1621		err = -ENOMEM;
1622		goto failed;
1623	}
1624
1625	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1626	if (err < 0) {
1627		mgmt_pending_remove(cmd);
1628		goto failed;
1629	}
1630
1631failed:
1632	hci_dev_unlock(hdev);
1633	return err;
1634}
1635
1636static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1637{
1638	struct mgmt_mode *cp = data;
1639	bool changed;
1640	u8 status;
1641	int err;
1642
1643	BT_DBG("request for %s", hdev->name);
1644
1645	status = mgmt_bredr_support(hdev);
1646	if (status)
1647		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1648
1649	if (!lmp_ssp_capable(hdev))
1650		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1651				  MGMT_STATUS_NOT_SUPPORTED);
1652
1653	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1654		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1655				  MGMT_STATUS_REJECTED);
1656
1657	if (cp->val != 0x00 && cp->val != 0x01)
1658		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1659				  MGMT_STATUS_INVALID_PARAMS);
1660
1661	hci_dev_lock(hdev);
1662
1663	if (cp->val) {
1664		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1665	} else {
1666		if (hdev_is_powered(hdev)) {
1667			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1668					 MGMT_STATUS_REJECTED);
1669			goto unlock;
1670		}
1671
1672		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1673	}
1674
1675	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1676	if (err < 0)
1677		goto unlock;
1678
1679	if (changed)
1680		err = new_settings(hdev, sk);
1681
1682unlock:
1683	hci_dev_unlock(hdev);
1684	return err;
1685}
1686
1687static void le_enable_complete(struct hci_dev *hdev, u8 status)
1688{
1689	struct cmd_lookup match = { NULL, hdev };
1690
1691	if (status) {
1692		u8 mgmt_err = mgmt_status(status);
1693
1694		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1695				     &mgmt_err);
1696		return;
1697	}
1698
1699	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1700
1701	new_settings(hdev, match.sk);
1702
1703	if (match.sk)
1704		sock_put(match.sk);
1705
1706	/* Make sure the controller has a good default for
1707	 * advertising data. Restrict the update to when LE
1708	 * has actually been enabled. During power on, the
1709	 * update in powered_update_hci will take care of it.
1710	 */
1711	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1712		struct hci_request req;
1713
1714		hci_dev_lock(hdev);
1715
1716		hci_req_init(&req, hdev);
1717		update_ad(&req);
1718		hci_req_run(&req, NULL);
1719
1720		hci_dev_unlock(hdev);
1721	}
1722}
1723
1724static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1725{
1726	struct mgmt_mode *cp = data;
1727	struct hci_cp_write_le_host_supported hci_cp;
1728	struct pending_cmd *cmd;
1729	struct hci_request req;
1730	int err;
1731	u8 val, enabled;
1732
1733	BT_DBG("request for %s", hdev->name);
1734
1735	if (!lmp_le_capable(hdev))
1736		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1737				  MGMT_STATUS_NOT_SUPPORTED);
1738
1739	if (cp->val != 0x00 && cp->val != 0x01)
1740		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1741				  MGMT_STATUS_INVALID_PARAMS);
1742
1743	/* LE-only devices do not allow toggling LE on/off */
1744	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1745		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1746				  MGMT_STATUS_REJECTED);
1747
1748	hci_dev_lock(hdev);
1749
1750	val = !!cp->val;
1751	enabled = lmp_host_le_capable(hdev);
1752
1753	if (!hdev_is_powered(hdev) || val == enabled) {
1754		bool changed = false;
1755
1756		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1757			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1758			changed = true;
1759		}
1760
1761		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
1762			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1763			changed = true;
1764		}
1765
1766		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1767		if (err < 0)
1768			goto unlock;
1769
1770		if (changed)
1771			err = new_settings(hdev, sk);
1772
1773		goto unlock;
1774	}
1775
1776	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
1777	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1778		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1779				 MGMT_STATUS_BUSY);
1780		goto unlock;
1781	}
1782
1783	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1784	if (!cmd) {
1785		err = -ENOMEM;
1786		goto unlock;
1787	}
1788
1789	hci_req_init(&req, hdev);
1790
1791	memset(&hci_cp, 0, sizeof(hci_cp));
1792
1793	if (val) {
1794		hci_cp.le = val;
1795		hci_cp.simul = lmp_le_br_capable(hdev);
1796	} else {
1797		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1798			disable_advertising(&req);
1799	}
1800
1801	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1802		    &hci_cp);
1803
1804	err = hci_req_run(&req, le_enable_complete);
1805	if (err < 0)
1806		mgmt_pending_remove(cmd);
1807
1808unlock:
1809	hci_dev_unlock(hdev);
1810	return err;
1811}
1812
1813/* This is a helper function to test for pending mgmt commands that can
1814 * cause CoD or EIR HCI commands. Only one such pending mgmt command is
1815 * allowed at a time since otherwise we cannot easily track what the
1816 * current and future values are, and based on that decide whether a new
1817 * HCI command needs to be sent and, if so, with what value.
1818 */
1819static bool pending_eir_or_class(struct hci_dev *hdev)
1820{
1821	struct pending_cmd *cmd;
1822
1823	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1824		switch (cmd->opcode) {
1825		case MGMT_OP_ADD_UUID:
1826		case MGMT_OP_REMOVE_UUID:
1827		case MGMT_OP_SET_DEV_CLASS:
1828		case MGMT_OP_SET_POWERED:
1829			return true;
1830		}
1831	}
1832
1833	return false;
1834}
1835
1836static const u8 bluetooth_base_uuid[] = {
1837			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1838			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1839};
1840
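/* Return the effective UUID size (16, 32 or 128 bits) by checking the
 * UUID against the Bluetooth Base UUID.
 */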
1841static u8 get_uuid_size(const u8 *uuid)
1842{
1843	u32 val;
1844
1845	if (memcmp(uuid, bluetooth_base_uuid, 12))
1846		return 128;
1847
1848	val = get_unaligned_le32(&uuid[12]);
1849	if (val > 0xffff)
1850		return 32;
1851
1852	return 16;
1853}
1854
1855static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1856{
1857	struct pending_cmd *cmd;
1858
1859	hci_dev_lock(hdev);
1860
1861	cmd = mgmt_pending_find(mgmt_op, hdev);
1862	if (!cmd)
1863		goto unlock;
1864
1865	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1866		     hdev->dev_class, 3);
1867
1868	mgmt_pending_remove(cmd);
1869
1870unlock:
1871	hci_dev_unlock(hdev);
1872}
1873
1874static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1875{
1876	BT_DBG("status 0x%02x", status);
1877
1878	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
1879}
1880
1881static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1882{
1883	struct mgmt_cp_add_uuid *cp = data;
1884	struct pending_cmd *cmd;
1885	struct hci_request req;
1886	struct bt_uuid *uuid;
1887	int err;
1888
1889	BT_DBG("request for %s", hdev->name);
1890
1891	hci_dev_lock(hdev);
1892
1893	if (pending_eir_or_class(hdev)) {
1894		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1895				 MGMT_STATUS_BUSY);
1896		goto failed;
1897	}
1898
1899	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1900	if (!uuid) {
1901		err = -ENOMEM;
1902		goto failed;
1903	}
1904
1905	memcpy(uuid->uuid, cp->uuid, 16);
1906	uuid->svc_hint = cp->svc_hint;
1907	uuid->size = get_uuid_size(cp->uuid);
1908
1909	list_add_tail(&uuid->list, &hdev->uuids);
1910
1911	hci_req_init(&req, hdev);
1912
1913	update_class(&req);
1914	update_eir(&req);
1915
1916	err = hci_req_run(&req, add_uuid_complete);
1917	if (err < 0) {
1918		if (err != -ENODATA)
1919			goto failed;
1920
1921		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1922				   hdev->dev_class, 3);
1923		goto failed;
1924	}
1925
1926	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1927	if (!cmd) {
1928		err = -ENOMEM;
1929		goto failed;
1930	}
1931
1932	err = 0;
1933
1934failed:
1935	hci_dev_unlock(hdev);
1936	return err;
1937}
1938
1939static bool enable_service_cache(struct hci_dev *hdev)
1940{
1941	if (!hdev_is_powered(hdev))
1942		return false;
1943
1944	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1945		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1946				   CACHE_TIMEOUT);
1947		return true;
1948	}
1949
1950	return false;
1951}
1952
1953static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1954{
1955	BT_DBG("status 0x%02x", status);
1956
1957	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
1958}
1959
1960static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1961		       u16 len)
1962{
1963	struct mgmt_cp_remove_uuid *cp = data;
1964	struct pending_cmd *cmd;
1965	struct bt_uuid *match, *tmp;
1966	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1967	struct hci_request req;
1968	int err, found;
1969
1970	BT_DBG("request for %s", hdev->name);
1971
1972	hci_dev_lock(hdev);
1973
1974	if (pending_eir_or_class(hdev)) {
1975		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1976				 MGMT_STATUS_BUSY);
1977		goto unlock;
1978	}
1979
1980	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1981		err = hci_uuids_clear(hdev);
1982
1983		if (enable_service_cache(hdev)) {
1984			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1985					   0, hdev->dev_class, 3);
1986			goto unlock;
1987		}
1988
1989		goto update_class;
1990	}
1991
1992	found = 0;
1993
1994	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1995		if (memcmp(match->uuid, cp->uuid, 16) != 0)
1996			continue;
1997
1998		list_del(&match->list);
1999		kfree(match);
2000		found++;
2001	}
2002
2003	if (found == 0) {
2004		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2005				 MGMT_STATUS_INVALID_PARAMS);
2006		goto unlock;
2007	}
2008
2009update_class:
2010	hci_req_init(&req, hdev);
2011
2012	update_class(&req);
2013	update_eir(&req);
2014
2015	err = hci_req_run(&req, remove_uuid_complete);
2016	if (err < 0) {
2017		if (err != -ENODATA)
2018			goto unlock;
2019
2020		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2021				   hdev->dev_class, 3);
2022		goto unlock;
2023	}
2024
2025	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2026	if (!cmd) {
2027		err = -ENOMEM;
2028		goto unlock;
2029	}
2030
2031	err = 0;
2032
2033unlock:
2034	hci_dev_unlock(hdev);
2035	return err;
2036}
2037
2038static void set_class_complete(struct hci_dev *hdev, u8 status)
2039{
2040	BT_DBG("status 0x%02x", status);
2041
2042	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2043}
2044
2045static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2046			 u16 len)
2047{
2048	struct mgmt_cp_set_dev_class *cp = data;
2049	struct pending_cmd *cmd;
2050	struct hci_request req;
2051	int err;
2052
2053	BT_DBG("request for %s", hdev->name);
2054
2055	if (!lmp_bredr_capable(hdev))
2056		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2057				  MGMT_STATUS_NOT_SUPPORTED);
2058
2059	hci_dev_lock(hdev);
2060
2061	if (pending_eir_or_class(hdev)) {
2062		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2063				 MGMT_STATUS_BUSY);
2064		goto unlock;
2065	}
2066
2067	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2068		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2069				 MGMT_STATUS_INVALID_PARAMS);
2070		goto unlock;
2071	}
2072
2073	hdev->major_class = cp->major;
2074	hdev->minor_class = cp->minor;
2075
2076	if (!hdev_is_powered(hdev)) {
2077		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2078				   hdev->dev_class, 3);
2079		goto unlock;
2080	}
2081
2082	hci_req_init(&req, hdev);
2083
2084	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2085		hci_dev_unlock(hdev);
2086		cancel_delayed_work_sync(&hdev->service_cache);
2087		hci_dev_lock(hdev);
2088		update_eir(&req);
2089	}
2090
2091	update_class(&req);
2092
2093	err = hci_req_run(&req, set_class_complete);
2094	if (err < 0) {
2095		if (err != -ENODATA)
2096			goto unlock;
2097
2098		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2099				   hdev->dev_class, 3);
2100		goto unlock;
2101	}
2102
2103	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2104	if (!cmd) {
2105		err = -ENOMEM;
2106		goto unlock;
2107	}
2108
2109	err = 0;
2110
2111unlock:
2112	hci_dev_unlock(hdev);
2113	return err;
2114}
2115
2116static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2117			  u16 len)
2118{
2119	struct mgmt_cp_load_link_keys *cp = data;
2120	u16 key_count, expected_len;
2121	int i;
2122
2123	BT_DBG("request for %s", hdev->name);
2124
2125	if (!lmp_bredr_capable(hdev))
2126		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2127				  MGMT_STATUS_NOT_SUPPORTED);
2128
2129	key_count = __le16_to_cpu(cp->key_count);
2130
2131	expected_len = sizeof(*cp) + key_count *
2132					sizeof(struct mgmt_link_key_info);
2133	if (expected_len != len) {
2134		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2135		       expected_len, len);
2136		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2137				  MGMT_STATUS_INVALID_PARAMS);
2138	}
2139
2140	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2141		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2142				  MGMT_STATUS_INVALID_PARAMS);
2143
2144	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2145	       key_count);
2146
2147	for (i = 0; i < key_count; i++) {
2148		struct mgmt_link_key_info *key = &cp->keys[i];
2149
2150		if (key->addr.type != BDADDR_BREDR)
2151			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2152					  MGMT_STATUS_INVALID_PARAMS);
2153	}
2154
2155	hci_dev_lock(hdev);
2156
2157	hci_link_keys_clear(hdev);
2158
2159	if (cp->debug_keys)
2160		set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2161	else
2162		clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2163
2164	for (i = 0; i < key_count; i++) {
2165		struct mgmt_link_key_info *key = &cp->keys[i];
2166
2167		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2168				 key->type, key->pin_len);
2169	}
2170
2171	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2172
2173	hci_dev_unlock(hdev);
2174
2175	return 0;
2176}
2177
2178static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2179			   u8 addr_type, struct sock *skip_sk)
2180{
2181	struct mgmt_ev_device_unpaired ev;
2182
2183	bacpy(&ev.addr.bdaddr, bdaddr);
2184	ev.addr.type = addr_type;
2185
2186	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2187			  skip_sk);
2188}
2189
2190static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2191			 u16 len)
2192{
2193	struct mgmt_cp_unpair_device *cp = data;
2194	struct mgmt_rp_unpair_device rp;
2195	struct hci_cp_disconnect dc;
2196	struct pending_cmd *cmd;
2197	struct hci_conn *conn;
2198	int err;
2199
2200	memset(&rp, 0, sizeof(rp));
2201	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2202	rp.addr.type = cp->addr.type;
2203
2204	if (!bdaddr_type_is_valid(cp->addr.type))
2205		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2206				    MGMT_STATUS_INVALID_PARAMS,
2207				    &rp, sizeof(rp));
2208
2209	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2210		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2211				    MGMT_STATUS_INVALID_PARAMS,
2212				    &rp, sizeof(rp));
2213
2214	hci_dev_lock(hdev);
2215
2216	if (!hdev_is_powered(hdev)) {
2217		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2218				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2219		goto unlock;
2220	}
2221
2222	if (cp->addr.type == BDADDR_BREDR)
2223		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2224	else
2225		err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
2226
2227	if (err < 0) {
2228		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2229				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2230		goto unlock;
2231	}
2232
2233	if (cp->disconnect) {
2234		if (cp->addr.type == BDADDR_BREDR)
2235			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2236						       &cp->addr.bdaddr);
2237		else
2238			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2239						       &cp->addr.bdaddr);
2240	} else {
2241		conn = NULL;
2242	}
2243
2244	if (!conn) {
2245		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2246				   &rp, sizeof(rp));
2247		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2248		goto unlock;
2249	}
2250
2251	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2252			       sizeof(*cp));
2253	if (!cmd) {
2254		err = -ENOMEM;
2255		goto unlock;
2256	}
2257
2258	dc.handle = cpu_to_le16(conn->handle);
2259	dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2260	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2261	if (err < 0)
2262		mgmt_pending_remove(cmd);
2263
2264unlock:
2265	hci_dev_unlock(hdev);
2266	return err;
2267}
2268
2269static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2270		      u16 len)
2271{
2272	struct mgmt_cp_disconnect *cp = data;
2273	struct mgmt_rp_disconnect rp;
2274	struct hci_cp_disconnect dc;
2275	struct pending_cmd *cmd;
2276	struct hci_conn *conn;
2277	int err;
2278
2279	BT_DBG("");
2280
2281	memset(&rp, 0, sizeof(rp));
2282	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2283	rp.addr.type = cp->addr.type;
2284
2285	if (!bdaddr_type_is_valid(cp->addr.type))
2286		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2287				    MGMT_STATUS_INVALID_PARAMS,
2288				    &rp, sizeof(rp));
2289
2290	hci_dev_lock(hdev);
2291
2292	if (!test_bit(HCI_UP, &hdev->flags)) {
2293		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2294				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2295		goto failed;
2296	}
2297
2298	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2299		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2300				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
2301		goto failed;
2302	}
2303
2304	if (cp->addr.type == BDADDR_BREDR)
2305		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2306					       &cp->addr.bdaddr);
2307	else
2308		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2309
2310	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2311		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2312				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2313		goto failed;
2314	}
2315
2316	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2317	if (!cmd) {
2318		err = -ENOMEM;
2319		goto failed;
2320	}
2321
2322	dc.handle = cpu_to_le16(conn->handle);
2323	dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2324
2325	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2326	if (err < 0)
2327		mgmt_pending_remove(cmd);
2328
2329failed:
2330	hci_dev_unlock(hdev);
2331	return err;
2332}
2333
2334static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2335{
2336	switch (link_type) {
2337	case LE_LINK:
2338		switch (addr_type) {
2339		case ADDR_LE_DEV_PUBLIC:
2340			return BDADDR_LE_PUBLIC;
2341
2342		default:
2343			/* Fallback to LE Random address type */
2344			return BDADDR_LE_RANDOM;
2345		}
2346
2347	default:
2348		/* Fallback to BR/EDR type */
2349		return BDADDR_BREDR;
2350	}
2351}
2352
2353static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2354			   u16 data_len)
2355{
2356	struct mgmt_rp_get_connections *rp;
2357	struct hci_conn *c;
2358	size_t rp_len;
2359	int err;
2360	u16 i;
2361
2362	BT_DBG("");
2363
2364	hci_dev_lock(hdev);
2365
2366	if (!hdev_is_powered(hdev)) {
2367		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2368				 MGMT_STATUS_NOT_POWERED);
2369		goto unlock;
2370	}
2371
2372	i = 0;
2373	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2374		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2375			i++;
2376	}
2377
2378	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2379	rp = kmalloc(rp_len, GFP_KERNEL);
2380	if (!rp) {
2381		err = -ENOMEM;
2382		goto unlock;
2383	}
2384
2385	i = 0;
2386	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2387		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2388			continue;
2389		bacpy(&rp->addr[i].bdaddr, &c->dst);
2390		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2391		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2392			continue;
2393		i++;
2394	}
2395
2396	rp->conn_count = cpu_to_le16(i);
2397
2398	/* Recalculate length in case of filtered SCO connections, etc */
2399	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2400
2401	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2402			   rp_len);
2403
2404	kfree(rp);
2405
2406unlock:
2407	hci_dev_unlock(hdev);
2408	return err;
2409}
2410
2411static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2412				   struct mgmt_cp_pin_code_neg_reply *cp)
2413{
2414	struct pending_cmd *cmd;
2415	int err;
2416
2417	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2418			       sizeof(*cp));
2419	if (!cmd)
2420		return -ENOMEM;
2421
2422	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2423			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2424	if (err < 0)
2425		mgmt_pending_remove(cmd);
2426
2427	return err;
2428}
2429
2430static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2431			  u16 len)
2432{
2433	struct hci_conn *conn;
2434	struct mgmt_cp_pin_code_reply *cp = data;
2435	struct hci_cp_pin_code_reply reply;
2436	struct pending_cmd *cmd;
2437	int err;
2438
2439	BT_DBG("");
2440
2441	hci_dev_lock(hdev);
2442
2443	if (!hdev_is_powered(hdev)) {
2444		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2445				 MGMT_STATUS_NOT_POWERED);
2446		goto failed;
2447	}
2448
2449	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2450	if (!conn) {
2451		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2452				 MGMT_STATUS_NOT_CONNECTED);
2453		goto failed;
2454	}
2455
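	/* A connection with a pending high security level can only be
	 * authenticated with a full 16 character PIN, so turn anything
	 * shorter into a negative reply.
	 */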
2456	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2457		struct mgmt_cp_pin_code_neg_reply ncp;
2458
2459		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2460
2461		BT_ERR("PIN code is not 16 bytes long");
2462
2463		err = send_pin_code_neg_reply(sk, hdev, &ncp);
2464		if (err >= 0)
2465			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2466					 MGMT_STATUS_INVALID_PARAMS);
2467
2468		goto failed;
2469	}
2470
2471	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2472	if (!cmd) {
2473		err = -ENOMEM;
2474		goto failed;
2475	}
2476
2477	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2478	reply.pin_len = cp->pin_len;
2479	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2480
2481	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2482	if (err < 0)
2483		mgmt_pending_remove(cmd);
2484
2485failed:
2486	hci_dev_unlock(hdev);
2487	return err;
2488}
2489
2490static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2491			     u16 len)
2492{
2493	struct mgmt_cp_set_io_capability *cp = data;
2494
2495	BT_DBG("");
2496
2497	hci_dev_lock(hdev);
2498
2499	hdev->io_capability = cp->io_capability;
2500
2501	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2502	       hdev->io_capability);
2503
2504	hci_dev_unlock(hdev);
2505
2506	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2507			    0);
2508}
2509
2510static struct pending_cmd *find_pairing(struct hci_conn *conn)
2511{
2512	struct hci_dev *hdev = conn->hdev;
2513	struct pending_cmd *cmd;
2514
2515	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2516		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2517			continue;
2518
2519		if (cmd->user_data != conn)
2520			continue;
2521
2522		return cmd;
2523	}
2524
2525	return NULL;
2526}
2527
2528static void pairing_complete(struct pending_cmd *cmd, u8 status)
2529{
2530	struct mgmt_rp_pair_device rp;
2531	struct hci_conn *conn = cmd->user_data;
2532
2533	bacpy(&rp.addr.bdaddr, &conn->dst);
2534	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2535
2536	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2537		     &rp, sizeof(rp));
2538
2539	/* So we don't get further callbacks for this connection */
2540	conn->connect_cfm_cb = NULL;
2541	conn->security_cfm_cb = NULL;
2542	conn->disconn_cfm_cb = NULL;
2543
2544	hci_conn_drop(conn);
2545
2546	mgmt_pending_remove(cmd);
2547}
2548
2549static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2550{
2551	struct pending_cmd *cmd;
2552
2553	BT_DBG("status %u", status);
2554
2555	cmd = find_pairing(conn);
2556	if (!cmd)
2557		BT_DBG("Unable to find a pending command");
2558	else
2559		pairing_complete(cmd, mgmt_status(status));
2560}
2561
2562static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2563{
2564	struct pending_cmd *cmd;
2565
2566	BT_DBG("status %u", status);
2567
2568	if (!status)
2569		return;
2570
2571	cmd = find_pairing(conn);
2572	if (!cmd)
2573		BT_DBG("Unable to find a pending command");
2574	else
2575		pairing_complete(cmd, mgmt_status(status));
2576}
2577
2578static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2579		       u16 len)
2580{
2581	struct mgmt_cp_pair_device *cp = data;
2582	struct mgmt_rp_pair_device rp;
2583	struct pending_cmd *cmd;
2584	u8 sec_level, auth_type;
2585	struct hci_conn *conn;
2586	int err;
2587
2588	BT_DBG("");
2589
2590	memset(&rp, 0, sizeof(rp));
2591	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2592	rp.addr.type = cp->addr.type;
2593
2594	if (!bdaddr_type_is_valid(cp->addr.type))
2595		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2596				    MGMT_STATUS_INVALID_PARAMS,
2597				    &rp, sizeof(rp));
2598
2599	hci_dev_lock(hdev);
2600
2601	if (!hdev_is_powered(hdev)) {
2602		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2603				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2604		goto unlock;
2605	}
2606
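	/* Pair with medium security. io_cap 0x03 (NoInputNoOutput) cannot
	 * provide MITM protection, so request plain dedicated bonding for
	 * it; any other IO capability asks for MITM protected bonding.
	 */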
2607	sec_level = BT_SECURITY_MEDIUM;
2608	if (cp->io_cap == 0x03)
2609		auth_type = HCI_AT_DEDICATED_BONDING;
2610	else
2611		auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2612
2613	if (cp->addr.type == BDADDR_BREDR)
2614		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2615				   cp->addr.type, sec_level, auth_type);
2616	else
2617		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2618				   cp->addr.type, sec_level, auth_type);
2619
2620	if (IS_ERR(conn)) {
2621		int status;
2622
2623		if (PTR_ERR(conn) == -EBUSY)
2624			status = MGMT_STATUS_BUSY;
2625		else
2626			status = MGMT_STATUS_CONNECT_FAILED;
2627
2628		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2629				   status, &rp,
2630				   sizeof(rp));
2631		goto unlock;
2632	}
2633
2634	if (conn->connect_cfm_cb) {
2635		hci_conn_drop(conn);
2636		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2637				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
2638		goto unlock;
2639	}
2640
2641	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2642	if (!cmd) {
2643		err = -ENOMEM;
2644		hci_conn_drop(conn);
2645		goto unlock;
2646	}
2647
2648	/* For LE, just connecting isn't proof that the pairing finished */
2649	if (cp->addr.type == BDADDR_BREDR)
2650		conn->connect_cfm_cb = pairing_complete_cb;
2651	else
2652		conn->connect_cfm_cb = le_connect_complete_cb;
2653
2654	conn->security_cfm_cb = pairing_complete_cb;
2655	conn->disconn_cfm_cb = pairing_complete_cb;
2656	conn->io_capability = cp->io_cap;
2657	cmd->user_data = conn;
2658
2659	if (conn->state == BT_CONNECTED &&
2660	    hci_conn_security(conn, sec_level, auth_type))
2661		pairing_complete(cmd, 0);
2662
2663	err = 0;
2664
2665unlock:
2666	hci_dev_unlock(hdev);
2667	return err;
2668}
2669
2670static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2671			      u16 len)
2672{
2673	struct mgmt_addr_info *addr = data;
2674	struct pending_cmd *cmd;
2675	struct hci_conn *conn;
2676	int err;
2677
2678	BT_DBG("");
2679
2680	hci_dev_lock(hdev);
2681
2682	if (!hdev_is_powered(hdev)) {
2683		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2684				 MGMT_STATUS_NOT_POWERED);
2685		goto unlock;
2686	}
2687
2688	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2689	if (!cmd) {
2690		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2691				 MGMT_STATUS_INVALID_PARAMS);
2692		goto unlock;
2693	}
2694
2695	conn = cmd->user_data;
2696
2697	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2698		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2699				 MGMT_STATUS_INVALID_PARAMS);
2700		goto unlock;
2701	}
2702
2703	pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2704
2705	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2706			   addr, sizeof(*addr));
2707unlock:
2708	hci_dev_unlock(hdev);
2709	return err;
2710}
2711
2712static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2713			     struct mgmt_addr_info *addr, u16 mgmt_op,
2714			     u16 hci_op, __le32 passkey)
2715{
2716	struct pending_cmd *cmd;
2717	struct hci_conn *conn;
2718	int err;
2719
2720	hci_dev_lock(hdev);
2721
2722	if (!hdev_is_powered(hdev)) {
2723		err = cmd_complete(sk, hdev->id, mgmt_op,
2724				   MGMT_STATUS_NOT_POWERED, addr,
2725				   sizeof(*addr));
2726		goto done;
2727	}
2728
2729	if (addr->type == BDADDR_BREDR)
2730		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2731	else
2732		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2733
2734	if (!conn) {
2735		err = cmd_complete(sk, hdev->id, mgmt_op,
2736				   MGMT_STATUS_NOT_CONNECTED, addr,
2737				   sizeof(*addr));
2738		goto done;
2739	}
2740
2741	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2742		/* Continue with pairing via SMP */
2743		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2744
2745		if (!err)
2746			err = cmd_complete(sk, hdev->id, mgmt_op,
2747					   MGMT_STATUS_SUCCESS, addr,
2748					   sizeof(*addr));
2749		else
2750			err = cmd_complete(sk, hdev->id, mgmt_op,
2751					   MGMT_STATUS_FAILED, addr,
2752					   sizeof(*addr));
2753
2754		goto done;
2755	}
2756
2757	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2758	if (!cmd) {
2759		err = -ENOMEM;
2760		goto done;
2761	}
2762
2763	/* Continue with pairing via HCI */
2764	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2765		struct hci_cp_user_passkey_reply cp;
2766
2767		bacpy(&cp.bdaddr, &addr->bdaddr);
2768		cp.passkey = passkey;
2769		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2770	} else
2771		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2772				   &addr->bdaddr);
2773
2774	if (err < 0)
2775		mgmt_pending_remove(cmd);
2776
2777done:
2778	hci_dev_unlock(hdev);
2779	return err;
2780}
2781
2782static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2783			      void *data, u16 len)
2784{
2785	struct mgmt_cp_pin_code_neg_reply *cp = data;
2786
2787	BT_DBG("");
2788
2789	return user_pairing_resp(sk, hdev, &cp->addr,
2790				MGMT_OP_PIN_CODE_NEG_REPLY,
2791				HCI_OP_PIN_CODE_NEG_REPLY, 0);
2792}
2793
2794static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2795			      u16 len)
2796{
2797	struct mgmt_cp_user_confirm_reply *cp = data;
2798
2799	BT_DBG("");
2800
2801	if (len != sizeof(*cp))
2802		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2803				  MGMT_STATUS_INVALID_PARAMS);
2804
2805	return user_pairing_resp(sk, hdev, &cp->addr,
2806				 MGMT_OP_USER_CONFIRM_REPLY,
2807				 HCI_OP_USER_CONFIRM_REPLY, 0);
2808}
2809
2810static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2811				  void *data, u16 len)
2812{
2813	struct mgmt_cp_user_confirm_neg_reply *cp = data;
2814
2815	BT_DBG("");
2816
2817	return user_pairing_resp(sk, hdev, &cp->addr,
2818				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2819				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2820}
2821
2822static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2823			      u16 len)
2824{
2825	struct mgmt_cp_user_passkey_reply *cp = data;
2826
2827	BT_DBG("");
2828
2829	return user_pairing_resp(sk, hdev, &cp->addr,
2830				 MGMT_OP_USER_PASSKEY_REPLY,
2831				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2832}
2833
2834static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2835				  void *data, u16 len)
2836{
2837	struct mgmt_cp_user_passkey_neg_reply *cp = data;
2838
2839	BT_DBG("");
2840
2841	return user_pairing_resp(sk, hdev, &cp->addr,
2842				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2843				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2844}
2845
2846static void update_name(struct hci_request *req)
2847{
2848	struct hci_dev *hdev = req->hdev;
2849	struct hci_cp_write_local_name cp;
2850
2851	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2852
2853	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
2854}
2855
2856static void set_name_complete(struct hci_dev *hdev, u8 status)
2857{
2858	struct mgmt_cp_set_local_name *cp;
2859	struct pending_cmd *cmd;
2860
2861	BT_DBG("status 0x%02x", status);
2862
2863	hci_dev_lock(hdev);
2864
2865	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2866	if (!cmd)
2867		goto unlock;
2868
2869	cp = cmd->param;
2870
2871	if (status)
2872		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2873			   mgmt_status(status));
2874	else
2875		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2876			     cp, sizeof(*cp));
2877
2878	mgmt_pending_remove(cmd);
2879
2880unlock:
2881	hci_dev_unlock(hdev);
2882}
2883
2884static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2885			  u16 len)
2886{
2887	struct mgmt_cp_set_local_name *cp = data;
2888	struct pending_cmd *cmd;
2889	struct hci_request req;
2890	int err;
2891
2892	BT_DBG("");
2893
2894	hci_dev_lock(hdev);
2895
2896	/* If the old values are the same as the new ones just return a
2897	 * direct command complete event.
2898	 */
2899	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2900	    !memcmp(hdev->short_name, cp->short_name,
2901		    sizeof(hdev->short_name))) {
2902		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2903				   data, len);
2904		goto failed;
2905	}
2906
2907	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2908
2909	if (!hdev_is_powered(hdev)) {
2910		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2911
2912		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2913				   data, len);
2914		if (err < 0)
2915			goto failed;
2916
2917		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2918				 sk);
2919
2920		goto failed;
2921	}
2922
2923	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2924	if (!cmd) {
2925		err = -ENOMEM;
2926		goto failed;
2927	}
2928
2929	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2930
2931	hci_req_init(&req, hdev);
2932
2933	if (lmp_bredr_capable(hdev)) {
2934		update_name(&req);
2935		update_eir(&req);
2936	}
2937
2938	if (lmp_le_capable(hdev))
2939		update_ad(&req);
2940
2941	err = hci_req_run(&req, set_name_complete);
2942	if (err < 0)
2943		mgmt_pending_remove(cmd);
2944
2945failed:
2946	hci_dev_unlock(hdev);
2947	return err;
2948}
2949
2950static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2951			       void *data, u16 data_len)
2952{
2953	struct pending_cmd *cmd;
2954	int err;
2955
2956	BT_DBG("%s", hdev->name);
2957
2958	hci_dev_lock(hdev);
2959
2960	if (!hdev_is_powered(hdev)) {
2961		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2962				 MGMT_STATUS_NOT_POWERED);
2963		goto unlock;
2964	}
2965
2966	if (!lmp_ssp_capable(hdev)) {
2967		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2968				 MGMT_STATUS_NOT_SUPPORTED);
2969		goto unlock;
2970	}
2971
2972	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2973		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2974				 MGMT_STATUS_BUSY);
2975		goto unlock;
2976	}
2977
2978	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2979	if (!cmd) {
2980		err = -ENOMEM;
2981		goto unlock;
2982	}
2983
2984	err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2985	if (err < 0)
2986		mgmt_pending_remove(cmd);
2987
2988unlock:
2989	hci_dev_unlock(hdev);
2990	return err;
2991}
2992
2993static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2994			       void *data, u16 len)
2995{
2996	struct mgmt_cp_add_remote_oob_data *cp = data;
2997	u8 status;
2998	int err;
2999
3000	BT_DBG("%s", hdev->name);
3001
3002	hci_dev_lock(hdev);
3003
3004	err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
3005				      cp->randomizer);
3006	if (err < 0)
3007		status = MGMT_STATUS_FAILED;
3008	else
3009		status = MGMT_STATUS_SUCCESS;
3010
3011	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3012			   &cp->addr, sizeof(cp->addr));
3013
3014	hci_dev_unlock(hdev);
3015	return err;
3016}
3017
3018static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3019				  void *data, u16 len)
3020{
3021	struct mgmt_cp_remove_remote_oob_data *cp = data;
3022	u8 status;
3023	int err;
3024
3025	BT_DBG("%s", hdev->name);
3026
3027	hci_dev_lock(hdev);
3028
3029	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3030	if (err < 0)
3031		status = MGMT_STATUS_INVALID_PARAMS;
3032	else
3033		status = MGMT_STATUS_SUCCESS;
3034
3035	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3036			   status, &cp->addr, sizeof(cp->addr));
3037
3038	hci_dev_unlock(hdev);
3039	return err;
3040}
3041
3042static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3043{
3044	struct pending_cmd *cmd;
3045	u8 type;
3046	int err;
3047
3048	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3049
3050	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3051	if (!cmd)
3052		return -ENOENT;
3053
3054	type = hdev->discovery.type;
3055
3056	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3057			   &type, sizeof(type));
3058	mgmt_pending_remove(cmd);
3059
3060	return err;
3061}
3062
3063static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3064{
3065	BT_DBG("status %d", status);
3066
3067	if (status) {
3068		hci_dev_lock(hdev);
3069		mgmt_start_discovery_failed(hdev, status);
3070		hci_dev_unlock(hdev);
3071		return;
3072	}
3073
3074	hci_dev_lock(hdev);
3075	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3076	hci_dev_unlock(hdev);
3077
3078	switch (hdev->discovery.type) {
3079	case DISCOV_TYPE_LE:
3080		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3081				   DISCOV_LE_TIMEOUT);
3082		break;
3083
3084	case DISCOV_TYPE_INTERLEAVED:
3085		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3086				   DISCOV_INTERLEAVED_TIMEOUT);
3087		break;
3088
3089	case DISCOV_TYPE_BREDR:
3090		break;
3091
3092	default:
3093		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3094	}
3095}
3096
3097static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3098			   void *data, u16 len)
3099{
3100	struct mgmt_cp_start_discovery *cp = data;
3101	struct pending_cmd *cmd;
3102	struct hci_cp_le_set_scan_param param_cp;
3103	struct hci_cp_le_set_scan_enable enable_cp;
3104	struct hci_cp_inquiry inq_cp;
3105	struct hci_request req;
3106	/* General inquiry access code (GIAC) */
3107	u8 lap[3] = { 0x33, 0x8b, 0x9e };
3108	u8 status;
3109	int err;
3110
3111	BT_DBG("%s", hdev->name);
3112
3113	hci_dev_lock(hdev);
3114
3115	if (!hdev_is_powered(hdev)) {
3116		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3117				 MGMT_STATUS_NOT_POWERED);
3118		goto failed;
3119	}
3120
3121	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3122		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3123				 MGMT_STATUS_BUSY);
3124		goto failed;
3125	}
3126
3127	if (hdev->discovery.state != DISCOVERY_STOPPED) {
3128		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3129				 MGMT_STATUS_BUSY);
3130		goto failed;
3131	}
3132
3133	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3134	if (!cmd) {
3135		err = -ENOMEM;
3136		goto failed;
3137	}
3138
3139	hdev->discovery.type = cp->type;
3140
3141	hci_req_init(&req, hdev);
3142
3143	switch (hdev->discovery.type) {
3144	case DISCOV_TYPE_BREDR:
3145		status = mgmt_bredr_support(hdev);
3146		if (status) {
3147			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3148					 status);
3149			mgmt_pending_remove(cmd);
3150			goto failed;
3151		}
3152
3153		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3154			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3155					 MGMT_STATUS_BUSY);
3156			mgmt_pending_remove(cmd);
3157			goto failed;
3158		}
3159
3160		hci_inquiry_cache_flush(hdev);
3161
3162		memset(&inq_cp, 0, sizeof(inq_cp));
3163		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3164		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3165		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3166		break;
3167
3168	case DISCOV_TYPE_LE:
3169	case DISCOV_TYPE_INTERLEAVED:
3170		status = mgmt_le_support(hdev);
3171		if (status) {
3172			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3173					 status);
3174			mgmt_pending_remove(cmd);
3175			goto failed;
3176		}
3177
3178		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3179		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3180			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3181					 MGMT_STATUS_NOT_SUPPORTED);
3182			mgmt_pending_remove(cmd);
3183			goto failed;
3184		}
3185
3186		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3187			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3188					 MGMT_STATUS_REJECTED);
3189			mgmt_pending_remove(cmd);
3190			goto failed;
3191		}
3192
3193		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
3194			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3195					 MGMT_STATUS_BUSY);
3196			mgmt_pending_remove(cmd);
3197			goto failed;
3198		}
3199
3200		memset(&param_cp, 0, sizeof(param_cp));
3201		param_cp.type = LE_SCAN_ACTIVE;
3202		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3203		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
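		/* Scan using the public address when the controller has
		 * one; otherwise fall back to the random own address type.
		 */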
3204		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
3205			param_cp.own_address_type = ADDR_LE_DEV_PUBLIC;
3206		else
3207			param_cp.own_address_type = ADDR_LE_DEV_RANDOM;
3208		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3209			    &param_cp);
3210
3211		memset(&enable_cp, 0, sizeof(enable_cp));
3212		enable_cp.enable = LE_SCAN_ENABLE;
3213		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3214		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3215			    &enable_cp);
3216		break;
3217
3218	default:
3219		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3220				 MGMT_STATUS_INVALID_PARAMS);
3221		mgmt_pending_remove(cmd);
3222		goto failed;
3223	}
3224
3225	err = hci_req_run(&req, start_discovery_complete);
3226	if (err < 0)
3227		mgmt_pending_remove(cmd);
3228	else
3229		hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3230
3231failed:
3232	hci_dev_unlock(hdev);
3233	return err;
3234}
3235
3236static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3237{
3238	struct pending_cmd *cmd;
3239	int err;
3240
3241	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3242	if (!cmd)
3243		return -ENOENT;
3244
3245	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3246			   &hdev->discovery.type, sizeof(hdev->discovery.type));
3247	mgmt_pending_remove(cmd);
3248
3249	return err;
3250}
3251
3252static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3253{
3254	BT_DBG("status %d", status);
3255
3256	hci_dev_lock(hdev);
3257
3258	if (status) {
3259		mgmt_stop_discovery_failed(hdev, status);
3260		goto unlock;
3261	}
3262
3263	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3264
3265unlock:
3266	hci_dev_unlock(hdev);
3267}
3268
3269static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3270			  u16 len)
3271{
3272	struct mgmt_cp_stop_discovery *mgmt_cp = data;
3273	struct pending_cmd *cmd;
3274	struct hci_cp_remote_name_req_cancel cp;
3275	struct inquiry_entry *e;
3276	struct hci_request req;
3277	struct hci_cp_le_set_scan_enable enable_cp;
3278	int err;
3279
3280	BT_DBG("%s", hdev->name);
3281
3282	hci_dev_lock(hdev);
3283
3284	if (!hci_discovery_active(hdev)) {
3285		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3286				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
3287				   sizeof(mgmt_cp->type));
3288		goto unlock;
3289	}
3290
3291	if (hdev->discovery.type != mgmt_cp->type) {
3292		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3293				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3294				   sizeof(mgmt_cp->type));
3295		goto unlock;
3296	}
3297
3298	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3299	if (!cmd) {
3300		err = -ENOMEM;
3301		goto unlock;
3302	}
3303
3304	hci_req_init(&req, hdev);
3305
3306	switch (hdev->discovery.state) {
3307	case DISCOVERY_FINDING:
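		/* If a BR/EDR inquiry is still active cancel it directly;
		 * otherwise an LE scan is running, so stop the scan
		 * disable timer and turn LE scanning off.
		 */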
3308		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3309			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3310		} else {
3311			cancel_delayed_work(&hdev->le_scan_disable);
3312
3313			memset(&enable_cp, 0, sizeof(enable_cp));
3314			enable_cp.enable = LE_SCAN_DISABLE;
3315			hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
3316				    sizeof(enable_cp), &enable_cp);
3317		}
3318
3319		break;
3320
3321	case DISCOVERY_RESOLVING:
3322		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3323						     NAME_PENDING);
3324		if (!e) {
3325			mgmt_pending_remove(cmd);
3326			err = cmd_complete(sk, hdev->id,
3327					   MGMT_OP_STOP_DISCOVERY, 0,
3328					   &mgmt_cp->type,
3329					   sizeof(mgmt_cp->type));
3330			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3331			goto unlock;
3332		}
3333
3334		bacpy(&cp.bdaddr, &e->data.bdaddr);
3335		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3336			    &cp);
3337
3338		break;
3339
3340	default:
3341		BT_DBG("unknown discovery state %u", hdev->discovery.state);
3342
3343		mgmt_pending_remove(cmd);
3344		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3345				   MGMT_STATUS_FAILED, &mgmt_cp->type,
3346				   sizeof(mgmt_cp->type));
3347		goto unlock;
3348	}
3349
3350	err = hci_req_run(&req, stop_discovery_complete);
3351	if (err < 0)
3352		mgmt_pending_remove(cmd);
3353	else
3354		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3355
3356unlock:
3357	hci_dev_unlock(hdev);
3358	return err;
3359}
3360
3361static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3362			u16 len)
3363{
3364	struct mgmt_cp_confirm_name *cp = data;
3365	struct inquiry_entry *e;
3366	int err;
3367
3368	BT_DBG("%s", hdev->name);
3369
3370	hci_dev_lock(hdev);
3371
3372	if (!hci_discovery_active(hdev)) {
3373		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3374				 MGMT_STATUS_FAILED);
3375		goto failed;
3376	}
3377
3378	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3379	if (!e) {
3380		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3381				 MGMT_STATUS_INVALID_PARAMS);
3382		goto failed;
3383	}
3384
3385	if (cp->name_known) {
3386		e->name_state = NAME_KNOWN;
3387		list_del(&e->list);
3388	} else {
3389		e->name_state = NAME_NEEDED;
3390		hci_inquiry_cache_update_resolve(hdev, e);
3391	}
3392
3393	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3394			   sizeof(cp->addr));
3395
3396failed:
3397	hci_dev_unlock(hdev);
3398	return err;
3399}
3400
3401static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3402			u16 len)
3403{
3404	struct mgmt_cp_block_device *cp = data;
3405	u8 status;
3406	int err;
3407
3408	BT_DBG("%s", hdev->name);
3409
3410	if (!bdaddr_type_is_valid(cp->addr.type))
3411		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3412				    MGMT_STATUS_INVALID_PARAMS,
3413				    &cp->addr, sizeof(cp->addr));
3414
3415	hci_dev_lock(hdev);
3416
3417	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3418	if (err < 0)
3419		status = MGMT_STATUS_FAILED;
3420	else
3421		status = MGMT_STATUS_SUCCESS;
3422
3423	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3424			   &cp->addr, sizeof(cp->addr));
3425
3426	hci_dev_unlock(hdev);
3427
3428	return err;
3429}
3430
3431static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3432			  u16 len)
3433{
3434	struct mgmt_cp_unblock_device *cp = data;
3435	u8 status;
3436	int err;
3437
3438	BT_DBG("%s", hdev->name);
3439
3440	if (!bdaddr_type_is_valid(cp->addr.type))
3441		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3442				    MGMT_STATUS_INVALID_PARAMS,
3443				    &cp->addr, sizeof(cp->addr));
3444
3445	hci_dev_lock(hdev);
3446
3447	err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3448	if (err < 0)
3449		status = MGMT_STATUS_INVALID_PARAMS;
3450	else
3451		status = MGMT_STATUS_SUCCESS;
3452
3453	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3454			   &cp->addr, sizeof(cp->addr));
3455
3456	hci_dev_unlock(hdev);
3457
3458	return err;
3459}
3460
3461static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3462			 u16 len)
3463{
3464	struct mgmt_cp_set_device_id *cp = data;
3465	struct hci_request req;
3466	int err;
3467	__u16 source;
3468
3469	BT_DBG("%s", hdev->name);
3470
3471	source = __le16_to_cpu(cp->source);
3472
3473	if (source > 0x0002)
3474		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3475				  MGMT_STATUS_INVALID_PARAMS);
3476
3477	hci_dev_lock(hdev);
3478
3479	hdev->devid_source = source;
3480	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3481	hdev->devid_product = __le16_to_cpu(cp->product);
3482	hdev->devid_version = __le16_to_cpu(cp->version);
3483
3484	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3485
3486	hci_req_init(&req, hdev);
3487	update_eir(&req);
3488	hci_req_run(&req, NULL);
3489
3490	hci_dev_unlock(hdev);
3491
3492	return err;
3493}
3494
3495static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3496{
3497	struct cmd_lookup match = { NULL, hdev };
3498
3499	if (status) {
3500		u8 mgmt_err = mgmt_status(status);
3501
3502		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3503				     cmd_status_rsp, &mgmt_err);
3504		return;
3505	}
3506
3507	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3508			     &match);
3509
3510	new_settings(hdev, match.sk);
3511
3512	if (match.sk)
3513		sock_put(match.sk);
3514}
3515
3516static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3517			   u16 len)
3518{
3519	struct mgmt_mode *cp = data;
3520	struct pending_cmd *cmd;
3521	struct hci_request req;
3522	u8 val, enabled, status;
3523	int err;
3524
3525	BT_DBG("request for %s", hdev->name);
3526
3527	status = mgmt_le_support(hdev);
3528	if (status)
3529		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3530				  status);
3531
3532	if (cp->val != 0x00 && cp->val != 0x01)
3533		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3534				  MGMT_STATUS_INVALID_PARAMS);
3535
3536	hci_dev_lock(hdev);
3537
3538	val = !!cp->val;
3539	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3540
3541	/* The following conditions are ones which mean that we should
3542	 * not do any HCI communication but directly send a mgmt
3543	 * response to user space (after toggling the flag if
3544	 * necessary).
3545	 */
3546	if (!hdev_is_powered(hdev) || val == enabled ||
3547	    hci_conn_num(hdev, LE_LINK) > 0) {
3548		bool changed = false;
3549
3550		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3551			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3552			changed = true;
3553		}
3554
3555		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3556		if (err < 0)
3557			goto unlock;
3558
3559		if (changed)
3560			err = new_settings(hdev, sk);
3561
3562		goto unlock;
3563	}
3564
3565	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3566	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3567		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3568				 MGMT_STATUS_BUSY);
3569		goto unlock;
3570	}
3571
3572	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3573	if (!cmd) {
3574		err = -ENOMEM;
3575		goto unlock;
3576	}
3577
3578	hci_req_init(&req, hdev);
3579
3580	if (val)
3581		enable_advertising(&req);
3582	else
3583		disable_advertising(&req);
3584
3585	err = hci_req_run(&req, set_advertising_complete);
3586	if (err < 0)
3587		mgmt_pending_remove(cmd);
3588
3589unlock:
3590	hci_dev_unlock(hdev);
3591	return err;
3592}
3593
3594static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3595			      void *data, u16 len)
3596{
3597	struct mgmt_cp_set_static_address *cp = data;
3598	int err;
3599
3600	BT_DBG("%s", hdev->name);
3601
3602	if (!lmp_le_capable(hdev))
3603		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3604				  MGMT_STATUS_NOT_SUPPORTED);
3605
3606	if (hdev_is_powered(hdev))
3607		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3608				  MGMT_STATUS_REJECTED);
3609
3610	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3611		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3612			return cmd_status(sk, hdev->id,
3613					  MGMT_OP_SET_STATIC_ADDRESS,
3614					  MGMT_STATUS_INVALID_PARAMS);
3615
3616		/* Two most significant bits shall be set */
3617		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3618			return cmd_status(sk, hdev->id,
3619					  MGMT_OP_SET_STATIC_ADDRESS,
3620					  MGMT_STATUS_INVALID_PARAMS);
3621	}
3622
3623	hci_dev_lock(hdev);
3624
3625	bacpy(&hdev->static_addr, &cp->bdaddr);
3626
3627	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3628
3629	hci_dev_unlock(hdev);
3630
3631	return err;
3632}
3633
3634static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3635			   void *data, u16 len)
3636{
3637	struct mgmt_cp_set_scan_params *cp = data;
3638	__u16 interval, window;
3639	int err;
3640
3641	BT_DBG("%s", hdev->name);
3642
3643	if (!lmp_le_capable(hdev))
3644		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3645				  MGMT_STATUS_NOT_SUPPORTED);
3646
3647	interval = __le16_to_cpu(cp->interval);
3648
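	/* Scan interval and window are in 0.625 ms units and must lie in
	 * the range 0x0004 (2.5 ms) to 0x4000 (10.24 s), with the window
	 * never exceeding the interval.
	 */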
3649	if (interval < 0x0004 || interval > 0x4000)
3650		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3651				  MGMT_STATUS_INVALID_PARAMS);
3652
3653	window = __le16_to_cpu(cp->window);
3654
3655	if (window < 0x0004 || window > 0x4000)
3656		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3657				  MGMT_STATUS_INVALID_PARAMS);
3658
3659	if (window > interval)
3660		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3661				  MGMT_STATUS_INVALID_PARAMS);
3662
3663	hci_dev_lock(hdev);
3664
3665	hdev->le_scan_interval = interval;
3666	hdev->le_scan_window = window;
3667
3668	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3669
3670	hci_dev_unlock(hdev);
3671
3672	return err;
3673}
3674
3675static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3676{
3677	struct pending_cmd *cmd;
3678
3679	BT_DBG("status 0x%02x", status);
3680
3681	hci_dev_lock(hdev);
3682
3683	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3684	if (!cmd)
3685		goto unlock;
3686
3687	if (status) {
3688		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3689			   mgmt_status(status));
3690	} else {
3691		struct mgmt_mode *cp = cmd->param;
3692
3693		if (cp->val)
3694			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3695		else
3696			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3697
3698		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3699		new_settings(hdev, cmd->sk);
3700	}
3701
3702	mgmt_pending_remove(cmd);
3703
3704unlock:
3705	hci_dev_unlock(hdev);
3706}
3707
3708static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3709				void *data, u16 len)
3710{
3711	struct mgmt_mode *cp = data;
3712	struct pending_cmd *cmd;
3713	struct hci_request req;
3714	int err;
3715
3716	BT_DBG("%s", hdev->name);
3717
3718	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
3719	    hdev->hci_ver < BLUETOOTH_VER_1_2)
3720		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3721				  MGMT_STATUS_NOT_SUPPORTED);
3722
3723	if (cp->val != 0x00 && cp->val != 0x01)
3724		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3725				  MGMT_STATUS_INVALID_PARAMS);
3726
3727	if (!hdev_is_powered(hdev))
3728		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3729				  MGMT_STATUS_NOT_POWERED);
3730
3731	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3732		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3733				  MGMT_STATUS_REJECTED);
3734
3735	hci_dev_lock(hdev);
3736
3737	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3738		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3739				 MGMT_STATUS_BUSY);
3740		goto unlock;
3741	}
3742
3743	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3744		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3745					hdev);
3746		goto unlock;
3747	}
3748
3749	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3750			       data, len);
3751	if (!cmd) {
3752		err = -ENOMEM;
3753		goto unlock;
3754	}
3755
3756	hci_req_init(&req, hdev);
3757
3758	write_fast_connectable(&req, cp->val);
3759
3760	err = hci_req_run(&req, fast_connectable_complete);
3761	if (err < 0) {
3762		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3763				 MGMT_STATUS_FAILED);
3764		mgmt_pending_remove(cmd);
3765	}
3766
3767unlock:
3768	hci_dev_unlock(hdev);
3769
3770	return err;
3771}
3772
3773static void set_bredr_scan(struct hci_request *req)
3774{
3775	struct hci_dev *hdev = req->hdev;
3776	u8 scan = 0;
3777
3778	/* Ensure that fast connectable is disabled. This function will
3779	 * not do anything if the page scan parameters are already what
3780	 * they should be.
3781	 */
3782	write_fast_connectable(req, false);
3783
3784	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3785		scan |= SCAN_PAGE;
3786	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3787		scan |= SCAN_INQUIRY;
3788
3789	if (scan)
3790		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3791}
3792
3793static void set_bredr_complete(struct hci_dev *hdev, u8 status)
3794{
3795	struct pending_cmd *cmd;
3796
3797	BT_DBG("status 0x%02x", status);
3798
3799	hci_dev_lock(hdev);
3800
3801	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
3802	if (!cmd)
3803		goto unlock;
3804
3805	if (status) {
3806		u8 mgmt_err = mgmt_status(status);
3807
3808		/* We need to restore the flag if related HCI commands
3809		 * failed.
3810		 */
3811		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3812
3813		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
3814	} else {
3815		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
3816		new_settings(hdev, cmd->sk);
3817	}
3818
3819	mgmt_pending_remove(cmd);
3820
3821unlock:
3822	hci_dev_unlock(hdev);
3823}
3824
3825static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
3826{
3827	struct mgmt_mode *cp = data;
3828	struct pending_cmd *cmd;
3829	struct hci_request req;
3830	int err;
3831
3832	BT_DBG("request for %s", hdev->name);
3833
3834	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
3835		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3836				  MGMT_STATUS_NOT_SUPPORTED);
3837
3838	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3839		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3840				  MGMT_STATUS_REJECTED);
3841
3842	if (cp->val != 0x00 && cp->val != 0x01)
3843		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3844				  MGMT_STATUS_INVALID_PARAMS);
3845
3846	hci_dev_lock(hdev);
3847
3848	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3849		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3850		goto unlock;
3851	}
3852
3853	if (!hdev_is_powered(hdev)) {
3854		if (!cp->val) {
3855			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
3856			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
3857			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3858			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3859			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
3860		}
3861
3862		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3863
3864		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3865		if (err < 0)
3866			goto unlock;
3867
3868		err = new_settings(hdev, sk);
3869		goto unlock;
3870	}
3871
3872	/* Reject disabling when powered on */
3873	if (!cp->val) {
3874		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3875				 MGMT_STATUS_REJECTED);
3876		goto unlock;
3877	}
3878
3879	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
3880		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3881				 MGMT_STATUS_BUSY);
3882		goto unlock;
3883	}
3884
3885	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
3886	if (!cmd) {
3887		err = -ENOMEM;
3888		goto unlock;
3889	}
3890
3891	/* We need to flip the bit already here so that update_ad
3892	 * generates the correct flags.
3893	 */
3894	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3895
3896	hci_req_init(&req, hdev);
3897
3898	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3899		set_bredr_scan(&req);
3900
3901	update_ad(&req);
3902
3903	err = hci_req_run(&req, set_bredr_complete);
3904	if (err < 0)
3905		mgmt_pending_remove(cmd);
3906
3907unlock:
3908	hci_dev_unlock(hdev);
3909	return err;
3910}
3911
3912static bool ltk_is_valid(struct mgmt_ltk_info *key)
3913{
3914	if (key->authenticated != 0x00 && key->authenticated != 0x01)
3915		return false;
3916	if (key->master != 0x00 && key->master != 0x01)
3917		return false;
3918	if (!bdaddr_type_is_le(key->addr.type))
3919		return false;
3920	return true;
3921}
3922
3923static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3924			       void *cp_data, u16 len)
3925{
3926	struct mgmt_cp_load_long_term_keys *cp = cp_data;
3927	u16 key_count, expected_len;
3928	int i, err;
3929
3930	BT_DBG("request for %s", hdev->name);
3931
3932	if (!lmp_le_capable(hdev))
3933		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3934				  MGMT_STATUS_NOT_SUPPORTED);
3935
3936	key_count = __le16_to_cpu(cp->key_count);
3937
3938	expected_len = sizeof(*cp) + key_count *
3939					sizeof(struct mgmt_ltk_info);
3940	if (expected_len != len) {
3941		BT_ERR("load_long_term_keys: expected %u bytes, got %u bytes",
3942		       expected_len, len);
3943		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3944				  MGMT_STATUS_INVALID_PARAMS);
3945	}
3946
3947	BT_DBG("%s key_count %u", hdev->name, key_count);
3948
3949	for (i = 0; i < key_count; i++) {
3950		struct mgmt_ltk_info *key = &cp->keys[i];
3951
3952		if (!ltk_is_valid(key))
3953			return cmd_status(sk, hdev->id,
3954					  MGMT_OP_LOAD_LONG_TERM_KEYS,
3955					  MGMT_STATUS_INVALID_PARAMS);
3956	}
3957
3958	hci_dev_lock(hdev);
3959
3960	hci_smp_ltks_clear(hdev);
3961
3962	for (i = 0; i < key_count; i++) {
3963		struct mgmt_ltk_info *key = &cp->keys[i];
3964		u8 type, addr_type;
3965
3966		if (key->addr.type == BDADDR_LE_PUBLIC)
3967			addr_type = ADDR_LE_DEV_PUBLIC;
3968		else
3969			addr_type = ADDR_LE_DEV_RANDOM;
3970
3971		if (key->master)
3972			type = HCI_SMP_LTK;
3973		else
3974			type = HCI_SMP_LTK_SLAVE;
3975
3976		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
3977			    type, 0, key->authenticated, key->val,
3978			    key->enc_size, key->ediv, key->rand);
3979	}
3980
3981	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3982			   NULL, 0);
3983
3984	hci_dev_unlock(hdev);
3985
3986	return err;
3987}
3988
3989static const struct mgmt_handler {
3990	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3991		     u16 data_len);
3992	bool var_len;
3993	size_t data_len;
3994} mgmt_handlers[] = {
3995	{ NULL }, /* 0x0000 (no command) */
3996	{ read_version,           false, MGMT_READ_VERSION_SIZE },
3997	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
3998	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
3999	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
4000	{ set_powered,            false, MGMT_SETTING_SIZE },
4001	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
4002	{ set_connectable,        false, MGMT_SETTING_SIZE },
4003	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
4004	{ set_pairable,           false, MGMT_SETTING_SIZE },
4005	{ set_link_security,      false, MGMT_SETTING_SIZE },
4006	{ set_ssp,                false, MGMT_SETTING_SIZE },
4007	{ set_hs,                 false, MGMT_SETTING_SIZE },
4008	{ set_le,                 false, MGMT_SETTING_SIZE },
4009	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
4010	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
4011	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
4012	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
4013	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
4014	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4015	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
4016	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
4017	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
4018	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4019	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
4020	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
4021	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4022	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
4023	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
4024	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4025	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
4026	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4027	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4028	{ add_remote_oob_data,    false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4029	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4030	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
4031	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
4032	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
4033	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
4034	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
4035	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
4036	{ set_advertising,        false, MGMT_SETTING_SIZE },
4037	{ set_bredr,              false, MGMT_SETTING_SIZE },
4038	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
4039	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
4040};
4041
4043int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4044{
4045	void *buf;
4046	u8 *cp;
4047	struct mgmt_hdr *hdr;
4048	u16 opcode, index, len;
4049	struct hci_dev *hdev = NULL;
4050	const struct mgmt_handler *handler;
4051	int err;
4052
4053	BT_DBG("got %zu bytes", msglen);
4054
4055	if (msglen < sizeof(*hdr))
4056		return -EINVAL;
4057
4058	buf = kmalloc(msglen, GFP_KERNEL);
4059	if (!buf)
4060		return -ENOMEM;
4061
4062	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
4063		err = -EFAULT;
4064		goto done;
4065	}
4066
4067	hdr = buf;
4068	opcode = __le16_to_cpu(hdr->opcode);
4069	index = __le16_to_cpu(hdr->index);
4070	len = __le16_to_cpu(hdr->len);
4071
4072	if (len != msglen - sizeof(*hdr)) {
4073		err = -EINVAL;
4074		goto done;
4075	}
4076
4077	if (index != MGMT_INDEX_NONE) {
4078		hdev = hci_dev_get(index);
4079		if (!hdev) {
4080			err = cmd_status(sk, index, opcode,
4081					 MGMT_STATUS_INVALID_INDEX);
4082			goto done;
4083		}
4084
4085		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4086		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4087			err = cmd_status(sk, index, opcode,
4088					 MGMT_STATUS_INVALID_INDEX);
4089			goto done;
4090		}
4091	}
4092
4093	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4094	    mgmt_handlers[opcode].func == NULL) {
4095		BT_DBG("Unknown op %u", opcode);
4096		err = cmd_status(sk, index, opcode,
4097				 MGMT_STATUS_UNKNOWN_COMMAND);
4098		goto done;
4099	}
4100
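	/* Commands below READ_INFO are global and must be sent without a
	 * controller index, while READ_INFO and above operate on a
	 * specific controller and therefore require one.
	 */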
4101	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4102	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4103		err = cmd_status(sk, index, opcode,
4104				 MGMT_STATUS_INVALID_INDEX);
4105		goto done;
4106	}
4107
4108	handler = &mgmt_handlers[opcode];
4109
4110	if ((handler->var_len && len < handler->data_len) ||
4111	    (!handler->var_len && len != handler->data_len)) {
4112		err = cmd_status(sk, index, opcode,
4113				 MGMT_STATUS_INVALID_PARAMS);
4114		goto done;
4115	}
4116
4117	if (hdev)
4118		mgmt_init_hdev(sk, hdev);
4119
4120	cp = buf + sizeof(*hdr);
4121
4122	err = handler->func(sk, hdev, cp, len);
4123	if (err < 0)
4124		goto done;
4125
4126	err = msglen;
4127
4128done:
4129	if (hdev)
4130		hci_dev_put(hdev);
4131
4132	kfree(buf);
4133	return err;
4134}
4135
4136void mgmt_index_added(struct hci_dev *hdev)
4137{
4138	if (hdev->dev_type != HCI_BREDR)
4139		return;
4140
4141	mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4142}
4143
4144void mgmt_index_removed(struct hci_dev *hdev)
4145{
4146	u8 status = MGMT_STATUS_INVALID_INDEX;
4147
4148	if (hdev->dev_type != HCI_BREDR)
4149		return;
4150
4151	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4152
4153	mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4154}
4155
4156static void powered_complete(struct hci_dev *hdev, u8 status)
4157{
4158	struct cmd_lookup match = { NULL, hdev };
4159
4160	BT_DBG("status 0x%02x", status);
4161
4162	hci_dev_lock(hdev);
4163
4164	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4165
4166	new_settings(hdev, match.sk);
4167
4168	hci_dev_unlock(hdev);
4169
4170	if (match.sk)
4171		sock_put(match.sk);
4172}
4173
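/* Bring the controller's HCI state in line with the current mgmt
 * settings after power-on: SSP and LE host support, static address,
 * advertising data, link security and the BR/EDR scan mode, class,
 * name and EIR. Returns the result of running the assembled request,
 * with powered_complete() as its completion callback.
 */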
4174static int powered_update_hci(struct hci_dev *hdev)
4175{
4176	struct hci_request req;
4177	u8 link_sec;
4178
4179	hci_req_init(&req, hdev);
4180
4181	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
4182	    !lmp_host_ssp_capable(hdev)) {
4183		u8 ssp = 1;
4184
4185		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
4186	}
4187
4188	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4189	    lmp_bredr_capable(hdev)) {
4190		struct hci_cp_write_le_host_supported cp;
4191
4192		cp.le = 1;
4193		cp.simul = lmp_le_br_capable(hdev);
4194
4195		/* Check first if we already have the right
4196		 * host state (host features set)
4197		 */
4198		if (cp.le != lmp_host_le_capable(hdev) ||
4199		    cp.simul != lmp_host_le_br_capable(hdev))
4200			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4201				    sizeof(cp), &cp);
4202	}
4203
4204	if (lmp_le_capable(hdev)) {
4205		/* Set random address to static address if configured */
4206		if (bacmp(&hdev->static_addr, BDADDR_ANY))
4207			hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
4208				    &hdev->static_addr);
4209
4210		/* Make sure the controller has a good default for
4211		 * advertising data. This also applies to the case
4212		 * where BR/EDR was toggled during the AUTO_OFF phase.
4213		 */
4214		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4215			update_ad(&req);
4216
4217		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
4218			enable_advertising(&req);
4219	}
4220
4221	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4222	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
4223		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
4224			    sizeof(link_sec), &link_sec);
4225
4226	if (lmp_bredr_capable(hdev)) {
4227		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
4228			set_bredr_scan(&req);
4229		update_class(&req);
4230		update_name(&req);
4231		update_eir(&req);
4232	}
4233
4234	return hci_req_run(&req, powered_complete);
4235}
4236
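/* Called when the power state of a controller changes. On power-on the
 * required HCI updates are queued via powered_update_hci(); on power-off
 * all pending commands are failed with MGMT_STATUS_NOT_POWERED and a
 * zero class of device is signalled before the New Settings event is
 * sent.
 */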
4237int mgmt_powered(struct hci_dev *hdev, u8 powered)
4238{
4239	struct cmd_lookup match = { NULL, hdev };
4240	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
4241	u8 zero_cod[] = { 0, 0, 0 };
4242	int err;
4243
4244	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4245		return 0;
4246
4247	if (powered) {
4248		if (powered_update_hci(hdev) == 0)
4249			return 0;
4250
4251		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
4252				     &match);
4253		goto new_settings;
4254	}
4255
4256	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4257	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
4258
4259	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
4260		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
4261			   zero_cod, sizeof(zero_cod), NULL);
4262
4263new_settings:
4264	err = new_settings(hdev, match.sk);
4265
4266	if (match.sk)
4267		sock_put(match.sk);
4268
4269	return err;
4270}
4271
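/* Fail a pending Set Powered command, mapping -ERFKILL to
 * MGMT_STATUS_RFKILLED and any other error to MGMT_STATUS_FAILED.
 */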
4272void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4273{
4274	struct pending_cmd *cmd;
4275	u8 status;
4276
4277	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4278	if (!cmd)
4279		return;
4280
4281	if (err == -ERFKILL)
4282		status = MGMT_STATUS_RFKILLED;
4283	else
4284		status = MGMT_STATUS_FAILED;
4285
4286	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4287
4288	mgmt_pending_remove(cmd);
4289}
4290
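/* Discoverable timeout handler: clear the limited discoverable flag,
 * fall back to page scan only and refresh the class of device.
 */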
4291void mgmt_discoverable_timeout(struct hci_dev *hdev)
4292{
4293	struct hci_request req;
4294	u8 scan = SCAN_PAGE;
4295
4296	hci_dev_lock(hdev);
4297
4298	/* When the discoverable timeout triggers, just make sure
4299	 * the limited discoverable flag is cleared. Even in the case
4300	 * of a timeout triggered from general discoverable, it is
4301	 * safe to unconditionally clear the flag.
4302	 */
4303	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4304
4305	hci_req_init(&req, hdev);
4306	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
4307	update_class(&req);
4308	hci_req_run(&req, NULL);
4309
4310	hdev->discov_timeout = 0;
4311
4312	hci_dev_unlock(hdev);
4313}
4314
4315void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4316{
4317	bool changed;
4318
4319	/* Nothing needed here if there's a pending command since that
4320	 * command's request completion callback takes care of everything
4321	 * necessary.
4322	 */
4323	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4324		return;
4325
4326	if (discoverable)
4327		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4328	else
4329		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4330
4331	if (changed)
4332		new_settings(hdev, NULL);
4333}
4334
4335void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4336{
4337	bool changed;
4338
4339	/* Nothing needed here if there's a pending command since that
4340	 * command's request completion callback takes care of everything
4341	 * necessary.
4342	 */
4343	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4344		return;
4345
4346	if (connectable)
4347		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4348	else
4349		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4350
4351	if (changed)
4352		new_settings(hdev, NULL);
4353}
4354
4355void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4356{
4357	u8 mgmt_err = mgmt_status(status);
4358
4359	if (scan & SCAN_PAGE)
4360		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4361				     cmd_status_rsp, &mgmt_err);
4362
4363	if (scan & SCAN_INQUIRY)
4364		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4365				     cmd_status_rsp, &mgmt_err);
4366}
4367
4368void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4369		       bool persistent)
4370{
4371	struct mgmt_ev_new_link_key ev;
4372
4373	memset(&ev, 0, sizeof(ev));
4374
4375	ev.store_hint = persistent;
4376	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4377	ev.key.addr.type = BDADDR_BREDR;
4378	ev.key.type = key->type;
4379	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4380	ev.key.pin_len = key->pin_len;
4381
4382	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4383}
4384
4385int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
4386{
4387	struct mgmt_ev_new_long_term_key ev;
4388
4389	memset(&ev, 0, sizeof(ev));
4390
4391	ev.store_hint = persistent;
4392	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4393	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
4394	ev.key.authenticated = key->authenticated;
4395	ev.key.enc_size = key->enc_size;
4396	ev.key.ediv = key->ediv;
4397
4398	if (key->type == HCI_SMP_LTK)
4399		ev.key.master = 1;
4400
4401	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
4402	memcpy(ev.key.val, key->val, sizeof(key->val));
4403
4404	return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
4405			  NULL);
4406}
4407
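/* Append a single EIR field (length, type, data) to the buffer and
 * return the new total EIR length. The caller must ensure the buffer
 * is large enough.
 */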
4408static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4409				  u8 data_len)
4410{
4411	eir[eir_len++] = sizeof(type) + data_len;
4412	eir[eir_len++] = type;
4413	memcpy(&eir[eir_len], data, data_len);
4414	eir_len += data_len;
4415
4416	return eir_len;
4417}
4418
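/* Send a Device Connected event, embedding the remote name and class
 * of device (when available) as EIR fields in the event payload.
 */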
4419void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4420			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
4421			   u8 *dev_class)
4422{
4423	char buf[512];
4424	struct mgmt_ev_device_connected *ev = (void *) buf;
4425	u16 eir_len = 0;
4426
4427	bacpy(&ev->addr.bdaddr, bdaddr);
4428	ev->addr.type = link_to_bdaddr(link_type, addr_type);
4429
4430	ev->flags = __cpu_to_le32(flags);
4431
4432	if (name_len > 0)
4433		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
4434					  name, name_len);
4435
4436	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
4437		eir_len = eir_append_data(ev->eir, eir_len,
4438					  EIR_CLASS_OF_DEV, dev_class, 3);
4439
4440	ev->eir_len = cpu_to_le16(eir_len);
4441
4442	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
4443		    sizeof(*ev) + eir_len, NULL);
4444}
4445
4446static void disconnect_rsp(struct pending_cmd *cmd, void *data)
4447{
4448	struct mgmt_cp_disconnect *cp = cmd->param;
4449	struct sock **sk = data;
4450	struct mgmt_rp_disconnect rp;
4451
4452	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4453	rp.addr.type = cp->addr.type;
4454
4455	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
4456		     sizeof(rp));
4457
4458	*sk = cmd->sk;
4459	sock_hold(*sk);
4460
4461	mgmt_pending_remove(cmd);
4462}
4463
4464static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
4465{
4466	struct hci_dev *hdev = data;
4467	struct mgmt_cp_unpair_device *cp = cmd->param;
4468	struct mgmt_rp_unpair_device rp;
4469
4470	memset(&rp, 0, sizeof(rp));
4471	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4472	rp.addr.type = cp->addr.type;
4473
4474	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
4475
4476	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
4477
4478	mgmt_pending_remove(cmd);
4479}
4480
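/* Send a Device Disconnected event and complete any pending Disconnect
 * and Unpair Device commands for this controller.
 */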
4481void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
4482			      u8 link_type, u8 addr_type, u8 reason)
4483{
4484	struct mgmt_ev_device_disconnected ev;
4485	struct sock *sk = NULL;
4486
4487	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
4488
4489	bacpy(&ev.addr.bdaddr, bdaddr);
4490	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4491	ev.reason = reason;
4492
4493	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
4494
4495	if (sk)
4496		sock_put(sk);
4497
4498	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
4499			     hdev);
4500}
4501
4502void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
4503			    u8 link_type, u8 addr_type, u8 status)
4504{
4505	struct mgmt_rp_disconnect rp;
4506	struct pending_cmd *cmd;
4507
4508	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
4509			     hdev);
4510
4511	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
4512	if (!cmd)
4513		return;
4514
4515	bacpy(&rp.addr.bdaddr, bdaddr);
4516	rp.addr.type = link_to_bdaddr(link_type, addr_type);
4517
4518	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
4519		     mgmt_status(status), &rp, sizeof(rp));
4520
4521	mgmt_pending_remove(cmd);
4522}
4523
4524void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4525			 u8 addr_type, u8 status)
4526{
4527	struct mgmt_ev_connect_failed ev;
4528
4529	bacpy(&ev.addr.bdaddr, bdaddr);
4530	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4531	ev.status = mgmt_status(status);
4532
4533	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
4534}
4535
4536void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
4537{
4538	struct mgmt_ev_pin_code_request ev;
4539
4540	bacpy(&ev.addr.bdaddr, bdaddr);
4541	ev.addr.type = BDADDR_BREDR;
4542	ev.secure = secure;
4543
4544	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
4545}
4546
4547void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4548				  u8 status)
4549{
4550	struct pending_cmd *cmd;
4551	struct mgmt_rp_pin_code_reply rp;
4552
4553	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
4554	if (!cmd)
4555		return;
4556
4557	bacpy(&rp.addr.bdaddr, bdaddr);
4558	rp.addr.type = BDADDR_BREDR;
4559
4560	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
4561		     mgmt_status(status), &rp, sizeof(rp));
4562
4563	mgmt_pending_remove(cmd);
4564}
4565
4566void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4567				      u8 status)
4568{
4569	struct pending_cmd *cmd;
4570	struct mgmt_rp_pin_code_reply rp;
4571
4572	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
4573	if (!cmd)
4574		return;
4575
4576	bacpy(&rp.addr.bdaddr, bdaddr);
4577	rp.addr.type = BDADDR_BREDR;
4578
4579	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
4580		     mgmt_status(status), &rp, sizeof(rp));
4581
4582	mgmt_pending_remove(cmd);
4583}
4584
4585int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4586			      u8 link_type, u8 addr_type, __le32 value,
4587			      u8 confirm_hint)
4588{
4589	struct mgmt_ev_user_confirm_request ev;
4590
4591	BT_DBG("%s", hdev->name);
4592
4593	bacpy(&ev.addr.bdaddr, bdaddr);
4594	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4595	ev.confirm_hint = confirm_hint;
4596	ev.value = value;
4597
4598	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
4599			  NULL);
4600}
4601
4602int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4603			      u8 link_type, u8 addr_type)
4604{
4605	struct mgmt_ev_user_passkey_request ev;
4606
4607	BT_DBG("%s", hdev->name);
4608
4609	bacpy(&ev.addr.bdaddr, bdaddr);
4610	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4611
4612	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
4613			  NULL);
4614}
4615
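/* Common helper for completing a pending user confirmation or passkey
 * (negative) reply command with the given status.
 */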
4616static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4617				      u8 link_type, u8 addr_type, u8 status,
4618				      u8 opcode)
4619{
4620	struct pending_cmd *cmd;
4621	struct mgmt_rp_user_confirm_reply rp;
4622	int err;
4623
4624	cmd = mgmt_pending_find(opcode, hdev);
4625	if (!cmd)
4626		return -ENOENT;
4627
4628	bacpy(&rp.addr.bdaddr, bdaddr);
4629	rp.addr.type = link_to_bdaddr(link_type, addr_type);
4630	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
4631			   &rp, sizeof(rp));
4632
4633	mgmt_pending_remove(cmd);
4634
4635	return err;
4636}
4637
4638int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4639				     u8 link_type, u8 addr_type, u8 status)
4640{
4641	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4642					  status, MGMT_OP_USER_CONFIRM_REPLY);
4643}
4644
4645int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4646					 u8 link_type, u8 addr_type, u8 status)
4647{
4648	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4649					  status,
4650					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
4651}
4652
4653int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4654				     u8 link_type, u8 addr_type, u8 status)
4655{
4656	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4657					  status, MGMT_OP_USER_PASSKEY_REPLY);
4658}
4659
4660int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4661					 u8 link_type, u8 addr_type, u8 status)
4662{
4663	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4664					  status,
4665					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
4666}
4667
4668int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
4669			     u8 link_type, u8 addr_type, u32 passkey,
4670			     u8 entered)
4671{
4672	struct mgmt_ev_passkey_notify ev;
4673
4674	BT_DBG("%s", hdev->name);
4675
4676	bacpy(&ev.addr.bdaddr, bdaddr);
4677	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4678	ev.passkey = __cpu_to_le32(passkey);
4679	ev.entered = entered;
4680
4681	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
4682}
4683
4684void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4685		      u8 addr_type, u8 status)
4686{
4687	struct mgmt_ev_auth_failed ev;
4688
4689	bacpy(&ev.addr.bdaddr, bdaddr);
4690	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4691	ev.status = mgmt_status(status);
4692
4693	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
4694}
4695
4696int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
4697{
4698	struct cmd_lookup match = { NULL, hdev };
4699	bool changed = false;
4700	int err = 0;
4701
4702	if (status) {
4703		u8 mgmt_err = mgmt_status(status);
4704		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
4705				     cmd_status_rsp, &mgmt_err);
4706		return 0;
4707	}
4708
4709	if (test_bit(HCI_AUTH, &hdev->flags)) {
4710		if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
4711			changed = true;
4712	} else {
4713		if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
4714			changed = true;
4715	}
4716
4717	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
4718			     &match);
4719
4720	if (changed)
4721		err = new_settings(hdev, match.sk);
4722
4723	if (match.sk)
4724		sock_put(match.sk);
4725
4726	return err;
4727}
4728
4729static void clear_eir(struct hci_request *req)
4730{
4731	struct hci_dev *hdev = req->hdev;
4732	struct hci_cp_write_eir cp;
4733
4734	if (!lmp_ext_inq_capable(hdev))
4735		return;
4736
4737	memset(hdev->eir, 0, sizeof(hdev->eir));
4738
4739	memset(&cp, 0, sizeof(cp));
4740
4741	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
4742}
4743
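/* Handle completion of an SSP mode change: update the SSP (and, when
 * disabling, HS) flags, respond to pending Set SSP commands, emit a
 * New Settings event if anything changed and update or clear the EIR
 * data accordingly.
 */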
4744int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4745{
4746	struct cmd_lookup match = { NULL, hdev };
4747	struct hci_request req;
4748	bool changed = false;
4749	int err = 0;
4750
4751	if (status) {
4752		u8 mgmt_err = mgmt_status(status);
4753
4754		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
4755						 &hdev->dev_flags)) {
4756			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4757			err = new_settings(hdev, NULL);
4758		}
4759
4760		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
4761				     &mgmt_err);
4762
4763		return err;
4764	}
4765
4766	if (enable) {
4767		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4768	} else {
4769		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4770		if (!changed)
4771			changed = test_and_clear_bit(HCI_HS_ENABLED,
4772						     &hdev->dev_flags);
4773		else
4774			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4775	}
4776
4777	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
4778
4779	if (changed)
4780		err = new_settings(hdev, match.sk);
4781
4782	if (match.sk)
4783		sock_put(match.sk);
4784
4785	hci_req_init(&req, hdev);
4786
4787	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4788		update_eir(&req);
4789	else
4790		clear_eir(&req);
4791
4792	hci_req_run(&req, NULL);
4793
4794	return err;
4795}
4796
4797static void sk_lookup(struct pending_cmd *cmd, void *data)
4798{
4799	struct cmd_lookup *match = data;
4800
4801	if (match->sk == NULL) {
4802		match->sk = cmd->sk;
4803		sock_hold(match->sk);
4804	}
4805}
4806
4807int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
4808				   u8 status)
4809{
4810	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
4811	int err = 0;
4812
4813	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
4814	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
4815	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
4816
4817	if (!status)
4818		err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
4819				 3, NULL);
4820
4821	if (match.sk)
4822		sock_put(match.sk);
4823
4824	return err;
4825}
4826
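/* Handle completion of a local name update. Without a pending Set
 * Local Name command the change originated elsewhere (e.g. the
 * power-on sequence); in that case the stored name is updated and
 * the event is suppressed while a Set Powered command is pending.
 */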
4827int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
4828{
4829	struct mgmt_cp_set_local_name ev;
4830	struct pending_cmd *cmd;
4831
4832	if (status)
4833		return 0;
4834
4835	memset(&ev, 0, sizeof(ev));
4836	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
4837	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
4838
4839	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
4840	if (!cmd) {
4841		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
4842
4843		/* If this is an HCI command related to powering on the
4844		 * HCI dev, don't send any mgmt signals.
4845		 */
4846		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4847			return 0;
4848	}
4849
4850	return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
4851			  cmd ? cmd->sk : NULL);
4852}
4853
4854int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
4855					    u8 *randomizer, u8 status)
4856{
4857	struct pending_cmd *cmd;
4858	int err;
4859
4860	BT_DBG("%s status %u", hdev->name, status);
4861
4862	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4863	if (!cmd)
4864		return -ENOENT;
4865
4866	if (status) {
4867		err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4868				 mgmt_status(status));
4869	} else {
4870		struct mgmt_rp_read_local_oob_data rp;
4871
4872		memcpy(rp.hash, hash, sizeof(rp.hash));
4873		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
4874
4875		err = cmd_complete(cmd->sk, hdev->id,
4876				   MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
4877				   sizeof(rp));
4878	}
4879
4880	mgmt_pending_remove(cmd);
4881
4882	return err;
4883}
4884
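/* Report a discovered device to management sockets. The event is only
 * sent while discovery is active, and the class of device is appended
 * as an EIR field if the EIR data does not already contain one.
 */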
4885void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4886		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
4887		       ssp, u8 *eir, u16 eir_len)
4888{
4889	char buf[512];
4890	struct mgmt_ev_device_found *ev = (void *) buf;
4891	size_t ev_size;
4892
4893	if (!hci_discovery_active(hdev))
4894		return;
4895
4896	/* Leave 5 bytes for a potential CoD field */
4897	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4898		return;
4899
4900	memset(buf, 0, sizeof(buf));
4901
4902	bacpy(&ev->addr.bdaddr, bdaddr);
4903	ev->addr.type = link_to_bdaddr(link_type, addr_type);
4904	ev->rssi = rssi;
4905	if (cfm_name)
4906		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4907	if (!ssp)
4908		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4909
4910	if (eir_len > 0)
4911		memcpy(ev->eir, eir, eir_len);
4912
4913	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4914		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4915					  dev_class, 3);
4916
4917	ev->eir_len = cpu_to_le16(eir_len);
4918	ev_size = sizeof(*ev) + eir_len;
4919
4920	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
4921}
4922
4923void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4924		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4925{
4926	struct mgmt_ev_device_found *ev;
4927	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4928	u16 eir_len;
4929
4930	ev = (struct mgmt_ev_device_found *) buf;
4931
4932	memset(buf, 0, sizeof(buf));
4933
4934	bacpy(&ev->addr.bdaddr, bdaddr);
4935	ev->addr.type = link_to_bdaddr(link_type, addr_type);
4936	ev->rssi = rssi;
4937
4938	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4939				  name_len);
4940
4941	ev->eir_len = cpu_to_le16(eir_len);
4942
4943	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
4944}
4945
4946void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4947{
4948	struct mgmt_ev_discovering ev;
4949	struct pending_cmd *cmd;
4950
4951	BT_DBG("%s discovering %u", hdev->name, discovering);
4952
4953	if (discovering)
4954		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4955	else
4956		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4957
4958	if (cmd != NULL) {
4959		u8 type = hdev->discovery.type;
4960
4961		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4962			     sizeof(type));
4963		mgmt_pending_remove(cmd);
4964	}
4965
4966	memset(&ev, 0, sizeof(ev));
4967	ev.type = hdev->discovery.type;
4968	ev.discovering = discovering;
4969
4970	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
4971}
4972
4973int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4974{
4975	struct pending_cmd *cmd;
4976	struct mgmt_ev_device_blocked ev;
4977
4978	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4979
4980	bacpy(&ev.addr.bdaddr, bdaddr);
4981	ev.addr.type = type;
4982
4983	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4984			  cmd ? cmd->sk : NULL);
4985}
4986
4987int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4988{
4989	struct pending_cmd *cmd;
4990	struct mgmt_ev_device_unblocked ev;
4991
4992	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4993
4994	bacpy(&ev.addr.bdaddr, bdaddr);
4995	ev.addr.type = type;
4996
4997	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4998			  cmd ? cmd->sk : NULL);
4999}
5000
5001static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5002{
5003	BT_DBG("%s status %u", hdev->name, status);
5004
5005	/* Clear the advertising mgmt setting if we failed to re-enable it */
5006	if (status) {
5007		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5008		new_settings(hdev, NULL);
5009	}
5010}
5011
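/* Re-enable advertising once no LE connections remain, provided the
 * advertising setting is still enabled. If the HCI request fails, the
 * advertising flag is cleared and user space is notified through a
 * New Settings event.
 */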
5012void mgmt_reenable_advertising(struct hci_dev *hdev)
5013{
5014	struct hci_request req;
5015
5016	if (hci_conn_num(hdev, LE_LINK) > 0)
5017		return;
5018
5019	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5020		return;
5021
5022	hci_req_init(&req, hdev);
5023	enable_advertising(&req);
5024
5025	/* If this fails, we have no option but to let user space know
5026	 * that we've disabled advertising.
5027	 */
5028	if (hci_req_run(&req, adv_enable_complete) < 0) {
5029		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5030		new_settings(hdev, NULL);
5031	}
5032}
5033