mgmt.c revision dcf4adbfdc7ad14ca50c1133f93f998c78493c2d
1/*
2   BlueZ - Bluetooth protocol stack for Linux
3
4   Copyright (C) 2010  Nokia Corporation
5   Copyright (C) 2011-2012 Intel Corporation
6
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License version 2 as
9   published by the Free Software Foundation;
10
11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22   SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI Management interface */
26
27#include <linux/module.h>
28#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
32#include <net/bluetooth/mgmt.h>
33
34#include "smp.h"
35
36#define MGMT_VERSION	1
37#define MGMT_REVISION	5
38
/* Management opcodes this kernel supports; serialized into the
 * Read Commands reply by read_commands().
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
};
87
/* Management events this kernel can emit; serialized into the
 * Read Commands reply by read_commands().
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
};
113
/* 2 second cache timeout — presumably paired with the service cache
 * (see service_cache_off()); confirm against the users of this macro.
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* A controller only counts as powered from mgmt's point of view when
 * it is up AND not merely auto-powered during setup (HCI_AUTO_OFF).
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
118
/* A mgmt command whose HCI work has not completed yet */
struct pending_cmd {
	struct list_head list;	/* entry in hdev->mgmt_pending */
	u16 opcode;		/* mgmt opcode being processed */
	int index;		/* controller index (hdev->id) */
	void *param;		/* copy of the command parameters */
	struct sock *sk;	/* issuing socket; a reference is held */
	void *user_data;	/* opaque, owned by the command handler */
};
127
128/* HCI to MGMT error code conversion table */
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code; mgmt_status() returns
 * MGMT_STATUS_FAILED for any code beyond the end of this table, so
 * entries must stay in HCI status-code order with no gaps.
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
192
193static u8 mgmt_status(u8 hci_status)
194{
195	if (hci_status < ARRAY_SIZE(mgmt_status_table))
196		return mgmt_status_table[hci_status];
197
198	return MGMT_STATUS_FAILED;
199}
200
/* Send a MGMT_EV_CMD_STATUS event back to the socket that issued the
 * command 'cmd'. Used for replies that carry only a status byte and
 * no response parameters (typically errors).
 *
 * Returns 0 on success or a negative errno; the skb is freed here if
 * queueing to the socket fails.
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Wire format: mgmt header followed by the fixed-size event */
	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
230
/* Send a MGMT_EV_CMD_COMPLETE event for command 'cmd' back to the
 * issuing socket, with rp_len bytes of response parameters appended
 * (rp may be NULL when rp_len is 0).
 *
 * Returns 0 on success or a negative errno; the skb is freed here if
 * queueing to the socket fails.
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Wire format: mgmt header, then the event plus its payload */
	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
264
265static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
266			u16 data_len)
267{
268	struct mgmt_rp_read_version rp;
269
270	BT_DBG("sock %p", sk);
271
272	rp.version = MGMT_VERSION;
273	rp.revision = cpu_to_le16(MGMT_REVISION);
274
275	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
276			    sizeof(rp));
277}
278
279static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
280			 u16 data_len)
281{
282	struct mgmt_rp_read_commands *rp;
283	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
284	const u16 num_events = ARRAY_SIZE(mgmt_events);
285	__le16 *opcode;
286	size_t rp_size;
287	int i, err;
288
289	BT_DBG("sock %p", sk);
290
291	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
292
293	rp = kmalloc(rp_size, GFP_KERNEL);
294	if (!rp)
295		return -ENOMEM;
296
297	rp->num_commands = cpu_to_le16(num_commands);
298	rp->num_events = cpu_to_le16(num_events);
299
300	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
301		put_unaligned_le16(mgmt_commands[i], opcode);
302
303	for (i = 0; i < num_events; i++, opcode++)
304		put_unaligned_le16(mgmt_events[i], opcode);
305
306	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
307			   rp_size);
308	kfree(rp);
309
310	return err;
311}
312
313static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
314			   u16 data_len)
315{
316	struct mgmt_rp_read_index_list *rp;
317	struct hci_dev *d;
318	size_t rp_len;
319	u16 count;
320	int err;
321
322	BT_DBG("sock %p", sk);
323
324	read_lock(&hci_dev_list_lock);
325
326	count = 0;
327	list_for_each_entry(d, &hci_dev_list, list) {
328		if (d->dev_type == HCI_BREDR)
329			count++;
330	}
331
332	rp_len = sizeof(*rp) + (2 * count);
333	rp = kmalloc(rp_len, GFP_ATOMIC);
334	if (!rp) {
335		read_unlock(&hci_dev_list_lock);
336		return -ENOMEM;
337	}
338
339	count = 0;
340	list_for_each_entry(d, &hci_dev_list, list) {
341		if (test_bit(HCI_SETUP, &d->dev_flags))
342			continue;
343
344		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
345			continue;
346
347		if (d->dev_type == HCI_BREDR) {
348			rp->index[count++] = cpu_to_le16(d->id);
349			BT_DBG("Added hci%u", d->id);
350		}
351	}
352
353	rp->num_controllers = cpu_to_le16(count);
354	rp_len = sizeof(*rp) + (2 * count);
355
356	read_unlock(&hci_dev_list_lock);
357
358	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
359			   rp_len);
360
361	kfree(rp);
362
363	return err;
364}
365
366static u32 get_supported_settings(struct hci_dev *hdev)
367{
368	u32 settings = 0;
369
370	settings |= MGMT_SETTING_POWERED;
371	settings |= MGMT_SETTING_PAIRABLE;
372	settings |= MGMT_SETTING_DEBUG_KEYS;
373
374	if (lmp_bredr_capable(hdev)) {
375		settings |= MGMT_SETTING_CONNECTABLE;
376		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
377			settings |= MGMT_SETTING_FAST_CONNECTABLE;
378		settings |= MGMT_SETTING_DISCOVERABLE;
379		settings |= MGMT_SETTING_BREDR;
380		settings |= MGMT_SETTING_LINK_SECURITY;
381
382		if (lmp_ssp_capable(hdev)) {
383			settings |= MGMT_SETTING_SSP;
384			settings |= MGMT_SETTING_HS;
385		}
386
387		if (lmp_sc_capable(hdev) ||
388		    test_bit(HCI_FORCE_SC, &hdev->dev_flags))
389			settings |= MGMT_SETTING_SECURE_CONN;
390	}
391
392	if (lmp_le_capable(hdev)) {
393		settings |= MGMT_SETTING_LE;
394		settings |= MGMT_SETTING_ADVERTISING;
395		settings |= MGMT_SETTING_PRIVACY;
396	}
397
398	return settings;
399}
400
401static u32 get_current_settings(struct hci_dev *hdev)
402{
403	u32 settings = 0;
404
405	if (hdev_is_powered(hdev))
406		settings |= MGMT_SETTING_POWERED;
407
408	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
409		settings |= MGMT_SETTING_CONNECTABLE;
410
411	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
412		settings |= MGMT_SETTING_FAST_CONNECTABLE;
413
414	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
415		settings |= MGMT_SETTING_DISCOVERABLE;
416
417	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
418		settings |= MGMT_SETTING_PAIRABLE;
419
420	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
421		settings |= MGMT_SETTING_BREDR;
422
423	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
424		settings |= MGMT_SETTING_LE;
425
426	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
427		settings |= MGMT_SETTING_LINK_SECURITY;
428
429	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
430		settings |= MGMT_SETTING_SSP;
431
432	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
433		settings |= MGMT_SETTING_HS;
434
435	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
436		settings |= MGMT_SETTING_ADVERTISING;
437
438	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
439		settings |= MGMT_SETTING_SECURE_CONN;
440
441	if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
442		settings |= MGMT_SETTING_DEBUG_KEYS;
443
444	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
445		settings |= MGMT_SETTING_PRIVACY;
446
447	return settings;
448}
449
450#define PNP_INFO_SVCLASS_ID		0x1200
451
/* Append an EIR field listing the device's 16-bit service UUIDs to
 * data, using at most len bytes.
 *
 * Only stored 128-bit UUIDs whose 16-bit alias (bytes 12-13) is a
 * SIG-assigned value (>= 0x1100) are included, and PnP Information is
 * deliberately skipped. When not all UUIDs fit, the field type is
 * downgraded from "complete" to "some" (incomplete) list.
 *
 * Returns a pointer just past the data written (== data when nothing
 * was written).
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Minimum: 2 bytes field header + one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Emit the field header lazily, on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
493
/* Append an EIR field listing the device's 32-bit service UUIDs to
 * data, using at most len bytes. When not all UUIDs fit, the field
 * type is downgraded from "complete" to "some" (incomplete) list.
 *
 * Returns a pointer just past the data written (== data when nothing
 * was written).
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Minimum: 2 bytes field header + one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Emit the field header lazily, on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit alias lives in bytes 12-15 of the stored UUID */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
526
/* Append an EIR field listing the device's 128-bit service UUIDs to
 * data, using at most len bytes. When not all UUIDs fit, the field
 * type is downgraded from "complete" to "some" (incomplete) list.
 *
 * Returns a pointer just past the data written (== data when nothing
 * was written).
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Minimum: 2 bytes field header + one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Emit the field header lazily, on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
559
560static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
561{
562	struct pending_cmd *cmd;
563
564	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
565		if (cmd->opcode == opcode)
566			return cmd;
567	}
568
569	return NULL;
570}
571
/* Build the LE scan response payload into ptr. Currently only the
 * local name is included; it is truncated and tagged EIR_NAME_SHORT
 * when it does not fit within HCI_MAX_AD_LENGTH.
 *
 * Returns the number of bytes written.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve 2 bytes for the field header (length + type) */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* Field length covers the type byte plus the name bytes */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
597
/* Queue an HCI command to refresh the controller's LE scan response
 * data. Does nothing when LE is disabled or when the freshly built
 * data matches what the controller already has.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI round-trip if nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Cache the new data so the next comparison sees it */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
622
623static u8 get_adv_discov_flags(struct hci_dev *hdev)
624{
625	struct pending_cmd *cmd;
626
627	/* If there's a pending mgmt command the flags will not yet have
628	 * their final values, so check for this first.
629	 */
630	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
631	if (cmd) {
632		struct mgmt_mode *cp = cmd->param;
633		if (cp->val == 0x01)
634			return LE_AD_GENERAL;
635		else if (cp->val == 0x02)
636			return LE_AD_LIMITED;
637	} else {
638		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
639			return LE_AD_LIMITED;
640		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
641			return LE_AD_GENERAL;
642	}
643
644	return 0;
645}
646
/* Build the LE advertising data into ptr: a flags field (when any
 * flag bit is set) followed by the TX power level (when known).
 *
 * Returns the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	/* Advertise BR/EDR-less operation when BR/EDR is disabled */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		/* 3 bytes: field length, EIR_FLAGS type, flags byte */
		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
678
/* Queue an HCI command to refresh the controller's LE advertising
 * data. Does nothing when LE is disabled or when the freshly built
 * data matches what the controller already has.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI round-trip if nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the new data so the next comparison sees it */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
703
/* Assemble the extended inquiry response payload into data: local
 * name (shortened beyond 48 bytes), TX power, Device ID record and
 * the 16/32/128-bit service UUID lists. The UUID helpers are handed
 * whatever space remains up to HCI_MAX_EIR_LENGTH.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID: source, vendor, product, version (8 bytes LE) */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
751
/* Queue a Write EIR command with freshly assembled inquiry response
 * data. Skipped when the controller is off, lacks extended inquiry
 * support, has SSP disabled, the service cache is active, or the new
 * data is identical to what is already cached.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the HCI round-trip if nothing changed */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	/* Cache the new data so the next comparison sees it */
	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
780
781static u8 get_service_classes(struct hci_dev *hdev)
782{
783	struct bt_uuid *uuid;
784	u8 val = 0;
785
786	list_for_each_entry(uuid, &hdev->uuids, list)
787		val |= uuid->svc_hint;
788
789	return val;
790}
791
/* Queue a Write Class of Device command built from the stored
 * minor/major class and the service-class hints. Skipped while the
 * controller is off, BR/EDR is disabled, the service cache is active,
 * or the class is unchanged.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* 0x20 in the major-class byte marks limited discoverable mode */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
820
821static bool get_connectable(struct hci_dev *hdev)
822{
823	struct pending_cmd *cmd;
824
825	/* If there's a pending mgmt command the flag will not yet have
826	 * it's final value, so check for this first.
827	 */
828	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
829	if (cmd) {
830		struct mgmt_mode *cp = cmd->param;
831		return cp->val;
832	}
833
834	return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
835}
836
/* Queue the HCI commands that configure and enable LE advertising.
 * Connectable advertising (ADV_IND) is used when the controller is
 * connectable, otherwise non-connectable (ADV_NONCONN_IND). Nothing
 * is queued if a suitable random address cannot be arranged.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Clear the HCI_ADVERTISING bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
871
872static void disable_advertising(struct hci_request *req)
873{
874	u8 enable = 0x00;
875
876	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
877}
878
/* Delayed work handler: when the service cache period ends, push the
 * real EIR data and Class of Device to the controller in a single
 * HCI request.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act on the transition from cached to uncached */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
899
/* Delayed work handler: mark the resolvable private address expired
 * and, if we are advertising with no LE connections up, restart
 * advertising so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Nothing to refresh unless advertising with no LE links */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
925
/* One-time per-controller mgmt initialization, performed the first
 * time a mgmt socket touches the device (HCI_MGMT acts as the
 * "already initialized" latch).
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
941
/* Answer Read Controller Information with the device's address, HCI
 * version, manufacturer, supported/current settings, class and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	/* Lock to get a consistent snapshot of the device state */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
971
/* Release a pending command: drop the held socket reference and free
 * the parameter copy along with the command itself.
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
978
979static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
980					    struct hci_dev *hdev, void *data,
981					    u16 len)
982{
983	struct pending_cmd *cmd;
984
985	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
986	if (!cmd)
987		return NULL;
988
989	cmd->opcode = opcode;
990	cmd->index = hdev->id;
991
992	cmd->param = kmalloc(len, GFP_KERNEL);
993	if (!cmd->param) {
994		kfree(cmd);
995		return NULL;
996	}
997
998	if (data)
999		memcpy(cmd->param, data, len);
1000
1001	cmd->sk = sk;
1002	sock_hold(sk);
1003
1004	list_add(&cmd->list, &hdev->mgmt_pending);
1005
1006	return cmd;
1007}
1008
1009static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1010				 void (*cb)(struct pending_cmd *cmd,
1011					    void *data),
1012				 void *data)
1013{
1014	struct pending_cmd *cmd, *tmp;
1015
1016	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1017		if (opcode > 0 && cmd->opcode != opcode)
1018			continue;
1019
1020		cb(cmd, data);
1021	}
1022}
1023
/* Unlink a pending command from its controller's list and free it */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1029
1030static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1031{
1032	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1033
1034	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1035			    sizeof(settings));
1036}
1037
/* Completion callback for clean_up_hci_state(): once all connections
 * are gone, cancel the pending delayed power-off and run it now.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1047
/* Build and run an HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan, stop advertising, stop LE
 * scanning, and disconnect, cancel or reject every connection
 * depending on its state.
 *
 * Returns the hci_req_run() result; callers treat -ENODATA as "no
 * HCI commands needed to be queued".
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
		hci_req_add_le_scan_disable(&req);
	}

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established link: disconnect it */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing attempt: cancel its creation */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming request: reject it */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
1102
/* Handle the MGMT_OP_SET_POWERED command: power the controller up or
 * down on behalf of the requesting management socket.
 *
 * Returns 0 or a negative errno. On success the mgmt reply is sent
 * asynchronously via the pending command once the power transition
 * completes (or immediately if there is nothing to change).
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only 0x00 (off) and 0x01 (on) are valid mode values */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_POWERED operation may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* If the controller was only powered temporarily (auto-off
	 * scheduled), cancel the pending power-off. When powering on,
	 * the HCI side is already up, so just flip the mgmt powered
	 * state directly.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: just confirm current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1168
/* Build and broadcast a management event to all control sockets.
 *
 * @event:    mgmt event opcode
 * @hdev:     originating controller, or NULL for a global event
 *            (index is then set to MGMT_INDEX_NONE)
 * @data:     optional event payload (may be NULL when data_len is 0)
 * @data_len: payload length in bytes
 * @skip_sk:  socket to exclude from delivery, typically the command
 *            originator which gets a direct reply instead
 *
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
1198
1199static int new_settings(struct hci_dev *hdev, struct sock *skip)
1200{
1201	__le32 ev;
1202
1203	ev = cpu_to_le32(get_current_settings(hdev));
1204
1205	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1206}
1207
/* Context shared by mgmt_pending_foreach() callbacks that need to
 * remember one of the sockets involved and an overall status.
 */
struct cmd_lookup {
	struct sock *sk;	/* first socket seen (reference held), or NULL */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1213
/* mgmt_pending_foreach() callback: reply to a pending settings command
 * with the current settings and free the pending entry.
 *
 * The first command's socket is stashed (with a held reference) in
 * match->sk so the caller can skip it when broadcasting New Settings;
 * the caller is responsible for the matching sock_put().
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1229
1230static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1231{
1232	u8 *status = data;
1233
1234	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1235	mgmt_pending_remove(cmd);
1236}
1237
1238static u8 mgmt_bredr_support(struct hci_dev *hdev)
1239{
1240	if (!lmp_bredr_capable(hdev))
1241		return MGMT_STATUS_NOT_SUPPORTED;
1242	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1243		return MGMT_STATUS_REJECTED;
1244	else
1245		return MGMT_STATUS_SUCCESS;
1246}
1247
1248static u8 mgmt_le_support(struct hci_dev *hdev)
1249{
1250	if (!lmp_le_capable(hdev))
1251		return MGMT_STATUS_NOT_SUPPORTED;
1252	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1253		return MGMT_STATUS_REJECTED;
1254	else
1255		return MGMT_STATUS_SUCCESS;
1256}
1257
/* Request-completion callback for the HCI transaction started by
 * set_discoverable(). Sends the mgmt response, updates the
 * HCI_DISCOVERABLE flag, arms the discoverable timeout and refreshes
 * the class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited bit that set_discoverable() set
		 * optimistically before running the request.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timer that turns discoverable back off */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1314
/* Handle MGMT_OP_SET_DISCOVERABLE: switch the controller between
 * non-discoverable (0x00), general discoverable (0x01) and limited
 * discoverable (0x02), optionally with a timeout after which
 * discoverable mode is turned off again.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable is only allowed while connectable */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* Powered off: just toggle the flag, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	/* BR/EDR: page scan stays on; inquiry scan is added below when
	 * enabling discoverable.
	 */
	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1479
/* Queue the HCI commands that switch between fast (interlaced scan,
 * 160 msec interval) and standard (1.28 sec interval) page scanning.
 *
 * No-op on controllers without BR/EDR enabled or older than Bluetooth
 * 1.2. Each HCI command is only queued when the controller's cached
 * parameters differ from the desired ones.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1514
/* Request-completion callback for the HCI transaction started by
 * set_connectable(). Sends the mgmt response, updates the
 * HCI_CONNECTABLE flag and broadcasts New Settings if it changed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1552
1553static int set_connectable_update_settings(struct hci_dev *hdev,
1554					   struct sock *sk, u8 val)
1555{
1556	bool changed = false;
1557	int err;
1558
1559	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1560		changed = true;
1561
1562	if (val) {
1563		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1564	} else {
1565		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1566		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1567	}
1568
1569	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1570	if (err < 0)
1571		return err;
1572
1573	if (changed)
1574		return new_settings(hdev, sk);
1575
1576	return 0;
1577}
1578
/* Handle MGMT_OP_SET_CONNECTABLE: enable or disable whether remote
 * devices can connect to the controller. On BR/EDR this toggles page
 * scanning; on LE-only controllers it is reflected in the advertising
 * data instead.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just update the stored setting */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Going non-connectable kills discoverable too,
			 * so stop any pending discoverable timeout.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so it picks up the new connectable state,
	 * but only when no LE connection is active.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* ENODATA: nothing to send to the controller, so the
		 * setting can be updated directly.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1673
1674static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1675			u16 len)
1676{
1677	struct mgmt_mode *cp = data;
1678	bool changed;
1679	int err;
1680
1681	BT_DBG("request for %s", hdev->name);
1682
1683	if (cp->val != 0x00 && cp->val != 0x01)
1684		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1685				  MGMT_STATUS_INVALID_PARAMS);
1686
1687	hci_dev_lock(hdev);
1688
1689	if (cp->val)
1690		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1691	else
1692		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1693
1694	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1695	if (err < 0)
1696		goto unlock;
1697
1698	if (changed)
1699		err = new_settings(hdev, sk);
1700
1701unlock:
1702	hci_dev_unlock(hdev);
1703	return err;
1704}
1705
/* Handle MGMT_OP_SET_LINK_SECURITY: enable or disable BR/EDR link
 * level security (authentication) via HCI Write Authentication Enable.
 * When powered off only the stored flag is updated.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just toggle the flag, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: confirm */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1775
/* Handle MGMT_OP_SET_SSP: enable or disable Secure Simple Pairing via
 * HCI Write Simple Pairing Mode. When powered off only the stored
 * flags are updated. Disabling SSP also disables High Speed, since HS
 * depends on SSP.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just toggle the flags, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Disabling SSP drags HS down with it; report a
			 * change if either flag was actually cleared.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1853
/* Handle MGMT_OP_SET_HS: toggle the High Speed (AMP) setting. This is
 * a host-side flag only; no HCI command is sent. Requires BR/EDR, SSP
 * capability and SSP being enabled.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only permitted while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1904
/* Request-completion callback for set_le(). On failure, fail all
 * pending SET_LE commands with the translated status; on success,
 * answer them with the new settings, broadcast New Settings and
 * refresh the LE advertising/scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Drop the socket reference taken by settings_rsp() */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
1942
/* Handle MGMT_OP_SET_LE: enable or disable Low Energy support via HCI
 * Write LE Host Supported. When powered off, or when the controller
 * already has the requested LE host setting, only the stored flags are
 * updated. Disabling LE also disables advertising.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Flags-only path: powered off, or the controller's LE host
	 * support already matches the requested value.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Turning LE off implies turning advertising off */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Advertising must be stopped before LE is disabled */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2031
2032/* This is a helper function to test for pending mgmt commands that can
2033 * cause CoD or EIR HCI commands. We can only allow one such pending
2034 * mgmt command at a time since otherwise we cannot easily track what
2035 * the current values are, will be, and based on that calculate if a new
2036 * HCI command needs to be sent and if yes with what value.
2037 */
2038static bool pending_eir_or_class(struct hci_dev *hdev)
2039{
2040	struct pending_cmd *cmd;
2041
2042	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2043		switch (cmd->opcode) {
2044		case MGMT_OP_ADD_UUID:
2045		case MGMT_OP_REMOVE_UUID:
2046		case MGMT_OP_SET_DEV_CLASS:
2047		case MGMT_OP_SET_POWERED:
2048			return true;
2049		}
2050	}
2051
2052	return false;
2053}
2054
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. 16-bit and 32-bit UUIDs occupy the last
 * four bytes of this layout (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2059
2060static u8 get_uuid_size(const u8 *uuid)
2061{
2062	u32 val;
2063
2064	if (memcmp(uuid, bluetooth_base_uuid, 12))
2065		return 128;
2066
2067	val = get_unaligned_le32(&uuid[12]);
2068	if (val > 0xffff)
2069		return 32;
2070
2071	return 16;
2072}
2073
/* Shared completion helper for the UUID/class commands: answer the
 * pending @mgmt_op command with the translated status and the current
 * 3-byte class of device, then drop the pending entry.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2092
/* Request-completion callback for add_uuid() */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2099
/* Handle MGMT_OP_ADD_UUID: record a service UUID and refresh the class
 * of device and EIR data to advertise it. The reply carries the
 * (possibly updated) class of device.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* ENODATA: no HCI commands were needed (e.g. powered
		 * off), so complete immediately with the current class.
		 */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2157
2158static bool enable_service_cache(struct hci_dev *hdev)
2159{
2160	if (!hdev_is_powered(hdev))
2161		return false;
2162
2163	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2164		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2165				   CACHE_TIMEOUT);
2166		return true;
2167	}
2168
2169	return false;
2170}
2171
/* Request-completion callback for remove_uuid() */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2178
/* Handle MGMT_OP_REMOVE_UUID: remove a single service UUID, or all of
 * them when the all-zero wildcard UUID is given, then refresh class of
 * device and EIR data. The reply carries the current class of device.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Wildcard: clear the whole UUID list */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer was armed, the class/EIR
		 * update is deferred until it fires.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* ENODATA: no HCI commands needed, complete directly */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2256
/* Request-completion callback for set_dev_class() */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2263
/* Handle MGMT_OP_SET_DEV_CLASS: set the major/minor class of device.
 * The low two bits of minor and high three bits of major are reserved
 * and must be zero. The reply carries the resulting class of device.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: the new class takes effect on next power on */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* NOTE(review): the lock is dropped around the
		 * synchronous cancel, presumably because the service
		 * cache work takes hci_dev_lock itself - confirm.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* ENODATA: no HCI commands needed, complete directly */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2334
2335static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2336			  u16 len)
2337{
2338	struct mgmt_cp_load_link_keys *cp = data;
2339	u16 key_count, expected_len;
2340	bool changed;
2341	int i;
2342
2343	BT_DBG("request for %s", hdev->name);
2344
2345	if (!lmp_bredr_capable(hdev))
2346		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2347				  MGMT_STATUS_NOT_SUPPORTED);
2348
2349	key_count = __le16_to_cpu(cp->key_count);
2350
2351	expected_len = sizeof(*cp) + key_count *
2352					sizeof(struct mgmt_link_key_info);
2353	if (expected_len != len) {
2354		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2355		       expected_len, len);
2356		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2357				  MGMT_STATUS_INVALID_PARAMS);
2358	}
2359
2360	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2361		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2362				  MGMT_STATUS_INVALID_PARAMS);
2363
2364	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2365	       key_count);
2366
2367	for (i = 0; i < key_count; i++) {
2368		struct mgmt_link_key_info *key = &cp->keys[i];
2369
2370		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2371			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2372					  MGMT_STATUS_INVALID_PARAMS);
2373	}
2374
2375	hci_dev_lock(hdev);
2376
2377	hci_link_keys_clear(hdev);
2378
2379	if (cp->debug_keys)
2380		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2381	else
2382		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2383
2384	if (changed)
2385		new_settings(hdev, NULL);
2386
2387	for (i = 0; i < key_count; i++) {
2388		struct mgmt_link_key_info *key = &cp->keys[i];
2389
2390		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2391				 key->type, key->pin_len);
2392	}
2393
2394	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2395
2396	hci_dev_unlock(hdev);
2397
2398	return 0;
2399}
2400
2401static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2402			   u8 addr_type, struct sock *skip_sk)
2403{
2404	struct mgmt_ev_device_unpaired ev;
2405
2406	bacpy(&ev.addr.bdaddr, bdaddr);
2407	ev.addr.type = addr_type;
2408
2409	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2410			  skip_sk);
2411}
2412
/* Unpair Device command handler.
 *
 * Removes the stored pairing data for the given address: the link key
 * for BR/EDR, or the IRK, connection parameters and LTK for LE.  If
 * cp->disconnect is set and a matching connection exists, an HCI
 * Disconnect is issued and the command is kept pending; otherwise the
 * response and the Device Unpaired event are sent immediately.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	/* The response always echoes the target address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* A negative result means no key was stored for this address. */
	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	/* No disconnection needed: reply and notify right away. */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2503
/* Disconnect command handler.
 *
 * Terminates the ACL or LE connection to the given address by issuing
 * an HCI Disconnect; the command stays pending until the controller
 * completes the request.  Only one Disconnect may be pending at a time.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The response always echoes the target address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections are not yet (or no longer)
	 * established, so there is nothing to disconnect.
	 */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2568
2569static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2570{
2571	switch (link_type) {
2572	case LE_LINK:
2573		switch (addr_type) {
2574		case ADDR_LE_DEV_PUBLIC:
2575			return BDADDR_LE_PUBLIC;
2576
2577		default:
2578			/* Fallback to LE Random address type */
2579			return BDADDR_LE_RANDOM;
2580		}
2581
2582	default:
2583		/* Fallback to BR/EDR type */
2584		return BDADDR_BREDR;
2585	}
2586}
2587
2588static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2589			   u16 data_len)
2590{
2591	struct mgmt_rp_get_connections *rp;
2592	struct hci_conn *c;
2593	size_t rp_len;
2594	int err;
2595	u16 i;
2596
2597	BT_DBG("");
2598
2599	hci_dev_lock(hdev);
2600
2601	if (!hdev_is_powered(hdev)) {
2602		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2603				 MGMT_STATUS_NOT_POWERED);
2604		goto unlock;
2605	}
2606
2607	i = 0;
2608	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2609		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2610			i++;
2611	}
2612
2613	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2614	rp = kmalloc(rp_len, GFP_KERNEL);
2615	if (!rp) {
2616		err = -ENOMEM;
2617		goto unlock;
2618	}
2619
2620	i = 0;
2621	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2622		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2623			continue;
2624		bacpy(&rp->addr[i].bdaddr, &c->dst);
2625		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2626		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2627			continue;
2628		i++;
2629	}
2630
2631	rp->conn_count = cpu_to_le16(i);
2632
2633	/* Recalculate length in case of filtered SCO connections, etc */
2634	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2635
2636	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2637			   rp_len);
2638
2639	kfree(rp);
2640
2641unlock:
2642	hci_dev_unlock(hdev);
2643	return err;
2644}
2645
/* Send an HCI PIN Code Negative Reply for the given address, keeping a
 * pending mgmt command so the eventual HCI completion can answer sk.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2664
/* PIN Code Reply command handler.
 *
 * Forwards the user-supplied PIN code to the controller for the ACL
 * connection with the given address.  If the connection requires high
 * security, only a full 16-byte PIN is acceptable; a shorter one is
 * answered with a negative reply instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16 digit PIN code; reject anything
	 * shorter with a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2724
2725static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2726			     u16 len)
2727{
2728	struct mgmt_cp_set_io_capability *cp = data;
2729
2730	BT_DBG("");
2731
2732	hci_dev_lock(hdev);
2733
2734	hdev->io_capability = cp->io_capability;
2735
2736	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2737	       hdev->io_capability);
2738
2739	hci_dev_unlock(hdev);
2740
2741	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2742			    0);
2743}
2744
2745static struct pending_cmd *find_pairing(struct hci_conn *conn)
2746{
2747	struct hci_dev *hdev = conn->hdev;
2748	struct pending_cmd *cmd;
2749
2750	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2751		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2752			continue;
2753
2754		if (cmd->user_data != conn)
2755			continue;
2756
2757		return cmd;
2758	}
2759
2760	return NULL;
2761}
2762
/* Finish a pending Pair Device command: send the response with the
 * given status, detach all connection callbacks, drop the connection
 * reference held by the command and free the command itself.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
2783
2784void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2785{
2786	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2787	struct pending_cmd *cmd;
2788
2789	cmd = find_pairing(conn);
2790	if (cmd)
2791		pairing_complete(cmd, status);
2792}
2793
2794static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2795{
2796	struct pending_cmd *cmd;
2797
2798	BT_DBG("status %u", status);
2799
2800	cmd = find_pairing(conn);
2801	if (!cmd)
2802		BT_DBG("Unable to find a pending command");
2803	else
2804		pairing_complete(cmd, mgmt_status(status));
2805}
2806
2807static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2808{
2809	struct pending_cmd *cmd;
2810
2811	BT_DBG("status %u", status);
2812
2813	if (!status)
2814		return;
2815
2816	cmd = find_pairing(conn);
2817	if (!cmd)
2818		BT_DBG("Unable to find a pending command");
2819	else
2820		pairing_complete(cmd, mgmt_status(status));
2821}
2822
/* Pair Device command handler.
 *
 * Initiates dedicated bonding with the given address over BR/EDR or
 * LE.  The command stays pending and is resolved by the connection
 * callbacks installed below (or immediately, if the link is already
 * connected and secure).
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The response always echoes the target address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* io_cap 0x03 (NoInputNoOutput) cannot do MITM protection. */
	sec_level = BT_SECURITY_MEDIUM;
	if (cp->io_cap == 0x03)
		auth_type = HCI_AT_DEDICATED_BONDING;
	else
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, auth_type);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A connect callback already in place means another pairing
	 * attempt owns this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure: resolve immediately. */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2927
/* Cancel Pair Device command handler.
 *
 * Aborts the currently pending Pair Device command for the given
 * address, resolving it with a cancelled status.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same device that is being paired. */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
2969
/* Common handler for the user pairing response commands (PIN code
 * negative reply, user confirm reply/neg reply, passkey reply/neg
 * reply).  LE responses are handed directly to SMP and answered
 * synchronously; BR/EDR responses are forwarded to the controller via
 * the given HCI opcode and complete asynchronously through a pending
 * mgmt command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3039
/* PIN Code Negative Reply command handler; no passkey is involved. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3051
/* User Confirm Reply command handler. */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	/* This command carries no variable-length payload. */
	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3067
/* User Confirm Negative Reply command handler. */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3079
/* User Passkey Reply command handler; forwards the user's passkey. */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3091
/* User Passkey Negative Reply command handler. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3103
/* Queue an HCI Write Local Name command carrying hdev->dev_name onto
 * the given request.
 */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3113
/* Request-completion callback for Set Local Name: answer the pending
 * mgmt command according to the HCI status.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The command may already have been resolved elsewhere. */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3141
/* Set Local Name command handler.
 *
 * Updates the device's long and short names.  While unpowered only the
 * stored values change; while powered the name is also written to the
 * controller (BR/EDR name/EIR and LE scan response data) and the reply
 * is deferred to set_name_complete.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3210
/* Read Local OOB Data command handler.
 *
 * Requests the local out-of-band pairing data from the controller
 * (extended variant when Secure Connections is enabled) and keeps the
 * command pending until the HCI completion arrives.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one read may be in flight at a time. */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3258
/* Add Remote OOB Data command handler.
 *
 * Stores out-of-band pairing data received for a remote device.  The
 * command has two valid sizes: the legacy form with one hash/randomizer
 * pair, and the extended form carrying both P-192 and P-256 values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3306
3307static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3308				  void *data, u16 len)
3309{
3310	struct mgmt_cp_remove_remote_oob_data *cp = data;
3311	u8 status;
3312	int err;
3313
3314	BT_DBG("%s", hdev->name);
3315
3316	hci_dev_lock(hdev);
3317
3318	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3319	if (err < 0)
3320		status = MGMT_STATUS_INVALID_PARAMS;
3321	else
3322		status = MGMT_STATUS_SUCCESS;
3323
3324	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3325			   status, &cp->addr, sizeof(cp->addr));
3326
3327	hci_dev_unlock(hdev);
3328	return err;
3329}
3330
/* Resolve a pending Start Discovery command after a failure: reset the
 * discovery state to stopped and answer with the translated status and
 * the requested discovery type.
 */
static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	u8 type;
	int err;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);

	return err;
}
3351
3352static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3353{
3354	BT_DBG("status %d", status);
3355
3356	if (status) {
3357		hci_dev_lock(hdev);
3358		mgmt_start_discovery_failed(hdev, status);
3359		hci_dev_unlock(hdev);
3360		return;
3361	}
3362
3363	hci_dev_lock(hdev);
3364	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3365	hci_dev_unlock(hdev);
3366
3367	switch (hdev->discovery.type) {
3368	case DISCOV_TYPE_LE:
3369		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3370				   DISCOV_LE_TIMEOUT);
3371		break;
3372
3373	case DISCOV_TYPE_INTERLEAVED:
3374		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3375				   DISCOV_INTERLEAVED_TIMEOUT);
3376		break;
3377
3378	case DISCOV_TYPE_BREDR:
3379		break;
3380
3381	default:
3382		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3383	}
3384}
3385
/* Start Discovery command handler.
 *
 * Builds and runs the HCI request for the requested discovery type:
 * an Inquiry for BR/EDR, or LE scan parameter/enable commands for LE
 * and interleaved discovery.  The command stays pending until
 * start_discovery_complete resolves it.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery also needs BR/EDR to be enabled. */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3534
3535static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3536{
3537	struct pending_cmd *cmd;
3538	int err;
3539
3540	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3541	if (!cmd)
3542		return -ENOENT;
3543
3544	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3545			   &hdev->discovery.type, sizeof(hdev->discovery.type));
3546	mgmt_pending_remove(cmd);
3547
3548	return err;
3549}
3550
3551static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3552{
3553	BT_DBG("status %d", status);
3554
3555	hci_dev_lock(hdev);
3556
3557	if (status) {
3558		mgmt_stop_discovery_failed(hdev, status);
3559		goto unlock;
3560	}
3561
3562	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3563
3564unlock:
3565	hci_dev_unlock(hdev);
3566}
3567
/* Handle the Stop Discovery mgmt command.
 *
 * Replies immediately when no discovery is active or when the requested
 * discovery type does not match the running one. Otherwise the HCI
 * commands needed to abort the current discovery phase are queued and
 * the mgmt reply is deferred to stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Abort whichever scan is currently in progress: BR/EDR
		 * inquiry or LE scanning.
		 */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);

			hci_req_add_le_scan_disable(&req);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e) {
			/* No name request pending, so discovery can be
			 * stopped right away without issuing any HCI
			 * command.
			 */
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3655
3656static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3657			u16 len)
3658{
3659	struct mgmt_cp_confirm_name *cp = data;
3660	struct inquiry_entry *e;
3661	int err;
3662
3663	BT_DBG("%s", hdev->name);
3664
3665	hci_dev_lock(hdev);
3666
3667	if (!hci_discovery_active(hdev)) {
3668		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3669				   MGMT_STATUS_FAILED, &cp->addr,
3670				   sizeof(cp->addr));
3671		goto failed;
3672	}
3673
3674	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3675	if (!e) {
3676		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3677				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3678				   sizeof(cp->addr));
3679		goto failed;
3680	}
3681
3682	if (cp->name_known) {
3683		e->name_state = NAME_KNOWN;
3684		list_del(&e->list);
3685	} else {
3686		e->name_state = NAME_NEEDED;
3687		hci_inquiry_cache_update_resolve(hdev, e);
3688	}
3689
3690	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3691			   sizeof(cp->addr));
3692
3693failed:
3694	hci_dev_unlock(hdev);
3695	return err;
3696}
3697
3698static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3699			u16 len)
3700{
3701	struct mgmt_cp_block_device *cp = data;
3702	u8 status;
3703	int err;
3704
3705	BT_DBG("%s", hdev->name);
3706
3707	if (!bdaddr_type_is_valid(cp->addr.type))
3708		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3709				    MGMT_STATUS_INVALID_PARAMS,
3710				    &cp->addr, sizeof(cp->addr));
3711
3712	hci_dev_lock(hdev);
3713
3714	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3715	if (err < 0)
3716		status = MGMT_STATUS_FAILED;
3717	else
3718		status = MGMT_STATUS_SUCCESS;
3719
3720	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3721			   &cp->addr, sizeof(cp->addr));
3722
3723	hci_dev_unlock(hdev);
3724
3725	return err;
3726}
3727
3728static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3729			  u16 len)
3730{
3731	struct mgmt_cp_unblock_device *cp = data;
3732	u8 status;
3733	int err;
3734
3735	BT_DBG("%s", hdev->name);
3736
3737	if (!bdaddr_type_is_valid(cp->addr.type))
3738		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3739				    MGMT_STATUS_INVALID_PARAMS,
3740				    &cp->addr, sizeof(cp->addr));
3741
3742	hci_dev_lock(hdev);
3743
3744	err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3745	if (err < 0)
3746		status = MGMT_STATUS_INVALID_PARAMS;
3747	else
3748		status = MGMT_STATUS_SUCCESS;
3749
3750	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3751			   &cp->addr, sizeof(cp->addr));
3752
3753	hci_dev_unlock(hdev);
3754
3755	return err;
3756}
3757
3758static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3759			 u16 len)
3760{
3761	struct mgmt_cp_set_device_id *cp = data;
3762	struct hci_request req;
3763	int err;
3764	__u16 source;
3765
3766	BT_DBG("%s", hdev->name);
3767
3768	source = __le16_to_cpu(cp->source);
3769
3770	if (source > 0x0002)
3771		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3772				  MGMT_STATUS_INVALID_PARAMS);
3773
3774	hci_dev_lock(hdev);
3775
3776	hdev->devid_source = source;
3777	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3778	hdev->devid_product = __le16_to_cpu(cp->product);
3779	hdev->devid_version = __le16_to_cpu(cp->version);
3780
3781	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3782
3783	hci_req_init(&req, hdev);
3784	update_eir(&req);
3785	hci_req_run(&req, NULL);
3786
3787	hci_dev_unlock(hdev);
3788
3789	return err;
3790}
3791
3792static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3793{
3794	struct cmd_lookup match = { NULL, hdev };
3795
3796	if (status) {
3797		u8 mgmt_err = mgmt_status(status);
3798
3799		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3800				     cmd_status_rsp, &mgmt_err);
3801		return;
3802	}
3803
3804	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3805			     &match);
3806
3807	new_settings(hdev, match.sk);
3808
3809	if (match.sk)
3810		sock_put(match.sk);
3811}
3812
/* Handle the Set Advertising mgmt command.
 *
 * When no HCI communication is needed (powered off, value unchanged, or
 * LE connections exist) the flag is toggled and a reply is sent
 * directly. Otherwise the advertising enable/disable HCI commands are
 * queued and the reply is deferred to set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	/* Only a strict boolean value is accepted */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while another advertising or LE state change is still
	 * in flight.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3890
3891static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3892			      void *data, u16 len)
3893{
3894	struct mgmt_cp_set_static_address *cp = data;
3895	int err;
3896
3897	BT_DBG("%s", hdev->name);
3898
3899	if (!lmp_le_capable(hdev))
3900		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3901				  MGMT_STATUS_NOT_SUPPORTED);
3902
3903	if (hdev_is_powered(hdev))
3904		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3905				  MGMT_STATUS_REJECTED);
3906
3907	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3908		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3909			return cmd_status(sk, hdev->id,
3910					  MGMT_OP_SET_STATIC_ADDRESS,
3911					  MGMT_STATUS_INVALID_PARAMS);
3912
3913		/* Two most significant bits shall be set */
3914		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3915			return cmd_status(sk, hdev->id,
3916					  MGMT_OP_SET_STATIC_ADDRESS,
3917					  MGMT_STATUS_INVALID_PARAMS);
3918	}
3919
3920	hci_dev_lock(hdev);
3921
3922	bacpy(&hdev->static_addr, &cp->bdaddr);
3923
3924	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3925
3926	hci_dev_unlock(hdev);
3927
3928	return err;
3929}
3930
3931static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3932			   void *data, u16 len)
3933{
3934	struct mgmt_cp_set_scan_params *cp = data;
3935	__u16 interval, window;
3936	int err;
3937
3938	BT_DBG("%s", hdev->name);
3939
3940	if (!lmp_le_capable(hdev))
3941		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3942				  MGMT_STATUS_NOT_SUPPORTED);
3943
3944	interval = __le16_to_cpu(cp->interval);
3945
3946	if (interval < 0x0004 || interval > 0x4000)
3947		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3948				  MGMT_STATUS_INVALID_PARAMS);
3949
3950	window = __le16_to_cpu(cp->window);
3951
3952	if (window < 0x0004 || window > 0x4000)
3953		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3954				  MGMT_STATUS_INVALID_PARAMS);
3955
3956	if (window > interval)
3957		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3958				  MGMT_STATUS_INVALID_PARAMS);
3959
3960	hci_dev_lock(hdev);
3961
3962	hdev->le_scan_interval = interval;
3963	hdev->le_scan_window = window;
3964
3965	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3966
3967	/* If background scan is running, restart it so new parameters are
3968	 * loaded.
3969	 */
3970	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
3971	    hdev->discovery.state == DISCOVERY_STOPPED) {
3972		struct hci_request req;
3973
3974		hci_req_init(&req, hdev);
3975
3976		hci_req_add_le_scan_disable(&req);
3977		hci_req_add_le_passive_scan(&req);
3978
3979		hci_req_run(&req, NULL);
3980	}
3981
3982	hci_dev_unlock(hdev);
3983
3984	return err;
3985}
3986
3987static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3988{
3989	struct pending_cmd *cmd;
3990
3991	BT_DBG("status 0x%02x", status);
3992
3993	hci_dev_lock(hdev);
3994
3995	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3996	if (!cmd)
3997		goto unlock;
3998
3999	if (status) {
4000		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4001			   mgmt_status(status));
4002	} else {
4003		struct mgmt_mode *cp = cmd->param;
4004
4005		if (cp->val)
4006			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4007		else
4008			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4009
4010		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4011		new_settings(hdev, cmd->sk);
4012	}
4013
4014	mgmt_pending_remove(cmd);
4015
4016unlock:
4017	hci_dev_unlock(hdev);
4018}
4019
/* Handle the Set Fast Connectable mgmt command.
 *
 * Requires a powered, connectable BR/EDR controller of at least
 * Bluetooth 1.2. Queues the page scan parameter change and defers the
 * reply to fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Only a strict boolean value is accepted */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Value already matches current state: reply without any HCI
	 * communication.
	 */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4084
4085static void set_bredr_scan(struct hci_request *req)
4086{
4087	struct hci_dev *hdev = req->hdev;
4088	u8 scan = 0;
4089
4090	/* Ensure that fast connectable is disabled. This function will
4091	 * not do anything if the page scan parameters are already what
4092	 * they should be.
4093	 */
4094	write_fast_connectable(req, false);
4095
4096	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4097		scan |= SCAN_PAGE;
4098	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4099		scan |= SCAN_INQUIRY;
4100
4101	if (scan)
4102		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4103}
4104
4105static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4106{
4107	struct pending_cmd *cmd;
4108
4109	BT_DBG("status 0x%02x", status);
4110
4111	hci_dev_lock(hdev);
4112
4113	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4114	if (!cmd)
4115		goto unlock;
4116
4117	if (status) {
4118		u8 mgmt_err = mgmt_status(status);
4119
4120		/* We need to restore the flag if related HCI commands
4121		 * failed.
4122		 */
4123		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4124
4125		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4126	} else {
4127		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4128		new_settings(hdev, cmd->sk);
4129	}
4130
4131	mgmt_pending_remove(cmd);
4132
4133unlock:
4134	hci_dev_unlock(hdev);
4135}
4136
/* Handle the Set BR/EDR mgmt command on dual-mode controllers.
 *
 * While powered off the flag (and, when disabling, all dependent BR/EDR
 * settings) is toggled directly. While powered on only enabling is
 * allowed; the flag is set optimistically before queueing the HCI
 * commands and is rolled back by set_bredr_complete() on failure.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Toggling BR/EDR only makes sense when LE is enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			/* Disabling BR/EDR invalidates all settings that
			 * only apply to BR/EDR operation.
			 */
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4226
/* Handle the Set Secure Connections mgmt command.
 *
 * Accepts 0x00 (off), 0x01 (on) and 0x02 (SC-only mode). While powered
 * off the flags are toggled directly; while powered on the controller
 * support command is sent and the SC-only flag is adjusted afterwards.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to change: both the SC and SC-only states already
	 * match the request.
	 */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4314
4315static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4316			  void *data, u16 len)
4317{
4318	struct mgmt_mode *cp = data;
4319	bool changed;
4320	int err;
4321
4322	BT_DBG("request for %s", hdev->name);
4323
4324	if (cp->val != 0x00 && cp->val != 0x01)
4325		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4326				  MGMT_STATUS_INVALID_PARAMS);
4327
4328	hci_dev_lock(hdev);
4329
4330	if (cp->val)
4331		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4332	else
4333		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4334
4335	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4336	if (err < 0)
4337		goto unlock;
4338
4339	if (changed)
4340		err = new_settings(hdev, sk);
4341
4342unlock:
4343	hci_dev_unlock(hdev);
4344	return err;
4345}
4346
/* Handle the Set Privacy mgmt command.
 *
 * Only allowed while powered off. Enabling privacy stores the provided
 * IRK and marks the RPA as expired so a new one gets generated;
 * disabling clears both.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	/* Privacy may only be changed while powered off */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4396
4397static bool irk_is_valid(struct mgmt_irk_info *irk)
4398{
4399	switch (irk->addr.type) {
4400	case BDADDR_LE_PUBLIC:
4401		return true;
4402
4403	case BDADDR_LE_RANDOM:
4404		/* Two most significant bits shall be set */
4405		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4406			return false;
4407		return true;
4408	}
4409
4410	return false;
4411}
4412
4413static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4414		     u16 len)
4415{
4416	struct mgmt_cp_load_irks *cp = cp_data;
4417	u16 irk_count, expected_len;
4418	int i, err;
4419
4420	BT_DBG("request for %s", hdev->name);
4421
4422	if (!lmp_le_capable(hdev))
4423		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4424				  MGMT_STATUS_NOT_SUPPORTED);
4425
4426	irk_count = __le16_to_cpu(cp->irk_count);
4427
4428	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4429	if (expected_len != len) {
4430		BT_ERR("load_irks: expected %u bytes, got %u bytes",
4431		       expected_len, len);
4432		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4433				  MGMT_STATUS_INVALID_PARAMS);
4434	}
4435
4436	BT_DBG("%s irk_count %u", hdev->name, irk_count);
4437
4438	for (i = 0; i < irk_count; i++) {
4439		struct mgmt_irk_info *key = &cp->irks[i];
4440
4441		if (!irk_is_valid(key))
4442			return cmd_status(sk, hdev->id,
4443					  MGMT_OP_LOAD_IRKS,
4444					  MGMT_STATUS_INVALID_PARAMS);
4445	}
4446
4447	hci_dev_lock(hdev);
4448
4449	hci_smp_irks_clear(hdev);
4450
4451	for (i = 0; i < irk_count; i++) {
4452		struct mgmt_irk_info *irk = &cp->irks[i];
4453		u8 addr_type;
4454
4455		if (irk->addr.type == BDADDR_LE_PUBLIC)
4456			addr_type = ADDR_LE_DEV_PUBLIC;
4457		else
4458			addr_type = ADDR_LE_DEV_RANDOM;
4459
4460		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4461			    BDADDR_ANY);
4462	}
4463
4464	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4465
4466	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4467
4468	hci_dev_unlock(hdev);
4469
4470	return err;
4471}
4472
4473static bool ltk_is_valid(struct mgmt_ltk_info *key)
4474{
4475	if (key->master != 0x00 && key->master != 0x01)
4476		return false;
4477
4478	switch (key->addr.type) {
4479	case BDADDR_LE_PUBLIC:
4480		return true;
4481
4482	case BDADDR_LE_RANDOM:
4483		/* Two most significant bits shall be set */
4484		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4485			return false;
4486		return true;
4487	}
4488
4489	return false;
4490}
4491
4492static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4493			       void *cp_data, u16 len)
4494{
4495	struct mgmt_cp_load_long_term_keys *cp = cp_data;
4496	u16 key_count, expected_len;
4497	int i, err;
4498
4499	BT_DBG("request for %s", hdev->name);
4500
4501	if (!lmp_le_capable(hdev))
4502		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4503				  MGMT_STATUS_NOT_SUPPORTED);
4504
4505	key_count = __le16_to_cpu(cp->key_count);
4506
4507	expected_len = sizeof(*cp) + key_count *
4508					sizeof(struct mgmt_ltk_info);
4509	if (expected_len != len) {
4510		BT_ERR("load_keys: expected %u bytes, got %u bytes",
4511		       expected_len, len);
4512		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4513				  MGMT_STATUS_INVALID_PARAMS);
4514	}
4515
4516	BT_DBG("%s key_count %u", hdev->name, key_count);
4517
4518	for (i = 0; i < key_count; i++) {
4519		struct mgmt_ltk_info *key = &cp->keys[i];
4520
4521		if (!ltk_is_valid(key))
4522			return cmd_status(sk, hdev->id,
4523					  MGMT_OP_LOAD_LONG_TERM_KEYS,
4524					  MGMT_STATUS_INVALID_PARAMS);
4525	}
4526
4527	hci_dev_lock(hdev);
4528
4529	hci_smp_ltks_clear(hdev);
4530
4531	for (i = 0; i < key_count; i++) {
4532		struct mgmt_ltk_info *key = &cp->keys[i];
4533		u8 type, addr_type;
4534
4535		if (key->addr.type == BDADDR_LE_PUBLIC)
4536			addr_type = ADDR_LE_DEV_PUBLIC;
4537		else
4538			addr_type = ADDR_LE_DEV_RANDOM;
4539
4540		if (key->master)
4541			type = HCI_SMP_LTK;
4542		else
4543			type = HCI_SMP_LTK_SLAVE;
4544
4545		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4546			    key->type, key->val, key->enc_size, key->ediv,
4547			    key->rand);
4548	}
4549
4550	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4551			   NULL, 0);
4552
4553	hci_dev_unlock(hdev);
4554
4555	return err;
4556}
4557
/* Dispatch table for mgmt commands. The array index IS the mgmt opcode,
 * so the entry order must match the MGMT_OP_* definitions in
 * net/bluetooth/mgmt.h. var_len entries accept any payload at least
 * data_len bytes long; fixed entries require an exact match (checked
 * in mgmt_control()).
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
};
4614
4615
/* Entry point for mgmt messages received on an HCI control socket.
 *
 * Copies the message from user space, validates the header, index and
 * parameter length, resolves the target controller and dispatches to
 * the handler from mgmt_handlers[]. Returns the number of consumed
 * bytes on success or a negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must match the actual payload size */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup or claimed by a user channel
		 * are not visible over the mgmt interface.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Commands below READ_INFO are global and take no index; all
	 * others require a valid controller index.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
4708
4709void mgmt_index_added(struct hci_dev *hdev)
4710{
4711	if (hdev->dev_type != HCI_BREDR)
4712		return;
4713
4714	mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4715}
4716
4717void mgmt_index_removed(struct hci_dev *hdev)
4718{
4719	u8 status = MGMT_STATUS_INVALID_INDEX;
4720
4721	if (hdev->dev_type != HCI_BREDR)
4722		return;
4723
4724	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4725
4726	mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4727}
4728
4729/* This function requires the caller holds hdev->lock */
4730static void restart_le_auto_conns(struct hci_dev *hdev)
4731{
4732	struct hci_conn_params *p;
4733
4734	list_for_each_entry(p, &hdev->le_conn_params, list) {
4735		if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
4736			hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
4737	}
4738}
4739
4740static void powered_complete(struct hci_dev *hdev, u8 status)
4741{
4742	struct cmd_lookup match = { NULL, hdev };
4743
4744	BT_DBG("status 0x%02x", status);
4745
4746	hci_dev_lock(hdev);
4747
4748	restart_le_auto_conns(hdev);
4749
4750	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4751
4752	new_settings(hdev, match.sk);
4753
4754	hci_dev_unlock(hdev);
4755
4756	if (match.sk)
4757		sock_put(match.sk);
4758}
4759
/* Build and run the HCI request that brings the controller in line
 * with the mgmt settings after power on: SSP mode, LE host support,
 * advertising data, authentication, scan mode, class, name and EIR.
 *
 * Returns the hci_req_run() result; powered_complete() runs when the
 * request finishes.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Turn on SSP if mgmt wants it but the controller host
	 * feature is not set yet.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the authentication-enable setting with the controller */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
4819
/* Handle a controller power state change. On power-on the controller
 * state is re-synced via powered_update_hci(); on power-off all
 * pending commands are failed and a zero class of device may be
 * announced. A New Settings event is emitted in both cases.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* When the update request was queued successfully,
		 * powered_complete() will send the responses and the
		 * settings event once it runs.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	/* Fail every other command that was still in flight */
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
4854
4855void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4856{
4857	struct pending_cmd *cmd;
4858	u8 status;
4859
4860	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4861	if (!cmd)
4862		return;
4863
4864	if (err == -ERFKILL)
4865		status = MGMT_STATUS_RFKILLED;
4866	else
4867		status = MGMT_STATUS_FAILED;
4868
4869	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4870
4871	mgmt_pending_remove(cmd);
4872}
4873
/* Timer callback for a limited/timed discoverable period: clear the
 * discoverable flags, restore page-only scanning and refresh class
 * and advertising data, then announce the settings change.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Drop inquiry scan but keep page scan enabled */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
4904
/* Track an externally triggered change of the discoverable scan mode
 * and keep the mgmt flags, advertising data and settings in sync.
 */
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * commands request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	/* Powering off may clear the scan mode - don't let that interfere */
	if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		/* Limited discoverable implies discoverable, so clear both */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
4941
4942void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4943{
4944	bool changed;
4945
4946	/* Nothing needed here if there's a pending command since that
4947	 * commands request completion callback takes care of everything
4948	 * necessary.
4949	 */
4950	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4951		return;
4952
4953	/* Powering off may clear the scan mode - don't let that interfere */
4954	if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4955		return;
4956
4957	if (connectable)
4958		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4959	else
4960		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4961
4962	if (changed)
4963		new_settings(hdev, NULL);
4964}
4965
4966void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
4967{
4968	/* Powering off may stop advertising - don't let that interfere */
4969	if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4970		return;
4971
4972	if (advertising)
4973		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4974	else
4975		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4976}
4977
4978void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4979{
4980	u8 mgmt_err = mgmt_status(status);
4981
4982	if (scan & SCAN_PAGE)
4983		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4984				     cmd_status_rsp, &mgmt_err);
4985
4986	if (scan & SCAN_INQUIRY)
4987		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4988				     cmd_status_rsp, &mgmt_err);
4989}
4990
4991void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4992		       bool persistent)
4993{
4994	struct mgmt_ev_new_link_key ev;
4995
4996	memset(&ev, 0, sizeof(ev));
4997
4998	ev.store_hint = persistent;
4999	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5000	ev.key.addr.type = BDADDR_BREDR;
5001	ev.key.type = key->type;
5002	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5003	ev.key.pin_len = key->pin_len;
5004
5005	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
5006}
5007
/* Emit a New Long Term Key event for a freshly distributed LE LTK */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
5045
/* Emit a New IRK event for a freshly distributed identity resolving key */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
5075
/* Emit a New CSRK event for a freshly distributed signature
 * resolving key.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
5105
5106static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5107				  u8 data_len)
5108{
5109	eir[eir_len++] = sizeof(type) + data_len;
5110	eir[eir_len++] = type;
5111	memcpy(&eir[eir_len], data, data_len);
5112	eir_len += data_len;
5113
5114	return eir_len;
5115}
5116
/* Emit Device Connected, packing the remote name and class of device
 * as EIR fields after the fixed-size event header.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only include the class of device when it is non-zero */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
5143
5144static void disconnect_rsp(struct pending_cmd *cmd, void *data)
5145{
5146	struct mgmt_cp_disconnect *cp = cmd->param;
5147	struct sock **sk = data;
5148	struct mgmt_rp_disconnect rp;
5149
5150	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5151	rp.addr.type = cp->addr.type;
5152
5153	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
5154		     sizeof(rp));
5155
5156	*sk = cmd->sk;
5157	sock_hold(*sk);
5158
5159	mgmt_pending_remove(cmd);
5160}
5161
5162static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
5163{
5164	struct hci_dev *hdev = data;
5165	struct mgmt_cp_unpair_device *cp = cmd->param;
5166	struct mgmt_rp_unpair_device rp;
5167
5168	memset(&rp, 0, sizeof(rp));
5169	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5170	rp.addr.type = cp->addr.type;
5171
5172	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
5173
5174	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
5175
5176	mgmt_pending_remove(cmd);
5177}
5178
/* Handle an HCI disconnection: possibly finish a pending power-off,
 * complete pending Disconnect/Unpair commands and emit the Device
 * Disconnected event (skipping the socket that requested it).
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete any pending Disconnect command; sk gets a held
	 * reference to its socket so the event below can skip it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
5220
/* An HCI Disconnect command failed: complete pending Unpair commands
 * and, if the failure matches the pending Disconnect command's
 * address, complete that command with the translated status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only respond if the failure is for the pending command's target */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5252
/* An outgoing connection attempt failed: possibly finish a pending
 * power-off and emit the Connect Failed event.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
5278
5279void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5280{
5281	struct mgmt_ev_pin_code_request ev;
5282
5283	bacpy(&ev.addr.bdaddr, bdaddr);
5284	ev.addr.type = BDADDR_BREDR;
5285	ev.secure = secure;
5286
5287	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5288}
5289
5290void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5291				  u8 status)
5292{
5293	struct pending_cmd *cmd;
5294	struct mgmt_rp_pin_code_reply rp;
5295
5296	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5297	if (!cmd)
5298		return;
5299
5300	bacpy(&rp.addr.bdaddr, bdaddr);
5301	rp.addr.type = BDADDR_BREDR;
5302
5303	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5304		     mgmt_status(status), &rp, sizeof(rp));
5305
5306	mgmt_pending_remove(cmd);
5307}
5308
5309void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5310				      u8 status)
5311{
5312	struct pending_cmd *cmd;
5313	struct mgmt_rp_pin_code_reply rp;
5314
5315	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5316	if (!cmd)
5317		return;
5318
5319	bacpy(&rp.addr.bdaddr, bdaddr);
5320	rp.addr.type = BDADDR_BREDR;
5321
5322	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5323		     mgmt_status(status), &rp, sizeof(rp));
5324
5325	mgmt_pending_remove(cmd);
5326}
5327
5328int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5329			      u8 link_type, u8 addr_type, __le32 value,
5330			      u8 confirm_hint)
5331{
5332	struct mgmt_ev_user_confirm_request ev;
5333
5334	BT_DBG("%s", hdev->name);
5335
5336	bacpy(&ev.addr.bdaddr, bdaddr);
5337	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5338	ev.confirm_hint = confirm_hint;
5339	ev.value = value;
5340
5341	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5342			  NULL);
5343}
5344
5345int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5346			      u8 link_type, u8 addr_type)
5347{
5348	struct mgmt_ev_user_passkey_request ev;
5349
5350	BT_DBG("%s", hdev->name);
5351
5352	bacpy(&ev.addr.bdaddr, bdaddr);
5353	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5354
5355	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5356			  NULL);
5357}
5358
/* Common completion helper for the four user confirm/passkey reply
 * variants: look up the pending command for @opcode and complete it
 * with the translated HCI status.
 *
 * Returns 0 on success or -ENOENT when no matching command is
 * pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}
5380
/* Complete a pending User Confirm Reply command */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
5387
/* Complete a pending User Confirm Negative Reply command */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
5395
/* Complete a pending User Passkey Reply command */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
5402
/* Complete a pending User Passkey Negative Reply command */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
5410
5411int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5412			     u8 link_type, u8 addr_type, u32 passkey,
5413			     u8 entered)
5414{
5415	struct mgmt_ev_passkey_notify ev;
5416
5417	BT_DBG("%s", hdev->name);
5418
5419	bacpy(&ev.addr.bdaddr, bdaddr);
5420	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5421	ev.passkey = __cpu_to_le32(passkey);
5422	ev.entered = entered;
5423
5424	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5425}
5426
5427void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5428		      u8 addr_type, u8 status)
5429{
5430	struct mgmt_ev_auth_failed ev;
5431
5432	bacpy(&ev.addr.bdaddr, bdaddr);
5433	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5434	ev.status = mgmt_status(status);
5435
5436	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5437}
5438
/* A Write Auth Enable command finished: respond to pending Set Link
 * Security commands and keep the HCI_LINK_SECURITY flag in sync with
 * the controller's HCI_AUTH state.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5467
5468static void clear_eir(struct hci_request *req)
5469{
5470	struct hci_dev *hdev = req->hdev;
5471	struct hci_cp_write_eir cp;
5472
5473	if (!lmp_ext_inq_capable(hdev))
5474		return;
5475
5476	memset(hdev->eir, 0, sizeof(hdev->eir));
5477
5478	memset(&cp, 0, sizeof(cp));
5479
5480	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
5481}
5482
/* A Write SSP Mode command finished: respond to pending Set SSP
 * commands, keep the SSP/HS flags consistent (HS requires SSP) and
 * refresh or clear the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the flag if we had set it
		 * optimistically, dropping HS along with it.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* HS cannot stay enabled without SSP; report a change
		 * if clearing HS was the only effect.
		 */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}
5531
/* A secure connections enable/disable command finished: respond to
 * pending Set Secure Connections commands and keep the SC_ENABLED
 * and SC_ONLY flags consistent.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back optimistically set flags */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5568
5569static void sk_lookup(struct pending_cmd *cmd, void *data)
5570{
5571	struct cmd_lookup *match = data;
5572
5573	if (match->sk == NULL) {
5574		match->sk = cmd->sk;
5575		sock_hold(match->sk);
5576	}
5577}
5578
/* A Write Class of Device command finished: pick a socket from any
 * pending class-affecting commands (so the event can skip it) and
 * emit Class of Device Changed on success.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
5595
/* A Write Local Name command finished: emit Local Name Changed unless
 * the change was part of powering on the controller.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending command means the name change did not come
		 * from mgmt; cache the controller-provided name.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
5622
/* A Read Local OOB Data command finished: complete the pending mgmt
 * command with either the extended (192 + 256 bit, when secure
 * connections is enabled and 256-bit values are available) or the
 * legacy (192 bit only) response format.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
5669
/* Report a device found during discovery, resolving RPAs via a known
 * IRK and appending the class of device to the EIR data if missing.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
		       ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	if (!hci_discovery_active(hdev))
		return;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* Report the identity address if the RPA resolves via an IRK */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);
	ev_size = sizeof(*ev) + eir_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
5715
5716void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5717		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5718{
5719	struct mgmt_ev_device_found *ev;
5720	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5721	u16 eir_len;
5722
5723	ev = (struct mgmt_ev_device_found *) buf;
5724
5725	memset(buf, 0, sizeof(buf));
5726
5727	bacpy(&ev->addr.bdaddr, bdaddr);
5728	ev->addr.type = link_to_bdaddr(link_type, addr_type);
5729	ev->rssi = rssi;
5730
5731	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5732				  name_len);
5733
5734	ev->eir_len = cpu_to_le16(eir_len);
5735
5736	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5737}
5738
5739void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5740{
5741	struct mgmt_ev_discovering ev;
5742	struct pending_cmd *cmd;
5743
5744	BT_DBG("%s discovering %u", hdev->name, discovering);
5745
5746	if (discovering)
5747		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5748	else
5749		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5750
5751	if (cmd != NULL) {
5752		u8 type = hdev->discovery.type;
5753
5754		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
5755			     sizeof(type));
5756		mgmt_pending_remove(cmd);
5757	}
5758
5759	memset(&ev, 0, sizeof(ev));
5760	ev.type = hdev->discovery.type;
5761	ev.discovering = discovering;
5762
5763	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
5764}
5765
5766int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5767{
5768	struct pending_cmd *cmd;
5769	struct mgmt_ev_device_blocked ev;
5770
5771	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5772
5773	bacpy(&ev.addr.bdaddr, bdaddr);
5774	ev.addr.type = type;
5775
5776	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5777			  cmd ? cmd->sk : NULL);
5778}
5779
5780int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5781{
5782	struct pending_cmd *cmd;
5783	struct mgmt_ev_device_unblocked ev;
5784
5785	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5786
5787	bacpy(&ev.addr.bdaddr, bdaddr);
5788	ev.addr.type = type;
5789
5790	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5791			  cmd ? cmd->sk : NULL);
5792}
5793
5794static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5795{
5796	BT_DBG("%s status %u", hdev->name, status);
5797
5798	/* Clear the advertising mgmt setting if we failed to re-enable it */
5799	if (status) {
5800		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5801		new_settings(hdev, NULL);
5802	}
5803}
5804
5805void mgmt_reenable_advertising(struct hci_dev *hdev)
5806{
5807	struct hci_request req;
5808
5809	if (hci_conn_num(hdev, LE_LINK) > 0)
5810		return;
5811
5812	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5813		return;
5814
5815	hci_req_init(&req, hdev);
5816	enable_advertising(&req);
5817
5818	/* If this fails we have no option but to let user space know
5819	 * that we've disabled advertising.
5820	 */
5821	if (hci_req_run(&req, adv_enable_complete) < 0) {
5822		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5823		new_settings(hdev, NULL);
5824	}
5825}
5826