mgmt.c revision f4a407bef20c0e63fcd910a9404418522abff4ab
1/*
2   BlueZ - Bluetooth protocol stack for Linux
3
4   Copyright (C) 2010  Nokia Corporation
5   Copyright (C) 2011-2012 Intel Corporation
6
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License version 2 as
9   published by the Free Software Foundation;
10
11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22   SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI Management interface */
26
27#include <linux/module.h>
28#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
32#include <net/bluetooth/mgmt.h>
33
34#include "smp.h"
35
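/* Version/revision pair returned by read_version() below. User space can
 * read it through MGMT_OP_READ_VERSION to tell which generation of the
 * management API this kernel implements before relying on newer commands.
 */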
36#define MGMT_VERSION	1
37#define MGMT_REVISION	5
38
39static const u16 mgmt_commands[] = {
40	MGMT_OP_READ_INDEX_LIST,
41	MGMT_OP_READ_INFO,
42	MGMT_OP_SET_POWERED,
43	MGMT_OP_SET_DISCOVERABLE,
44	MGMT_OP_SET_CONNECTABLE,
45	MGMT_OP_SET_FAST_CONNECTABLE,
46	MGMT_OP_SET_PAIRABLE,
47	MGMT_OP_SET_LINK_SECURITY,
48	MGMT_OP_SET_SSP,
49	MGMT_OP_SET_HS,
50	MGMT_OP_SET_LE,
51	MGMT_OP_SET_DEV_CLASS,
52	MGMT_OP_SET_LOCAL_NAME,
53	MGMT_OP_ADD_UUID,
54	MGMT_OP_REMOVE_UUID,
55	MGMT_OP_LOAD_LINK_KEYS,
56	MGMT_OP_LOAD_LONG_TERM_KEYS,
57	MGMT_OP_DISCONNECT,
58	MGMT_OP_GET_CONNECTIONS,
59	MGMT_OP_PIN_CODE_REPLY,
60	MGMT_OP_PIN_CODE_NEG_REPLY,
61	MGMT_OP_SET_IO_CAPABILITY,
62	MGMT_OP_PAIR_DEVICE,
63	MGMT_OP_CANCEL_PAIR_DEVICE,
64	MGMT_OP_UNPAIR_DEVICE,
65	MGMT_OP_USER_CONFIRM_REPLY,
66	MGMT_OP_USER_CONFIRM_NEG_REPLY,
67	MGMT_OP_USER_PASSKEY_REPLY,
68	MGMT_OP_USER_PASSKEY_NEG_REPLY,
69	MGMT_OP_READ_LOCAL_OOB_DATA,
70	MGMT_OP_ADD_REMOTE_OOB_DATA,
71	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
72	MGMT_OP_START_DISCOVERY,
73	MGMT_OP_STOP_DISCOVERY,
74	MGMT_OP_CONFIRM_NAME,
75	MGMT_OP_BLOCK_DEVICE,
76	MGMT_OP_UNBLOCK_DEVICE,
77	MGMT_OP_SET_DEVICE_ID,
78	MGMT_OP_SET_ADVERTISING,
79	MGMT_OP_SET_BREDR,
80	MGMT_OP_SET_STATIC_ADDRESS,
81	MGMT_OP_SET_SCAN_PARAMS,
82	MGMT_OP_SET_SECURE_CONN,
83	MGMT_OP_SET_DEBUG_KEYS,
84	MGMT_OP_LOAD_IRKS,
85};
86
87static const u16 mgmt_events[] = {
88	MGMT_EV_CONTROLLER_ERROR,
89	MGMT_EV_INDEX_ADDED,
90	MGMT_EV_INDEX_REMOVED,
91	MGMT_EV_NEW_SETTINGS,
92	MGMT_EV_CLASS_OF_DEV_CHANGED,
93	MGMT_EV_LOCAL_NAME_CHANGED,
94	MGMT_EV_NEW_LINK_KEY,
95	MGMT_EV_NEW_LONG_TERM_KEY,
96	MGMT_EV_DEVICE_CONNECTED,
97	MGMT_EV_DEVICE_DISCONNECTED,
98	MGMT_EV_CONNECT_FAILED,
99	MGMT_EV_PIN_CODE_REQUEST,
100	MGMT_EV_USER_CONFIRM_REQUEST,
101	MGMT_EV_USER_PASSKEY_REQUEST,
102	MGMT_EV_AUTH_FAILED,
103	MGMT_EV_DEVICE_FOUND,
104	MGMT_EV_DISCOVERING,
105	MGMT_EV_DEVICE_BLOCKED,
106	MGMT_EV_DEVICE_UNBLOCKED,
107	MGMT_EV_DEVICE_UNPAIRED,
108	MGMT_EV_PASSKEY_NOTIFY,
109};
110
111#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
112
113#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
114				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
115
116struct pending_cmd {
117	struct list_head list;
118	u16 opcode;
119	int index;
120	void *param;
121	struct sock *sk;
122	void *user_data;
123};
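/* Every mgmt command that cannot be answered immediately is tracked by a
 * pending_cmd on hdev->mgmt_pending: mgmt_pending_add() copies the request
 * parameters and takes a reference on the originating socket, and
 * mgmt_pending_remove() drops both once the matching HCI request has
 * completed. mgmt_pending_find() looks commands up by opcode, which is also
 * how "busy" conditions are detected.
 */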
124
125/* HCI to MGMT error code conversion table */
126static u8 mgmt_status_table[] = {
127	MGMT_STATUS_SUCCESS,
128	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
129	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
130	MGMT_STATUS_FAILED,		/* Hardware Failure */
131	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
132	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
133	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
134	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
135	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
136	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
137	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
138	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
139	MGMT_STATUS_BUSY,		/* Command Disallowed */
140	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
141	MGMT_STATUS_REJECTED,		/* Rejected Security */
142	MGMT_STATUS_REJECTED,		/* Rejected Personal */
143	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
144	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
145	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
146	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
147	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
148	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
149	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
150	MGMT_STATUS_BUSY,		/* Repeated Attempts */
151	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
152	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
153	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
154	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
155	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
156	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
157	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
158	MGMT_STATUS_FAILED,		/* Unspecified Error */
159	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
160	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
161	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
162	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
163	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
164	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
165	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
166	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
167	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
168	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
169	MGMT_STATUS_FAILED,		/* Transaction Collision */
170	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
171	MGMT_STATUS_REJECTED,		/* QoS Rejected */
172	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
173	MGMT_STATUS_REJECTED,		/* Insufficient Security */
174	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
175	MGMT_STATUS_BUSY,		/* Role Switch Pending */
176	MGMT_STATUS_FAILED,		/* Slot Violation */
177	MGMT_STATUS_FAILED,		/* Role Switch Failed */
178	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
179	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
180	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
181	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
182	MGMT_STATUS_BUSY,		/* Controller Busy */
183	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
184	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
185	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
186	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
187	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
188};
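/* mgmt_status_table is indexed directly by the HCI status code from the
 * Core Specification error list; any code beyond the end of the table falls
 * back to MGMT_STATUS_FAILED in mgmt_status() below.
 */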
189
190static u8 mgmt_status(u8 hci_status)
191{
192	if (hci_status < ARRAY_SIZE(mgmt_status_table))
193		return mgmt_status_table[hci_status];
194
195	return MGMT_STATUS_FAILED;
196}
197
198static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
199{
200	struct sk_buff *skb;
201	struct mgmt_hdr *hdr;
202	struct mgmt_ev_cmd_status *ev;
203	int err;
204
205	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
206
207	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
208	if (!skb)
209		return -ENOMEM;
210
211	hdr = (void *) skb_put(skb, sizeof(*hdr));
212
213	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
214	hdr->index = cpu_to_le16(index);
215	hdr->len = cpu_to_le16(sizeof(*ev));
216
217	ev = (void *) skb_put(skb, sizeof(*ev));
218	ev->status = status;
219	ev->opcode = cpu_to_le16(cmd);
220
221	err = sock_queue_rcv_skb(sk, skb);
222	if (err < 0)
223		kfree_skb(skb);
224
225	return err;
226}
227
228static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
229			void *rp, size_t rp_len)
230{
231	struct sk_buff *skb;
232	struct mgmt_hdr *hdr;
233	struct mgmt_ev_cmd_complete *ev;
234	int err;
235
236	BT_DBG("sock %p", sk);
237
238	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
239	if (!skb)
240		return -ENOMEM;
241
242	hdr = (void *) skb_put(skb, sizeof(*hdr));
243
244	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
245	hdr->index = cpu_to_le16(index);
246	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
247
248	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
249	ev->opcode = cpu_to_le16(cmd);
250	ev->status = status;
251
252	if (rp)
253		memcpy(ev->data, rp, rp_len);
254
255	err = sock_queue_rcv_skb(sk, skb);
256	if (err < 0)
257		kfree_skb(skb);
258
259	return err;
260}
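/* cmd_status() and cmd_complete() build the reply for a single command and
 * queue it only on the requesting socket: the sk_buff holds a struct
 * mgmt_hdr followed by the CMD_STATUS or CMD_COMPLETE event, with any
 * return parameters appended after the CMD_COMPLETE header. Events meant
 * for every mgmt socket go through mgmt_event() further down instead.
 */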
261
262static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
263			u16 data_len)
264{
265	struct mgmt_rp_read_version rp;
266
267	BT_DBG("sock %p", sk);
268
269	rp.version = MGMT_VERSION;
270	rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
271
272	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
273			    sizeof(rp));
274}
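/* The user-space side of the call above, as a rough sketch (not part of
 * this file, error handling omitted): a client binds a raw HCI socket to
 * the control channel and writes a bare mgmt_hdr to issue READ_VERSION.
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_CONTROL,
 *	};
 *	int fd = socket(PF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	bind(fd, (struct sockaddr *) &addr, sizeof(addr));
 *
 *	struct mgmt_hdr hdr = {
 *		.opcode = htole16(MGMT_OP_READ_VERSION),
 *		.index  = htole16(MGMT_INDEX_NONE),
 *		.len    = 0,
 *	};
 *	write(fd, &hdr, sizeof(hdr));
 *	read(fd, ...);	 <- reply arrives as an MGMT_EV_CMD_COMPLETE event
 */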
275
276static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
277			 u16 data_len)
278{
279	struct mgmt_rp_read_commands *rp;
280	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
281	const u16 num_events = ARRAY_SIZE(mgmt_events);
282	__le16 *opcode;
283	size_t rp_size;
284	int i, err;
285
286	BT_DBG("sock %p", sk);
287
288	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
289
290	rp = kmalloc(rp_size, GFP_KERNEL);
291	if (!rp)
292		return -ENOMEM;
293
294	rp->num_commands = __constant_cpu_to_le16(num_commands);
295	rp->num_events = __constant_cpu_to_le16(num_events);
296
297	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
298		put_unaligned_le16(mgmt_commands[i], opcode);
299
300	for (i = 0; i < num_events; i++, opcode++)
301		put_unaligned_le16(mgmt_events[i], opcode);
302
303	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
304			   rp_size);
305	kfree(rp);
306
307	return err;
308}
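/* The READ_COMMANDS reply is simply the two counters followed by every
 * supported command opcode and then every event opcode, each written as an
 * unaligned little-endian u16 straight from the tables at the top of this
 * file.
 */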
309
310static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
311			   u16 data_len)
312{
313	struct mgmt_rp_read_index_list *rp;
314	struct hci_dev *d;
315	size_t rp_len;
316	u16 count;
317	int err;
318
319	BT_DBG("sock %p", sk);
320
321	read_lock(&hci_dev_list_lock);
322
323	count = 0;
324	list_for_each_entry(d, &hci_dev_list, list) {
325		if (d->dev_type == HCI_BREDR)
326			count++;
327	}
328
329	rp_len = sizeof(*rp) + (2 * count);
330	rp = kmalloc(rp_len, GFP_ATOMIC);
331	if (!rp) {
332		read_unlock(&hci_dev_list_lock);
333		return -ENOMEM;
334	}
335
336	count = 0;
337	list_for_each_entry(d, &hci_dev_list, list) {
338		if (test_bit(HCI_SETUP, &d->dev_flags))
339			continue;
340
341		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
342			continue;
343
344		if (d->dev_type == HCI_BREDR) {
345			rp->index[count++] = cpu_to_le16(d->id);
346			BT_DBG("Added hci%u", d->id);
347		}
348	}
349
350	rp->num_controllers = cpu_to_le16(count);
351	rp_len = sizeof(*rp) + (2 * count);
352
353	read_unlock(&hci_dev_list_lock);
354
355	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
356			   rp_len);
357
358	kfree(rp);
359
360	return err;
361}
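/* read_index_list() only reports BR/EDR controllers; AMP controllers,
 * devices still in HCI_SETUP and devices handed to user space through
 * HCI_USER_CHANNEL are skipped. The count can therefore shrink between the
 * first and second walk of hci_dev_list, which is why rp_len is recomputed
 * from the number of indexes actually written before sending the reply.
 */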
362
363static u32 get_supported_settings(struct hci_dev *hdev)
364{
365	u32 settings = 0;
366
367	settings |= MGMT_SETTING_POWERED;
368	settings |= MGMT_SETTING_PAIRABLE;
369	settings |= MGMT_SETTING_DEBUG_KEYS;
370
371	if (lmp_bredr_capable(hdev)) {
372		settings |= MGMT_SETTING_CONNECTABLE;
373		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
374			settings |= MGMT_SETTING_FAST_CONNECTABLE;
375		settings |= MGMT_SETTING_DISCOVERABLE;
376		settings |= MGMT_SETTING_BREDR;
377		settings |= MGMT_SETTING_LINK_SECURITY;
378
379		if (lmp_ssp_capable(hdev)) {
380			settings |= MGMT_SETTING_SSP;
381			settings |= MGMT_SETTING_HS;
382		}
383
384		if (lmp_sc_capable(hdev) ||
385		    test_bit(HCI_FORCE_SC, &hdev->dev_flags))
386			settings |= MGMT_SETTING_SECURE_CONN;
387	}
388
389	if (lmp_le_capable(hdev)) {
390		settings |= MGMT_SETTING_LE;
391		settings |= MGMT_SETTING_ADVERTISING;
392	}
393
394	return settings;
395}
396
397static u32 get_current_settings(struct hci_dev *hdev)
398{
399	u32 settings = 0;
400
401	if (hdev_is_powered(hdev))
402		settings |= MGMT_SETTING_POWERED;
403
404	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
405		settings |= MGMT_SETTING_CONNECTABLE;
406
407	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
408		settings |= MGMT_SETTING_FAST_CONNECTABLE;
409
410	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
411		settings |= MGMT_SETTING_DISCOVERABLE;
412
413	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
414		settings |= MGMT_SETTING_PAIRABLE;
415
416	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
417		settings |= MGMT_SETTING_BREDR;
418
419	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
420		settings |= MGMT_SETTING_LE;
421
422	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
423		settings |= MGMT_SETTING_LINK_SECURITY;
424
425	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
426		settings |= MGMT_SETTING_SSP;
427
428	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
429		settings |= MGMT_SETTING_HS;
430
431	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
432		settings |= MGMT_SETTING_ADVERTISING;
433
434	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
435		settings |= MGMT_SETTING_SECURE_CONN;
436
437	if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
438		settings |= MGMT_SETTING_DEBUG_KEYS;
439
440	return settings;
441}
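/* get_supported_settings() and get_current_settings() both return the
 * MGMT_SETTING_* bitmask used in the READ_INFO reply and in
 * MGMT_EV_NEW_SETTINGS: the former describes what the hardware could do,
 * the latter what is enabled right now based on the HCI_* dev_flags.
 */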
442
443#define PNP_INFO_SVCLASS_ID		0x1200
444
445static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
446{
447	u8 *ptr = data, *uuids_start = NULL;
448	struct bt_uuid *uuid;
449
450	if (len < 4)
451		return ptr;
452
453	list_for_each_entry(uuid, &hdev->uuids, list) {
454		u16 uuid16;
455
456		if (uuid->size != 16)
457			continue;
458
459		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
460		if (uuid16 < 0x1100)
461			continue;
462
463		if (uuid16 == PNP_INFO_SVCLASS_ID)
464			continue;
465
466		if (!uuids_start) {
467			uuids_start = ptr;
468			uuids_start[0] = 1;
469			uuids_start[1] = EIR_UUID16_ALL;
470			ptr += 2;
471		}
472
473		/* Stop if not enough space to put next UUID */
474		if ((ptr - data) + sizeof(u16) > len) {
475			uuids_start[1] = EIR_UUID16_SOME;
476			break;
477		}
478
479		*ptr++ = (uuid16 & 0x00ff);
480		*ptr++ = (uuid16 & 0xff00) >> 8;
481		uuids_start[0] += sizeof(uuid16);
482	}
483
484	return ptr;
485}
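/* create_uuid16_list() and the two helpers below emit standard EIR/AD
 * structures: a length octet, a type octet (EIR_UUID16_ALL, downgraded to
 * EIR_UUID16_SOME when the list had to be truncated for lack of space) and
 * the UUIDs in little-endian order. As an illustration (not taken from this
 * file), a device whose only listed 16-bit service is Serial Port (0x1101)
 * would produce the bytes 03 03 01 11.
 */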
486
487static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
488{
489	u8 *ptr = data, *uuids_start = NULL;
490	struct bt_uuid *uuid;
491
492	if (len < 6)
493		return ptr;
494
495	list_for_each_entry(uuid, &hdev->uuids, list) {
496		if (uuid->size != 32)
497			continue;
498
499		if (!uuids_start) {
500			uuids_start = ptr;
501			uuids_start[0] = 1;
502			uuids_start[1] = EIR_UUID32_ALL;
503			ptr += 2;
504		}
505
506		/* Stop if not enough space to put next UUID */
507		if ((ptr - data) + sizeof(u32) > len) {
508			uuids_start[1] = EIR_UUID32_SOME;
509			break;
510		}
511
512		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
513		ptr += sizeof(u32);
514		uuids_start[0] += sizeof(u32);
515	}
516
517	return ptr;
518}
519
520static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
521{
522	u8 *ptr = data, *uuids_start = NULL;
523	struct bt_uuid *uuid;
524
525	if (len < 18)
526		return ptr;
527
528	list_for_each_entry(uuid, &hdev->uuids, list) {
529		if (uuid->size != 128)
530			continue;
531
532		if (!uuids_start) {
533			uuids_start = ptr;
534			uuids_start[0] = 1;
535			uuids_start[1] = EIR_UUID128_ALL;
536			ptr += 2;
537		}
538
539		/* Stop if not enough space to put next UUID */
540		if ((ptr - data) + 16 > len) {
541			uuids_start[1] = EIR_UUID128_SOME;
542			break;
543		}
544
545		memcpy(ptr, uuid->uuid, 16);
546		ptr += 16;
547		uuids_start[0] += 16;
548	}
549
550	return ptr;
551}
552
553static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
554{
555	struct pending_cmd *cmd;
556
557	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
558		if (cmd->opcode == opcode)
559			return cmd;
560	}
561
562	return NULL;
563}
564
565static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
566{
567	u8 ad_len = 0;
568	size_t name_len;
569
570	name_len = strlen(hdev->dev_name);
571	if (name_len > 0) {
572		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
573
574		if (name_len > max_len) {
575			name_len = max_len;
576			ptr[1] = EIR_NAME_SHORT;
577		} else
578			ptr[1] = EIR_NAME_COMPLETE;
579
580		ptr[0] = name_len + 1;
581
582		memcpy(ptr + 2, hdev->dev_name, name_len);
583
584		ad_len += (name_len + 2);
585		ptr += (name_len + 2);
586	}
587
588	return ad_len;
589}
590
591static void update_scan_rsp_data(struct hci_request *req)
592{
593	struct hci_dev *hdev = req->hdev;
594	struct hci_cp_le_set_scan_rsp_data cp;
595	u8 len;
596
597	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
598		return;
599
600	memset(&cp, 0, sizeof(cp));
601
602	len = create_scan_rsp_data(hdev, cp.data);
603
604	if (hdev->scan_rsp_data_len == len &&
605	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
606		return;
607
608	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
609	hdev->scan_rsp_data_len = len;
610
611	cp.length = len;
612
613	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
614}
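/* The scan response built here only carries the local name, shortened with
 * EIR_NAME_SHORT if it does not fit in the 31 byte advertising data buffer.
 * The cached copy in hdev->scan_rsp_data lets update_scan_rsp_data() skip
 * the HCI command entirely when nothing changed.
 */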
615
616static u8 get_adv_discov_flags(struct hci_dev *hdev)
617{
618	struct pending_cmd *cmd;
619
620	/* If there's a pending mgmt command the flags will not yet have
621	 * their final values, so check for this first.
622	 */
623	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
624	if (cmd) {
625		struct mgmt_mode *cp = cmd->param;
626		if (cp->val == 0x01)
627			return LE_AD_GENERAL;
628		else if (cp->val == 0x02)
629			return LE_AD_LIMITED;
630	} else {
631		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
632			return LE_AD_LIMITED;
633		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
634			return LE_AD_GENERAL;
635	}
636
637	return 0;
638}
639
640static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
641{
642	u8 ad_len = 0, flags = 0;
643
644	flags |= get_adv_discov_flags(hdev);
645
646	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
647		flags |= LE_AD_NO_BREDR;
648
649	if (flags) {
650		BT_DBG("adv flags 0x%02x", flags);
651
652		ptr[0] = 2;
653		ptr[1] = EIR_FLAGS;
654		ptr[2] = flags;
655
656		ad_len += 3;
657		ptr += 3;
658	}
659
660	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
661		ptr[0] = 2;
662		ptr[1] = EIR_TX_POWER;
663		ptr[2] = (u8) hdev->adv_tx_power;
664
665		ad_len += 3;
666		ptr += 3;
667	}
668
669	return ad_len;
670}
671
672static void update_adv_data(struct hci_request *req)
673{
674	struct hci_dev *hdev = req->hdev;
675	struct hci_cp_le_set_adv_data cp;
676	u8 len;
677
678	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
679		return;
680
681	memset(&cp, 0, sizeof(cp));
682
683	len = create_adv_data(hdev, cp.data);
684
685	if (hdev->adv_data_len == len &&
686	    memcmp(cp.data, hdev->adv_data, len) == 0)
687		return;
688
689	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
690	hdev->adv_data_len = len;
691
692	cp.length = len;
693
694	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
695}
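/* The advertising data mirrors the current state: an EIR_FLAGS structure
 * with the general/limited discoverable bits from get_adv_discov_flags()
 * plus LE_AD_NO_BREDR for LE-only configurations, and optionally the
 * advertising TX power. As with the scan response above, the command is
 * suppressed when the payload is identical to what the controller already
 * has.
 */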
696
697static void create_eir(struct hci_dev *hdev, u8 *data)
698{
699	u8 *ptr = data;
700	size_t name_len;
701
702	name_len = strlen(hdev->dev_name);
703
704	if (name_len > 0) {
705		/* EIR Data type */
706		if (name_len > 48) {
707			name_len = 48;
708			ptr[1] = EIR_NAME_SHORT;
709		} else
710			ptr[1] = EIR_NAME_COMPLETE;
711
712		/* EIR Data length */
713		ptr[0] = name_len + 1;
714
715		memcpy(ptr + 2, hdev->dev_name, name_len);
716
717		ptr += (name_len + 2);
718	}
719
720	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
721		ptr[0] = 2;
722		ptr[1] = EIR_TX_POWER;
723		ptr[2] = (u8) hdev->inq_tx_power;
724
725		ptr += 3;
726	}
727
728	if (hdev->devid_source > 0) {
729		ptr[0] = 9;
730		ptr[1] = EIR_DEVICE_ID;
731
732		put_unaligned_le16(hdev->devid_source, ptr + 2);
733		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
734		put_unaligned_le16(hdev->devid_product, ptr + 6);
735		put_unaligned_le16(hdev->devid_version, ptr + 8);
736
737		ptr += 10;
738	}
739
740	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
741	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
742	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
743}
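/* The extended inquiry response buffer is HCI_MAX_EIR_LENGTH (240) bytes
 * and is filled in a fixed order: local name, inquiry TX power, Device ID
 * record and finally the 16/32/128-bit UUID lists, with each UUID helper
 * given only the space that is left.
 */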
744
745static void update_eir(struct hci_request *req)
746{
747	struct hci_dev *hdev = req->hdev;
748	struct hci_cp_write_eir cp;
749
750	if (!hdev_is_powered(hdev))
751		return;
752
753	if (!lmp_ext_inq_capable(hdev))
754		return;
755
756	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
757		return;
758
759	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
760		return;
761
762	memset(&cp, 0, sizeof(cp));
763
764	create_eir(hdev, cp.data);
765
766	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
767		return;
768
769	memcpy(hdev->eir, cp.data, sizeof(cp.data));
770
771	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
772}
773
774static u8 get_service_classes(struct hci_dev *hdev)
775{
776	struct bt_uuid *uuid;
777	u8 val = 0;
778
779	list_for_each_entry(uuid, &hdev->uuids, list)
780		val |= uuid->svc_hint;
781
782	return val;
783}
784
785static void update_class(struct hci_request *req)
786{
787	struct hci_dev *hdev = req->hdev;
788	u8 cod[3];
789
790	BT_DBG("%s", hdev->name);
791
792	if (!hdev_is_powered(hdev))
793		return;
794
795	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
796		return;
797
798	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
799		return;
800
801	cod[0] = hdev->minor_class;
802	cod[1] = hdev->major_class;
803	cod[2] = get_service_classes(hdev);
804
805	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
806		cod[1] |= 0x20;
807
808	if (memcmp(cod, hdev->dev_class, 3) == 0)
809		return;
810
811	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
812}
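/* update_class() sends the Class of Device as three little-endian octets:
 * minor class, major class and the service class bits collected from the
 * registered UUID hints. The Limited Discoverable Mode flag is CoD bit 13,
 * i.e. bit 5 of the middle octet, which is why limited discoverable mode
 * ORs 0x20 into cod[1].
 */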
813
814static void service_cache_off(struct work_struct *work)
815{
816	struct hci_dev *hdev = container_of(work, struct hci_dev,
817					    service_cache.work);
818	struct hci_request req;
819
820	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
821		return;
822
823	hci_req_init(&req, hdev);
824
825	hci_dev_lock(hdev);
826
827	update_eir(&req);
828	update_class(&req);
829
830	hci_dev_unlock(hdev);
831
832	hci_req_run(&req, NULL);
833}
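/* While HCI_SERVICE_CACHE is set, update_eir() and update_class() are
 * no-ops, so the burst of UUID changes at daemon start-up (which begins by
 * removing all UUIDs, see enable_service_cache() below) results in a single
 * deferred update rather than one HCI command per change. service_cache_off()
 * clears the flag after CACHE_TIMEOUT and pushes the final EIR and class of
 * device values in one request.
 */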
834
835static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
836{
837	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
838		return;
839
840	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
841
842	/* Non-mgmt controlled devices get this bit set
843	 * implicitly so that pairing works for them; however,
844	 * for mgmt we require user-space to explicitly enable
845	 * it.
846	 */
847	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
848}
849
850static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
851				void *data, u16 data_len)
852{
853	struct mgmt_rp_read_info rp;
854
855	BT_DBG("sock %p %s", sk, hdev->name);
856
857	hci_dev_lock(hdev);
858
859	memset(&rp, 0, sizeof(rp));
860
861	bacpy(&rp.bdaddr, &hdev->bdaddr);
862
863	rp.version = hdev->hci_ver;
864	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
865
866	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
867	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
868
869	memcpy(rp.dev_class, hdev->dev_class, 3);
870
871	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
872	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
873
874	hci_dev_unlock(hdev);
875
876	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
877			    sizeof(rp));
878}
879
880static void mgmt_pending_free(struct pending_cmd *cmd)
881{
882	sock_put(cmd->sk);
883	kfree(cmd->param);
884	kfree(cmd);
885}
886
887static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
888					    struct hci_dev *hdev, void *data,
889					    u16 len)
890{
891	struct pending_cmd *cmd;
892
893	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
894	if (!cmd)
895		return NULL;
896
897	cmd->opcode = opcode;
898	cmd->index = hdev->id;
899
900	cmd->param = kmalloc(len, GFP_KERNEL);
901	if (!cmd->param) {
902		kfree(cmd);
903		return NULL;
904	}
905
906	if (data)
907		memcpy(cmd->param, data, len);
908
909	cmd->sk = sk;
910	sock_hold(sk);
911
912	list_add(&cmd->list, &hdev->mgmt_pending);
913
914	return cmd;
915}
916
917static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
918				 void (*cb)(struct pending_cmd *cmd,
919					    void *data),
920				 void *data)
921{
922	struct pending_cmd *cmd, *tmp;
923
924	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
925		if (opcode > 0 && cmd->opcode != opcode)
926			continue;
927
928		cb(cmd, data);
929	}
930}
931
932static void mgmt_pending_remove(struct pending_cmd *cmd)
933{
934	list_del(&cmd->list);
935	mgmt_pending_free(cmd);
936}
937
938static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
939{
940	__le32 settings = cpu_to_le32(get_current_settings(hdev));
941
942	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
943			    sizeof(settings));
944}
945
946static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
947		       u16 len)
948{
949	struct mgmt_mode *cp = data;
950	struct pending_cmd *cmd;
951	int err;
952
953	BT_DBG("request for %s", hdev->name);
954
955	if (cp->val != 0x00 && cp->val != 0x01)
956		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
957				  MGMT_STATUS_INVALID_PARAMS);
958
959	hci_dev_lock(hdev);
960
961	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
962		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
963				 MGMT_STATUS_BUSY);
964		goto failed;
965	}
966
967	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
968		cancel_delayed_work(&hdev->power_off);
969
970		if (cp->val) {
971			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
972					 data, len);
973			err = mgmt_powered(hdev, 1);
974			goto failed;
975		}
976	}
977
978	if (!!cp->val == hdev_is_powered(hdev)) {
979		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
980		goto failed;
981	}
982
983	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
984	if (!cmd) {
985		err = -ENOMEM;
986		goto failed;
987	}
988
989	if (cp->val)
990		queue_work(hdev->req_workqueue, &hdev->power_on);
991	else
992		queue_work(hdev->req_workqueue, &hdev->power_off.work);
993
994	err = 0;
995
996failed:
997	hci_dev_unlock(hdev);
998	return err;
999}
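/* Note that set_powered() itself only queues hdev->power_on or
 * hdev->power_off on the request workqueue; the settings response for the
 * pending command is sent later, once mgmt_powered() reports that the power
 * state change has actually happened.
 */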
1000
1001static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1002		      struct sock *skip_sk)
1003{
1004	struct sk_buff *skb;
1005	struct mgmt_hdr *hdr;
1006
1007	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1008	if (!skb)
1009		return -ENOMEM;
1010
1011	hdr = (void *) skb_put(skb, sizeof(*hdr));
1012	hdr->opcode = cpu_to_le16(event);
1013	if (hdev)
1014		hdr->index = cpu_to_le16(hdev->id);
1015	else
1016		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
1017	hdr->len = cpu_to_le16(data_len);
1018
1019	if (data)
1020		memcpy(skb_put(skb, data_len), data, data_len);
1021
1022	/* Time stamp */
1023	__net_timestamp(skb);
1024
1025	hci_send_to_control(skb, skip_sk);
1026	kfree_skb(skb);
1027
1028	return 0;
1029}
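/* mgmt_event() is the broadcast path: the event goes to every mgmt control
 * socket via hci_send_to_control(), optionally skipping skip_sk, which is
 * normally the socket that triggered the change and already received its
 * own command response.
 */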
1030
1031static int new_settings(struct hci_dev *hdev, struct sock *skip)
1032{
1033	__le32 ev;
1034
1035	ev = cpu_to_le32(get_current_settings(hdev));
1036
1037	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1038}
1039
1040struct cmd_lookup {
1041	struct sock *sk;
1042	struct hci_dev *hdev;
1043	u8 mgmt_status;
1044};
1045
1046static void settings_rsp(struct pending_cmd *cmd, void *data)
1047{
1048	struct cmd_lookup *match = data;
1049
1050	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1051
1052	list_del(&cmd->list);
1053
1054	if (match->sk == NULL) {
1055		match->sk = cmd->sk;
1056		sock_hold(match->sk);
1057	}
1058
1059	mgmt_pending_free(cmd);
1060}
1061
1062static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1063{
1064	u8 *status = data;
1065
1066	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1067	mgmt_pending_remove(cmd);
1068}
1069
1070static u8 mgmt_bredr_support(struct hci_dev *hdev)
1071{
1072	if (!lmp_bredr_capable(hdev))
1073		return MGMT_STATUS_NOT_SUPPORTED;
1074	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1075		return MGMT_STATUS_REJECTED;
1076	else
1077		return MGMT_STATUS_SUCCESS;
1078}
1079
1080static u8 mgmt_le_support(struct hci_dev *hdev)
1081{
1082	if (!lmp_le_capable(hdev))
1083		return MGMT_STATUS_NOT_SUPPORTED;
1084	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1085		return MGMT_STATUS_REJECTED;
1086	else
1087		return MGMT_STATUS_SUCCESS;
1088}
1089
1090static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1091{
1092	struct pending_cmd *cmd;
1093	struct mgmt_mode *cp;
1094	struct hci_request req;
1095	bool changed;
1096
1097	BT_DBG("status 0x%02x", status);
1098
1099	hci_dev_lock(hdev);
1100
1101	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1102	if (!cmd)
1103		goto unlock;
1104
1105	if (status) {
1106		u8 mgmt_err = mgmt_status(status);
1107		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1108		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1109		goto remove_cmd;
1110	}
1111
1112	cp = cmd->param;
1113	if (cp->val) {
1114		changed = !test_and_set_bit(HCI_DISCOVERABLE,
1115					    &hdev->dev_flags);
1116
1117		if (hdev->discov_timeout > 0) {
1118			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1119			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1120					   to);
1121		}
1122	} else {
1123		changed = test_and_clear_bit(HCI_DISCOVERABLE,
1124					     &hdev->dev_flags);
1125	}
1126
1127	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1128
1129	if (changed)
1130		new_settings(hdev, cmd->sk);
1131
1132	/* When the discoverable mode gets changed, make sure
1133	 * that the class of device has the limited discoverable
1134	 * bit correctly set.
1135	 */
1136	hci_req_init(&req, hdev);
1137	update_class(&req);
1138	hci_req_run(&req, NULL);
1139
1140remove_cmd:
1141	mgmt_pending_remove(cmd);
1142
1143unlock:
1144	hci_dev_unlock(hdev);
1145}
1146
1147static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1148			    u16 len)
1149{
1150	struct mgmt_cp_set_discoverable *cp = data;
1151	struct pending_cmd *cmd;
1152	struct hci_request req;
1153	u16 timeout;
1154	u8 scan;
1155	int err;
1156
1157	BT_DBG("request for %s", hdev->name);
1158
1159	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1160	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1161		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1162				  MGMT_STATUS_REJECTED);
1163
1164	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1165		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1166				  MGMT_STATUS_INVALID_PARAMS);
1167
1168	timeout = __le16_to_cpu(cp->timeout);
1169
1170	/* Disabling discoverable requires that no timeout is set,
1171	 * and enabling limited discoverable requires a timeout.
1172	 */
1173	if ((cp->val == 0x00 && timeout > 0) ||
1174	    (cp->val == 0x02 && timeout == 0))
1175		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1176				  MGMT_STATUS_INVALID_PARAMS);
1177
1178	hci_dev_lock(hdev);
1179
1180	if (!hdev_is_powered(hdev) && timeout > 0) {
1181		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1182				 MGMT_STATUS_NOT_POWERED);
1183		goto failed;
1184	}
1185
1186	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1187	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1188		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1189				 MGMT_STATUS_BUSY);
1190		goto failed;
1191	}
1192
1193	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1194		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1195				 MGMT_STATUS_REJECTED);
1196		goto failed;
1197	}
1198
1199	if (!hdev_is_powered(hdev)) {
1200		bool changed = false;
1201
1202		/* Setting limited discoverable when powered off is
1203		 * not a valid operation since it requires a timeout,
1204		 * so there is no need to check HCI_LIMITED_DISCOVERABLE.
1205		 */
1206		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1207			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1208			changed = true;
1209		}
1210
1211		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1212		if (err < 0)
1213			goto failed;
1214
1215		if (changed)
1216			err = new_settings(hdev, sk);
1217
1218		goto failed;
1219	}
1220
1221	/* If the current mode is the same, then just update the timeout
1222	 * value with the new value. If only the timeout gets updated,
1223	 * there is no need for any HCI transactions.
1224	 */
1225	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1226	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1227					  &hdev->dev_flags)) {
1228		cancel_delayed_work(&hdev->discov_off);
1229		hdev->discov_timeout = timeout;
1230
1231		if (cp->val && hdev->discov_timeout > 0) {
1232			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1233			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1234					   to);
1235		}
1236
1237		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1238		goto failed;
1239	}
1240
1241	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1242	if (!cmd) {
1243		err = -ENOMEM;
1244		goto failed;
1245	}
1246
1247	/* Cancel any potential discoverable timeout that might still
1248	 * be active and store the new timeout value. The arming of
1249	 * the timeout happens in the complete handler.
1250	 */
1251	cancel_delayed_work(&hdev->discov_off);
1252	hdev->discov_timeout = timeout;
1253
1254	/* Limited discoverable mode */
1255	if (cp->val == 0x02)
1256		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1257	else
1258		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1259
1260	hci_req_init(&req, hdev);
1261
1262	/* The procedure for LE-only controllers is much simpler - just
1263	 * update the advertising data.
1264	 */
1265	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1266		goto update_ad;
1267
1268	scan = SCAN_PAGE;
1269
1270	if (cp->val) {
1271		struct hci_cp_write_current_iac_lap hci_cp;
1272
1273		if (cp->val == 0x02) {
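		/* IACs are transmitted as 3-byte LAPs, least significant
		 * byte first: the Limited Inquiry Access Code 0x9e8b00
		 * becomes 00 8b 9e and the General Inquiry Access Code
		 * 0x9e8b33 becomes 33 8b 9e, matching the byte values
		 * filled in below.
		 */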
1274			/* Limited discoverable mode */
1275			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1276			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
1277			hci_cp.iac_lap[1] = 0x8b;
1278			hci_cp.iac_lap[2] = 0x9e;
1279			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
1280			hci_cp.iac_lap[4] = 0x8b;
1281			hci_cp.iac_lap[5] = 0x9e;
1282		} else {
1283			/* General discoverable mode */
1284			hci_cp.num_iac = 1;
1285			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
1286			hci_cp.iac_lap[1] = 0x8b;
1287			hci_cp.iac_lap[2] = 0x9e;
1288		}
1289
1290		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1291			    (hci_cp.num_iac * 3) + 1, &hci_cp);
1292
1293		scan |= SCAN_INQUIRY;
1294	} else {
1295		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1296	}
1297
1298	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1299
1300update_ad:
1301	update_adv_data(&req);
1302
1303	err = hci_req_run(&req, set_discoverable_complete);
1304	if (err < 0)
1305		mgmt_pending_remove(cmd);
1306
1307failed:
1308	hci_dev_unlock(hdev);
1309	return err;
1310}
1311
1312static void write_fast_connectable(struct hci_request *req, bool enable)
1313{
1314	struct hci_dev *hdev = req->hdev;
1315	struct hci_cp_write_page_scan_activity acp;
1316	u8 type;
1317
1318	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1319		return;
1320
1321	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1322		return;
1323
1324	if (enable) {
1325		type = PAGE_SCAN_TYPE_INTERLACED;
1326
1327		/* 160 msec page scan interval */
1328		acp.interval = __constant_cpu_to_le16(0x0100);
1329	} else {
1330		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
1331
1332		/* default 1.28 sec page scan */
1333		acp.interval = __constant_cpu_to_le16(0x0800);
1334	}
1335
1336	acp.window = __constant_cpu_to_le16(0x0012);
1337
1338	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1339	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
1340		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1341			    sizeof(acp), &acp);
1342
1343	if (hdev->page_scan_type != type)
1344		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1345}
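/* Page scan timing values in write_fast_connectable() are in 0.625 ms
 * slots: interval 0x0100 is 256 * 0.625 ms = 160 ms, the default 0x0800 is
 * 1.28 s, and the window 0x0012 is 11.25 ms. Interlaced scanning with the
 * short interval is what makes "fast connectable" answer pages quickly, at
 * the cost of extra radio time.
 */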
1346
1347static u8 get_adv_type(struct hci_dev *hdev)
1348{
1349	struct pending_cmd *cmd;
1350	bool connectable;
1351
1352	/* If there's a pending mgmt command the flag will not yet have
1353	 * its final value, so check for this first.
1354	 */
1355	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1356	if (cmd) {
1357		struct mgmt_mode *cp = cmd->param;
1358		connectable = !!cp->val;
1359	} else {
1360		connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1361	}
1362
1363	return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1364}
1365
1366static void enable_advertising(struct hci_request *req)
1367{
1368	struct hci_dev *hdev = req->hdev;
1369	struct hci_cp_le_set_adv_param cp;
1370	u8 enable = 0x01;
1371
1372	memset(&cp, 0, sizeof(cp));
1373	cp.min_interval = __constant_cpu_to_le16(0x0800);
1374	cp.max_interval = __constant_cpu_to_le16(0x0800);
1375	cp.type = get_adv_type(hdev);
1376	cp.own_address_type = hdev->own_addr_type;
1377	cp.channel_map = 0x07;
1378
1379	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1380
1381	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1382}
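/* The advertising interval 0x0800 is also in 0.625 ms units, i.e. 1.28 s
 * for both minimum and maximum, and channel_map 0x07 enables all three LE
 * advertising channels (37, 38 and 39). The advertising type is ADV_IND
 * when the controller is connectable and ADV_NONCONN_IND otherwise, as
 * decided by get_adv_type() above.
 */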
1383
1384static void disable_advertising(struct hci_request *req)
1385{
1386	u8 enable = 0x00;
1387
1388	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1389}
1390
1391static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1392{
1393	struct pending_cmd *cmd;
1394	struct mgmt_mode *cp;
1395	bool changed;
1396
1397	BT_DBG("status 0x%02x", status);
1398
1399	hci_dev_lock(hdev);
1400
1401	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1402	if (!cmd)
1403		goto unlock;
1404
1405	if (status) {
1406		u8 mgmt_err = mgmt_status(status);
1407		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1408		goto remove_cmd;
1409	}
1410
1411	cp = cmd->param;
1412	if (cp->val)
1413		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1414	else
1415		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1416
1417	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1418
1419	if (changed)
1420		new_settings(hdev, cmd->sk);
1421
1422remove_cmd:
1423	mgmt_pending_remove(cmd);
1424
1425unlock:
1426	hci_dev_unlock(hdev);
1427}
1428
1429static int set_connectable_update_settings(struct hci_dev *hdev,
1430					   struct sock *sk, u8 val)
1431{
1432	bool changed = false;
1433	int err;
1434
1435	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1436		changed = true;
1437
1438	if (val) {
1439		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1440	} else {
1441		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1442		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1443	}
1444
1445	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1446	if (err < 0)
1447		return err;
1448
1449	if (changed)
1450		return new_settings(hdev, sk);
1451
1452	return 0;
1453}
1454
1455static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1456			   u16 len)
1457{
1458	struct mgmt_mode *cp = data;
1459	struct pending_cmd *cmd;
1460	struct hci_request req;
1461	u8 scan;
1462	int err;
1463
1464	BT_DBG("request for %s", hdev->name);
1465
1466	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1467	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1468		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1469				  MGMT_STATUS_REJECTED);
1470
1471	if (cp->val != 0x00 && cp->val != 0x01)
1472		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1473				  MGMT_STATUS_INVALID_PARAMS);
1474
1475	hci_dev_lock(hdev);
1476
1477	if (!hdev_is_powered(hdev)) {
1478		err = set_connectable_update_settings(hdev, sk, cp->val);
1479		goto failed;
1480	}
1481
1482	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1483	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1484		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1485				 MGMT_STATUS_BUSY);
1486		goto failed;
1487	}
1488
1489	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1490	if (!cmd) {
1491		err = -ENOMEM;
1492		goto failed;
1493	}
1494
1495	hci_req_init(&req, hdev);
1496
1497	/* If BR/EDR is not enabled and we disable advertising as a
1498	 * by-product of disabling connectable, we need to update the
1499	 * advertising flags.
1500	 */
1501	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1502		if (!cp->val) {
1503			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1504			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1505		}
1506		update_adv_data(&req);
1507	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1508		if (cp->val) {
1509			scan = SCAN_PAGE;
1510		} else {
1511			scan = 0;
1512
1513			if (test_bit(HCI_ISCAN, &hdev->flags) &&
1514			    hdev->discov_timeout > 0)
1515				cancel_delayed_work(&hdev->discov_off);
1516		}
1517
1518		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1519	}
1520
1521	/* If we're going from non-connectable to connectable or
1522	 * vice-versa when fast connectable is enabled, ensure that
1523	 * connectable gets disabled. write_fast_connectable won't do
1524	 * anything if the page scan parameters are already what they
1525	 * should be.
1526	 */
1527	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1528		write_fast_connectable(&req, false);
1529
1530	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1531	    hci_conn_num(hdev, LE_LINK) == 0) {
1532		disable_advertising(&req);
1533		enable_advertising(&req);
1534	}
1535
1536	err = hci_req_run(&req, set_connectable_complete);
1537	if (err < 0) {
1538		mgmt_pending_remove(cmd);
1539		if (err == -ENODATA)
1540			err = set_connectable_update_settings(hdev, sk,
1541							      cp->val);
1542		goto failed;
1543	}
1544
1545failed:
1546	hci_dev_unlock(hdev);
1547	return err;
1548}
1549
1550static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1551			u16 len)
1552{
1553	struct mgmt_mode *cp = data;
1554	bool changed;
1555	int err;
1556
1557	BT_DBG("request for %s", hdev->name);
1558
1559	if (cp->val != 0x00 && cp->val != 0x01)
1560		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1561				  MGMT_STATUS_INVALID_PARAMS);
1562
1563	hci_dev_lock(hdev);
1564
1565	if (cp->val)
1566		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1567	else
1568		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1569
1570	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1571	if (err < 0)
1572		goto unlock;
1573
1574	if (changed)
1575		err = new_settings(hdev, sk);
1576
1577unlock:
1578	hci_dev_unlock(hdev);
1579	return err;
1580}
1581
1582static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1583			     u16 len)
1584{
1585	struct mgmt_mode *cp = data;
1586	struct pending_cmd *cmd;
1587	u8 val, status;
1588	int err;
1589
1590	BT_DBG("request for %s", hdev->name);
1591
1592	status = mgmt_bredr_support(hdev);
1593	if (status)
1594		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1595				  status);
1596
1597	if (cp->val != 0x00 && cp->val != 0x01)
1598		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1599				  MGMT_STATUS_INVALID_PARAMS);
1600
1601	hci_dev_lock(hdev);
1602
1603	if (!hdev_is_powered(hdev)) {
1604		bool changed = false;
1605
1606		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1607					  &hdev->dev_flags)) {
1608			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1609			changed = true;
1610		}
1611
1612		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1613		if (err < 0)
1614			goto failed;
1615
1616		if (changed)
1617			err = new_settings(hdev, sk);
1618
1619		goto failed;
1620	}
1621
1622	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1623		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1624				 MGMT_STATUS_BUSY);
1625		goto failed;
1626	}
1627
1628	val = !!cp->val;
1629
1630	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1631		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1632		goto failed;
1633	}
1634
1635	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1636	if (!cmd) {
1637		err = -ENOMEM;
1638		goto failed;
1639	}
1640
1641	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1642	if (err < 0) {
1643		mgmt_pending_remove(cmd);
1644		goto failed;
1645	}
1646
1647failed:
1648	hci_dev_unlock(hdev);
1649	return err;
1650}
1651
1652static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1653{
1654	struct mgmt_mode *cp = data;
1655	struct pending_cmd *cmd;
1656	u8 status;
1657	int err;
1658
1659	BT_DBG("request for %s", hdev->name);
1660
1661	status = mgmt_bredr_support(hdev);
1662	if (status)
1663		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1664
1665	if (!lmp_ssp_capable(hdev))
1666		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1667				  MGMT_STATUS_NOT_SUPPORTED);
1668
1669	if (cp->val != 0x00 && cp->val != 0x01)
1670		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1671				  MGMT_STATUS_INVALID_PARAMS);
1672
1673	hci_dev_lock(hdev);
1674
1675	if (!hdev_is_powered(hdev)) {
1676		bool changed;
1677
1678		if (cp->val) {
1679			changed = !test_and_set_bit(HCI_SSP_ENABLED,
1680						    &hdev->dev_flags);
1681		} else {
1682			changed = test_and_clear_bit(HCI_SSP_ENABLED,
1683						     &hdev->dev_flags);
1684			if (!changed)
1685				changed = test_and_clear_bit(HCI_HS_ENABLED,
1686							     &hdev->dev_flags);
1687			else
1688				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1689		}
1690
1691		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1692		if (err < 0)
1693			goto failed;
1694
1695		if (changed)
1696			err = new_settings(hdev, sk);
1697
1698		goto failed;
1699	}
1700
1701	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1702	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1703		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1704				 MGMT_STATUS_BUSY);
1705		goto failed;
1706	}
1707
1708	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1709		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1710		goto failed;
1711	}
1712
1713	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1714	if (!cmd) {
1715		err = -ENOMEM;
1716		goto failed;
1717	}
1718
1719	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1720	if (err < 0) {
1721		mgmt_pending_remove(cmd);
1722		goto failed;
1723	}
1724
1725failed:
1726	hci_dev_unlock(hdev);
1727	return err;
1728}
1729
1730static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1731{
1732	struct mgmt_mode *cp = data;
1733	bool changed;
1734	u8 status;
1735	int err;
1736
1737	BT_DBG("request for %s", hdev->name);
1738
1739	status = mgmt_bredr_support(hdev);
1740	if (status)
1741		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1742
1743	if (!lmp_ssp_capable(hdev))
1744		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1745				  MGMT_STATUS_NOT_SUPPORTED);
1746
1747	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1748		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1749				  MGMT_STATUS_REJECTED);
1750
1751	if (cp->val != 0x00 && cp->val != 0x01)
1752		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1753				  MGMT_STATUS_INVALID_PARAMS);
1754
1755	hci_dev_lock(hdev);
1756
1757	if (cp->val) {
1758		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1759	} else {
1760		if (hdev_is_powered(hdev)) {
1761			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1762					 MGMT_STATUS_REJECTED);
1763			goto unlock;
1764		}
1765
1766		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1767	}
1768
1769	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1770	if (err < 0)
1771		goto unlock;
1772
1773	if (changed)
1774		err = new_settings(hdev, sk);
1775
1776unlock:
1777	hci_dev_unlock(hdev);
1778	return err;
1779}
1780
1781static void le_enable_complete(struct hci_dev *hdev, u8 status)
1782{
1783	struct cmd_lookup match = { NULL, hdev };
1784
1785	if (status) {
1786		u8 mgmt_err = mgmt_status(status);
1787
1788		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1789				     &mgmt_err);
1790		return;
1791	}
1792
1793	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1794
1795	new_settings(hdev, match.sk);
1796
1797	if (match.sk)
1798		sock_put(match.sk);
1799
1800	/* Make sure the controller has a good default for
1801	 * advertising data. Restrict the update to when LE
1802	 * has actually been enabled. During power on, the
1803	 * update in powered_update_hci will take care of it.
1804	 */
1805	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1806		struct hci_request req;
1807
1808		hci_dev_lock(hdev);
1809
1810		hci_req_init(&req, hdev);
1811		update_adv_data(&req);
1812		update_scan_rsp_data(&req);
1813		hci_req_run(&req, NULL);
1814
1815		hci_dev_unlock(hdev);
1816	}
1817}
1818
1819static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1820{
1821	struct mgmt_mode *cp = data;
1822	struct hci_cp_write_le_host_supported hci_cp;
1823	struct pending_cmd *cmd;
1824	struct hci_request req;
1825	int err;
1826	u8 val, enabled;
1827
1828	BT_DBG("request for %s", hdev->name);
1829
1830	if (!lmp_le_capable(hdev))
1831		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1832				  MGMT_STATUS_NOT_SUPPORTED);
1833
1834	if (cp->val != 0x00 && cp->val != 0x01)
1835		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1836				  MGMT_STATUS_INVALID_PARAMS);
1837
1838	/* LE-only devices do not allow toggling LE on/off */
1839	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1840		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1841				  MGMT_STATUS_REJECTED);
1842
1843	hci_dev_lock(hdev);
1844
1845	val = !!cp->val;
1846	enabled = lmp_host_le_capable(hdev);
1847
1848	if (!hdev_is_powered(hdev) || val == enabled) {
1849		bool changed = false;
1850
1851		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1852			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1853			changed = true;
1854		}
1855
1856		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
1857			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1858			changed = true;
1859		}
1860
1861		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1862		if (err < 0)
1863			goto unlock;
1864
1865		if (changed)
1866			err = new_settings(hdev, sk);
1867
1868		goto unlock;
1869	}
1870
1871	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
1872	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1873		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1874				 MGMT_STATUS_BUSY);
1875		goto unlock;
1876	}
1877
1878	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1879	if (!cmd) {
1880		err = -ENOMEM;
1881		goto unlock;
1882	}
1883
1884	hci_req_init(&req, hdev);
1885
1886	memset(&hci_cp, 0, sizeof(hci_cp));
1887
1888	if (val) {
1889		hci_cp.le = val;
1890		hci_cp.simul = lmp_le_br_capable(hdev);
1891	} else {
1892		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1893			disable_advertising(&req);
1894	}
1895
1896	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1897		    &hci_cp);
1898
1899	err = hci_req_run(&req, le_enable_complete);
1900	if (err < 0)
1901		mgmt_pending_remove(cmd);
1902
1903unlock:
1904	hci_dev_unlock(hdev);
1905	return err;
1906}
1907
1908/* This is a helper function to test for pending mgmt commands that can
1909 * cause CoD or EIR HCI commands. We can only allow one such pending
1910 * mgmt command at a time since otherwise we cannot easily track what
1911 * the current values are and will be, and based on that decide whether
1912 * a new HCI command needs to be sent and, if so, with what value.
1913 */
1914static bool pending_eir_or_class(struct hci_dev *hdev)
1915{
1916	struct pending_cmd *cmd;
1917
1918	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1919		switch (cmd->opcode) {
1920		case MGMT_OP_ADD_UUID:
1921		case MGMT_OP_REMOVE_UUID:
1922		case MGMT_OP_SET_DEV_CLASS:
1923		case MGMT_OP_SET_POWERED:
1924			return true;
1925		}
1926	}
1927
1928	return false;
1929}
1930
1931static const u8 bluetooth_base_uuid[] = {
1932			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1933			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1934};
1935
1936static u8 get_uuid_size(const u8 *uuid)
1937{
1938	u32 val;
1939
1940	if (memcmp(uuid, bluetooth_base_uuid, 12))
1941		return 128;
1942
1943	val = get_unaligned_le32(&uuid[12]);
1944	if (val > 0xffff)
1945		return 32;
1946
1947	return 16;
1948}
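/* bluetooth_base_uuid above is the Bluetooth Base UUID
 * (00000000-0000-1000-8000-00805F9B34FB) stored in little-endian byte order
 * with the top 32 bits zeroed. A 128-bit UUID derived from the base differs
 * only in bytes 12-15, so get_uuid_size() reports the smallest form that
 * can represent it: 16 bits if the value fits in two bytes, 32 bits
 * otherwise, and 128 bits for anything not built on the base.
 */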
1949
1950static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1951{
1952	struct pending_cmd *cmd;
1953
1954	hci_dev_lock(hdev);
1955
1956	cmd = mgmt_pending_find(mgmt_op, hdev);
1957	if (!cmd)
1958		goto unlock;
1959
1960	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1961		     hdev->dev_class, 3);
1962
1963	mgmt_pending_remove(cmd);
1964
1965unlock:
1966	hci_dev_unlock(hdev);
1967}
1968
1969static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1970{
1971	BT_DBG("status 0x%02x", status);
1972
1973	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
1974}
1975
1976static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1977{
1978	struct mgmt_cp_add_uuid *cp = data;
1979	struct pending_cmd *cmd;
1980	struct hci_request req;
1981	struct bt_uuid *uuid;
1982	int err;
1983
1984	BT_DBG("request for %s", hdev->name);
1985
1986	hci_dev_lock(hdev);
1987
1988	if (pending_eir_or_class(hdev)) {
1989		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1990				 MGMT_STATUS_BUSY);
1991		goto failed;
1992	}
1993
1994	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1995	if (!uuid) {
1996		err = -ENOMEM;
1997		goto failed;
1998	}
1999
2000	memcpy(uuid->uuid, cp->uuid, 16);
2001	uuid->svc_hint = cp->svc_hint;
2002	uuid->size = get_uuid_size(cp->uuid);
2003
2004	list_add_tail(&uuid->list, &hdev->uuids);
2005
2006	hci_req_init(&req, hdev);
2007
2008	update_class(&req);
2009	update_eir(&req);
2010
2011	err = hci_req_run(&req, add_uuid_complete);
2012	if (err < 0) {
2013		if (err != -ENODATA)
2014			goto failed;
2015
2016		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2017				   hdev->dev_class, 3);
2018		goto failed;
2019	}
2020
2021	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2022	if (!cmd) {
2023		err = -ENOMEM;
2024		goto failed;
2025	}
2026
2027	err = 0;
2028
2029failed:
2030	hci_dev_unlock(hdev);
2031	return err;
2032}
2033
2034static bool enable_service_cache(struct hci_dev *hdev)
2035{
2036	if (!hdev_is_powered(hdev))
2037		return false;
2038
2039	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2040		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2041				   CACHE_TIMEOUT);
2042		return true;
2043	}
2044
2045	return false;
2046}
2047
2048static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2049{
2050	BT_DBG("status 0x%02x", status);
2051
2052	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2053}
2054
2055static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2056		       u16 len)
2057{
2058	struct mgmt_cp_remove_uuid *cp = data;
2059	struct pending_cmd *cmd;
2060	struct bt_uuid *match, *tmp;
2061	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2062	struct hci_request req;
2063	int err, found;
2064
2065	BT_DBG("request for %s", hdev->name);
2066
2067	hci_dev_lock(hdev);
2068
2069	if (pending_eir_or_class(hdev)) {
2070		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2071				 MGMT_STATUS_BUSY);
2072		goto unlock;
2073	}
2074
2075	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2076		hci_uuids_clear(hdev);
2077
2078		if (enable_service_cache(hdev)) {
2079			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2080					   0, hdev->dev_class, 3);
2081			goto unlock;
2082		}
2083
2084		goto update_class;
2085	}
2086
2087	found = 0;
2088
2089	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2090		if (memcmp(match->uuid, cp->uuid, 16) != 0)
2091			continue;
2092
2093		list_del(&match->list);
2094		kfree(match);
2095		found++;
2096	}
2097
2098	if (found == 0) {
2099		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2100				 MGMT_STATUS_INVALID_PARAMS);
2101		goto unlock;
2102	}
2103
2104update_class:
2105	hci_req_init(&req, hdev);
2106
2107	update_class(&req);
2108	update_eir(&req);
2109
2110	err = hci_req_run(&req, remove_uuid_complete);
2111	if (err < 0) {
2112		if (err != -ENODATA)
2113			goto unlock;
2114
2115		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2116				   hdev->dev_class, 3);
2117		goto unlock;
2118	}
2119
2120	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2121	if (!cmd) {
2122		err = -ENOMEM;
2123		goto unlock;
2124	}
2125
2126	err = 0;
2127
2128unlock:
2129	hci_dev_unlock(hdev);
2130	return err;
2131}
2132
2133static void set_class_complete(struct hci_dev *hdev, u8 status)
2134{
2135	BT_DBG("status 0x%02x", status);
2136
2137	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2138}
2139
2140static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2141			 u16 len)
2142{
2143	struct mgmt_cp_set_dev_class *cp = data;
2144	struct pending_cmd *cmd;
2145	struct hci_request req;
2146	int err;
2147
2148	BT_DBG("request for %s", hdev->name);
2149
2150	if (!lmp_bredr_capable(hdev))
2151		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2152				  MGMT_STATUS_NOT_SUPPORTED);
2153
2154	hci_dev_lock(hdev);
2155
2156	if (pending_eir_or_class(hdev)) {
2157		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2158				 MGMT_STATUS_BUSY);
2159		goto unlock;
2160	}
2161
2162	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2163		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2164				 MGMT_STATUS_INVALID_PARAMS);
2165		goto unlock;
2166	}
2167
2168	hdev->major_class = cp->major;
2169	hdev->minor_class = cp->minor;
2170
2171	if (!hdev_is_powered(hdev)) {
2172		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2173				   hdev->dev_class, 3);
2174		goto unlock;
2175	}
2176
2177	hci_req_init(&req, hdev);
2178
2179	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2180		hci_dev_unlock(hdev);
2181		cancel_delayed_work_sync(&hdev->service_cache);
2182		hci_dev_lock(hdev);
2183		update_eir(&req);
2184	}
2185
2186	update_class(&req);
2187
2188	err = hci_req_run(&req, set_class_complete);
2189	if (err < 0) {
2190		if (err != -ENODATA)
2191			goto unlock;
2192
2193		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2194				   hdev->dev_class, 3);
2195		goto unlock;
2196	}
2197
2198	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2199	if (!cmd) {
2200		err = -ENOMEM;
2201		goto unlock;
2202	}
2203
2204	err = 0;
2205
2206unlock:
2207	hci_dev_unlock(hdev);
2208	return err;
2209}
2210
2211static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2212			  u16 len)
2213{
2214	struct mgmt_cp_load_link_keys *cp = data;
2215	u16 key_count, expected_len;
2216	bool changed;
2217	int i;
2218
2219	BT_DBG("request for %s", hdev->name);
2220
2221	if (!lmp_bredr_capable(hdev))
2222		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2223				  MGMT_STATUS_NOT_SUPPORTED);
2224
2225	key_count = __le16_to_cpu(cp->key_count);
2226
2227	expected_len = sizeof(*cp) + key_count *
2228					sizeof(struct mgmt_link_key_info);
2229	if (expected_len != len) {
2230		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2231		       expected_len, len);
2232		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2233				  MGMT_STATUS_INVALID_PARAMS);
2234	}
2235
2236	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2237		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2238				  MGMT_STATUS_INVALID_PARAMS);
2239
2240	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2241	       key_count);
2242
2243	for (i = 0; i < key_count; i++) {
2244		struct mgmt_link_key_info *key = &cp->keys[i];
2245
2246		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2247			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2248					  MGMT_STATUS_INVALID_PARAMS);
2249	}
2250
2251	hci_dev_lock(hdev);
2252
2253	hci_link_keys_clear(hdev);
2254
2255	if (cp->debug_keys)
2256		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2257	else
2258		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2259
2260	if (changed)
2261		new_settings(hdev, NULL);
2262
2263	for (i = 0; i < key_count; i++) {
2264		struct mgmt_link_key_info *key = &cp->keys[i];
2265
2266		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2267				 key->type, key->pin_len);
2268	}
2269
2270	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2271
2272	hci_dev_unlock(hdev);
2273
2274	return 0;
2275}
2276
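/* Send the Device Unpaired event for the given address to all mgmt
 * sockets except skip_sk.
 */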
2277static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2278			   u8 addr_type, struct sock *skip_sk)
2279{
2280	struct mgmt_ev_device_unpaired ev;
2281
2282	bacpy(&ev.addr.bdaddr, bdaddr);
2283	ev.addr.type = addr_type;
2284
2285	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2286			  skip_sk);
2287}
2288
2289static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2290			 u16 len)
2291{
2292	struct mgmt_cp_unpair_device *cp = data;
2293	struct mgmt_rp_unpair_device rp;
2294	struct hci_cp_disconnect dc;
2295	struct pending_cmd *cmd;
2296	struct hci_conn *conn;
2297	int err;
2298
2299	memset(&rp, 0, sizeof(rp));
2300	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2301	rp.addr.type = cp->addr.type;
2302
2303	if (!bdaddr_type_is_valid(cp->addr.type))
2304		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2305				    MGMT_STATUS_INVALID_PARAMS,
2306				    &rp, sizeof(rp));
2307
2308	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2309		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2310				    MGMT_STATUS_INVALID_PARAMS,
2311				    &rp, sizeof(rp));
2312
2313	hci_dev_lock(hdev);
2314
2315	if (!hdev_is_powered(hdev)) {
2316		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2317				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2318		goto unlock;
2319	}
2320
2321	if (cp->addr.type == BDADDR_BREDR) {
2322		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2323	} else {
2324		u8 addr_type;
2325
2326		if (cp->addr.type == BDADDR_LE_PUBLIC)
2327			addr_type = ADDR_LE_DEV_PUBLIC;
2328		else
2329			addr_type = ADDR_LE_DEV_RANDOM;
2330
2331		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2332
2333		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2334	}
2335
2336	if (err < 0) {
2337		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2338				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2339		goto unlock;
2340	}
2341
2342	if (cp->disconnect) {
2343		if (cp->addr.type == BDADDR_BREDR)
2344			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2345						       &cp->addr.bdaddr);
2346		else
2347			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2348						       &cp->addr.bdaddr);
2349	} else {
2350		conn = NULL;
2351	}
2352
2353	if (!conn) {
2354		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2355				   &rp, sizeof(rp));
2356		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2357		goto unlock;
2358	}
2359
2360	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2361			       sizeof(*cp));
2362	if (!cmd) {
2363		err = -ENOMEM;
2364		goto unlock;
2365	}
2366
2367	dc.handle = cpu_to_le16(conn->handle);
2368	dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2369	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2370	if (err < 0)
2371		mgmt_pending_remove(cmd);
2372
2373unlock:
2374	hci_dev_unlock(hdev);
2375	return err;
2376}
2377
2378static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2379		      u16 len)
2380{
2381	struct mgmt_cp_disconnect *cp = data;
2382	struct mgmt_rp_disconnect rp;
2383	struct hci_cp_disconnect dc;
2384	struct pending_cmd *cmd;
2385	struct hci_conn *conn;
2386	int err;
2387
2388	BT_DBG("");
2389
2390	memset(&rp, 0, sizeof(rp));
2391	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2392	rp.addr.type = cp->addr.type;
2393
2394	if (!bdaddr_type_is_valid(cp->addr.type))
2395		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2396				    MGMT_STATUS_INVALID_PARAMS,
2397				    &rp, sizeof(rp));
2398
2399	hci_dev_lock(hdev);
2400
2401	if (!test_bit(HCI_UP, &hdev->flags)) {
2402		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2403				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2404		goto failed;
2405	}
2406
2407	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2408		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2409				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
2410		goto failed;
2411	}
2412
2413	if (cp->addr.type == BDADDR_BREDR)
2414		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2415					       &cp->addr.bdaddr);
2416	else
2417		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2418
2419	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2420		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2421				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2422		goto failed;
2423	}
2424
2425	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2426	if (!cmd) {
2427		err = -ENOMEM;
2428		goto failed;
2429	}
2430
2431	dc.handle = cpu_to_le16(conn->handle);
2432	dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2433
2434	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2435	if (err < 0)
2436		mgmt_pending_remove(cmd);
2437
2438failed:
2439	hci_dev_unlock(hdev);
2440	return err;
2441}
2442
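/* Map an HCI link type and LE address type to the corresponding mgmt
 * BDADDR_* address type.
 */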
2443static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2444{
2445	switch (link_type) {
2446	case LE_LINK:
2447		switch (addr_type) {
2448		case ADDR_LE_DEV_PUBLIC:
2449			return BDADDR_LE_PUBLIC;
2450
2451		default:
2452			/* Fall back to the LE Random address type */
2453			return BDADDR_LE_RANDOM;
2454		}
2455
2456	default:
2457		/* Fall back to BR/EDR type */
2458		return BDADDR_BREDR;
2459	}
2460}
2461
2462static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2463			   u16 data_len)
2464{
2465	struct mgmt_rp_get_connections *rp;
2466	struct hci_conn *c;
2467	size_t rp_len;
2468	int err;
2469	u16 i;
2470
2471	BT_DBG("");
2472
2473	hci_dev_lock(hdev);
2474
2475	if (!hdev_is_powered(hdev)) {
2476		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2477				 MGMT_STATUS_NOT_POWERED);
2478		goto unlock;
2479	}
2480
2481	i = 0;
2482	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2483		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2484			i++;
2485	}
2486
2487	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2488	rp = kmalloc(rp_len, GFP_KERNEL);
2489	if (!rp) {
2490		err = -ENOMEM;
2491		goto unlock;
2492	}
2493
2494	i = 0;
2495	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2496		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2497			continue;
2498		bacpy(&rp->addr[i].bdaddr, &c->dst);
2499		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
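		/* SCO and eSCO links are not reported; skipping the
		 * increment lets the entry written above be overwritten
		 * by the next reported connection.
		 */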
2500		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2501			continue;
2502		i++;
2503	}
2504
2505	rp->conn_count = cpu_to_le16(i);
2506
2507	/* Recalculate length in case of filtered SCO connections, etc */
2508	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2509
2510	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2511			   rp_len);
2512
2513	kfree(rp);
2514
2515unlock:
2516	hci_dev_unlock(hdev);
2517	return err;
2518}
2519
2520static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2521				   struct mgmt_cp_pin_code_neg_reply *cp)
2522{
2523	struct pending_cmd *cmd;
2524	int err;
2525
2526	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2527			       sizeof(*cp));
2528	if (!cmd)
2529		return -ENOMEM;
2530
2531	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2532			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2533	if (err < 0)
2534		mgmt_pending_remove(cmd);
2535
2536	return err;
2537}
2538
2539static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2540			  u16 len)
2541{
2542	struct hci_conn *conn;
2543	struct mgmt_cp_pin_code_reply *cp = data;
2544	struct hci_cp_pin_code_reply reply;
2545	struct pending_cmd *cmd;
2546	int err;
2547
2548	BT_DBG("");
2549
2550	hci_dev_lock(hdev);
2551
2552	if (!hdev_is_powered(hdev)) {
2553		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2554				 MGMT_STATUS_NOT_POWERED);
2555		goto failed;
2556	}
2557
2558	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2559	if (!conn) {
2560		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2561				 MGMT_STATUS_NOT_CONNECTED);
2562		goto failed;
2563	}
2564
2565	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2566		struct mgmt_cp_pin_code_neg_reply ncp;
2567
2568		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2569
2570		BT_ERR("PIN code is not 16 bytes long");
2571
2572		err = send_pin_code_neg_reply(sk, hdev, &ncp);
2573		if (err >= 0)
2574			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2575					 MGMT_STATUS_INVALID_PARAMS);
2576
2577		goto failed;
2578	}
2579
2580	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2581	if (!cmd) {
2582		err = -ENOMEM;
2583		goto failed;
2584	}
2585
2586	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2587	reply.pin_len = cp->pin_len;
2588	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2589
2590	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2591	if (err < 0)
2592		mgmt_pending_remove(cmd);
2593
2594failed:
2595	hci_dev_unlock(hdev);
2596	return err;
2597}
2598
2599static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2600			     u16 len)
2601{
2602	struct mgmt_cp_set_io_capability *cp = data;
2603
2604	BT_DBG("");
2605
2606	hci_dev_lock(hdev);
2607
2608	hdev->io_capability = cp->io_capability;
2609
2610	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2611	       hdev->io_capability);
2612
2613	hci_dev_unlock(hdev);
2614
2615	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2616			    0);
2617}
2618
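/* Return the pending Pair Device command whose user_data matches the
 * given connection, or NULL if there is none.
 */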
2619static struct pending_cmd *find_pairing(struct hci_conn *conn)
2620{
2621	struct hci_dev *hdev = conn->hdev;
2622	struct pending_cmd *cmd;
2623
2624	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2625		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2626			continue;
2627
2628		if (cmd->user_data != conn)
2629			continue;
2630
2631		return cmd;
2632	}
2633
2634	return NULL;
2635}
2636
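/* Complete a pending Pair Device command: send the response, detach
 * the connection callbacks, drop the connection reference and remove
 * the pending command.
 */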
2637static void pairing_complete(struct pending_cmd *cmd, u8 status)
2638{
2639	struct mgmt_rp_pair_device rp;
2640	struct hci_conn *conn = cmd->user_data;
2641
2642	bacpy(&rp.addr.bdaddr, &conn->dst);
2643	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2644
2645	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2646		     &rp, sizeof(rp));
2647
2648	/* So we don't get further callbacks for this connection */
2649	conn->connect_cfm_cb = NULL;
2650	conn->security_cfm_cb = NULL;
2651	conn->disconn_cfm_cb = NULL;
2652
2653	hci_conn_drop(conn);
2654
2655	mgmt_pending_remove(cmd);
2656}
2657
2658void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2659{
2660	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2661	struct pending_cmd *cmd;
2662
2663	cmd = find_pairing(conn);
2664	if (cmd)
2665		pairing_complete(cmd, status);
2666}
2667
2668static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2669{
2670	struct pending_cmd *cmd;
2671
2672	BT_DBG("status %u", status);
2673
2674	cmd = find_pairing(conn);
2675	if (!cmd)
2676		BT_DBG("Unable to find a pending command");
2677	else
2678		pairing_complete(cmd, mgmt_status(status));
2679}
2680
2681static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2682{
2683	struct pending_cmd *cmd;
2684
2685	BT_DBG("status %u", status);
2686
2687	if (!status)
2688		return;
2689
2690	cmd = find_pairing(conn);
2691	if (!cmd)
2692		BT_DBG("Unable to find a pending command");
2693	else
2694		pairing_complete(cmd, mgmt_status(status));
2695}
2696
2697static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2698		       u16 len)
2699{
2700	struct mgmt_cp_pair_device *cp = data;
2701	struct mgmt_rp_pair_device rp;
2702	struct pending_cmd *cmd;
2703	u8 sec_level, auth_type;
2704	struct hci_conn *conn;
2705	int err;
2706
2707	BT_DBG("");
2708
2709	memset(&rp, 0, sizeof(rp));
2710	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2711	rp.addr.type = cp->addr.type;
2712
2713	if (!bdaddr_type_is_valid(cp->addr.type))
2714		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2715				    MGMT_STATUS_INVALID_PARAMS,
2716				    &rp, sizeof(rp));
2717
2718	hci_dev_lock(hdev);
2719
2720	if (!hdev_is_powered(hdev)) {
2721		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2722				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2723		goto unlock;
2724	}
2725
2726	sec_level = BT_SECURITY_MEDIUM;
2727	if (cp->io_cap == 0x03)
2728		auth_type = HCI_AT_DEDICATED_BONDING;
2729	else
2730		auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2731
2732	if (cp->addr.type == BDADDR_BREDR)
2733		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2734				   cp->addr.type, sec_level, auth_type);
2735	else
2736		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2737				   cp->addr.type, sec_level, auth_type);
2738
2739	if (IS_ERR(conn)) {
2740		int status;
2741
2742		if (PTR_ERR(conn) == -EBUSY)
2743			status = MGMT_STATUS_BUSY;
2744		else
2745			status = MGMT_STATUS_CONNECT_FAILED;
2746
2747		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2748				   status, &rp,
2749				   sizeof(rp));
2750		goto unlock;
2751	}
2752
2753	if (conn->connect_cfm_cb) {
2754		hci_conn_drop(conn);
2755		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2756				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
2757		goto unlock;
2758	}
2759
2760	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2761	if (!cmd) {
2762		err = -ENOMEM;
2763		hci_conn_drop(conn);
2764		goto unlock;
2765	}
2766
2767	/* For LE, just connecting is not proof that the pairing finished */
2768	if (cp->addr.type == BDADDR_BREDR) {
2769		conn->connect_cfm_cb = pairing_complete_cb;
2770		conn->security_cfm_cb = pairing_complete_cb;
2771		conn->disconn_cfm_cb = pairing_complete_cb;
2772	} else {
2773		conn->connect_cfm_cb = le_pairing_complete_cb;
2774		conn->security_cfm_cb = le_pairing_complete_cb;
2775		conn->disconn_cfm_cb = le_pairing_complete_cb;
2776	}
2777
2778	conn->io_capability = cp->io_cap;
2779	cmd->user_data = conn;
2780
2781	if (conn->state == BT_CONNECTED &&
2782	    hci_conn_security(conn, sec_level, auth_type))
2783		pairing_complete(cmd, 0);
2784
2785	err = 0;
2786
2787unlock:
2788	hci_dev_unlock(hdev);
2789	return err;
2790}
2791
2792static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2793			      u16 len)
2794{
2795	struct mgmt_addr_info *addr = data;
2796	struct pending_cmd *cmd;
2797	struct hci_conn *conn;
2798	int err;
2799
2800	BT_DBG("");
2801
2802	hci_dev_lock(hdev);
2803
2804	if (!hdev_is_powered(hdev)) {
2805		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2806				 MGMT_STATUS_NOT_POWERED);
2807		goto unlock;
2808	}
2809
2810	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2811	if (!cmd) {
2812		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2813				 MGMT_STATUS_INVALID_PARAMS);
2814		goto unlock;
2815	}
2816
2817	conn = cmd->user_data;
2818
2819	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2820		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2821				 MGMT_STATUS_INVALID_PARAMS);
2822		goto unlock;
2823	}
2824
2825	pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2826
2827	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2828			   addr, sizeof(*addr));
2829unlock:
2830	hci_dev_unlock(hdev);
2831	return err;
2832}
2833
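/* Shared handler for the user confirmation/passkey reply and negative
 * reply commands: LE responses are handed to SMP, while BR/EDR
 * responses are sent as the corresponding HCI command.
 */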
2834static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2835			     struct mgmt_addr_info *addr, u16 mgmt_op,
2836			     u16 hci_op, __le32 passkey)
2837{
2838	struct pending_cmd *cmd;
2839	struct hci_conn *conn;
2840	int err;
2841
2842	hci_dev_lock(hdev);
2843
2844	if (!hdev_is_powered(hdev)) {
2845		err = cmd_complete(sk, hdev->id, mgmt_op,
2846				   MGMT_STATUS_NOT_POWERED, addr,
2847				   sizeof(*addr));
2848		goto done;
2849	}
2850
2851	if (addr->type == BDADDR_BREDR)
2852		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2853	else
2854		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2855
2856	if (!conn) {
2857		err = cmd_complete(sk, hdev->id, mgmt_op,
2858				   MGMT_STATUS_NOT_CONNECTED, addr,
2859				   sizeof(*addr));
2860		goto done;
2861	}
2862
2863	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2864		/* Continue with pairing via SMP */
2865		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2866
2867		if (!err)
2868			err = cmd_complete(sk, hdev->id, mgmt_op,
2869					   MGMT_STATUS_SUCCESS, addr,
2870					   sizeof(*addr));
2871		else
2872			err = cmd_complete(sk, hdev->id, mgmt_op,
2873					   MGMT_STATUS_FAILED, addr,
2874					   sizeof(*addr));
2875
2876		goto done;
2877	}
2878
2879	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2880	if (!cmd) {
2881		err = -ENOMEM;
2882		goto done;
2883	}
2884
2885	/* Continue with pairing via HCI */
2886	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2887		struct hci_cp_user_passkey_reply cp;
2888
2889		bacpy(&cp.bdaddr, &addr->bdaddr);
2890		cp.passkey = passkey;
2891		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2892	} else {
2893		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2894				   &addr->bdaddr);
	}
2895
2896	if (err < 0)
2897		mgmt_pending_remove(cmd);
2898
2899done:
2900	hci_dev_unlock(hdev);
2901	return err;
2902}
2903
2904static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2905			      void *data, u16 len)
2906{
2907	struct mgmt_cp_pin_code_neg_reply *cp = data;
2908
2909	BT_DBG("");
2910
2911	return user_pairing_resp(sk, hdev, &cp->addr,
2912				MGMT_OP_PIN_CODE_NEG_REPLY,
2913				HCI_OP_PIN_CODE_NEG_REPLY, 0);
2914}
2915
2916static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2917			      u16 len)
2918{
2919	struct mgmt_cp_user_confirm_reply *cp = data;
2920
2921	BT_DBG("");
2922
2923	if (len != sizeof(*cp))
2924		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2925				  MGMT_STATUS_INVALID_PARAMS);
2926
2927	return user_pairing_resp(sk, hdev, &cp->addr,
2928				 MGMT_OP_USER_CONFIRM_REPLY,
2929				 HCI_OP_USER_CONFIRM_REPLY, 0);
2930}
2931
2932static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2933				  void *data, u16 len)
2934{
2935	struct mgmt_cp_user_confirm_neg_reply *cp = data;
2936
2937	BT_DBG("");
2938
2939	return user_pairing_resp(sk, hdev, &cp->addr,
2940				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2941				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2942}
2943
2944static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2945			      u16 len)
2946{
2947	struct mgmt_cp_user_passkey_reply *cp = data;
2948
2949	BT_DBG("");
2950
2951	return user_pairing_resp(sk, hdev, &cp->addr,
2952				 MGMT_OP_USER_PASSKEY_REPLY,
2953				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2954}
2955
2956static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2957				  void *data, u16 len)
2958{
2959	struct mgmt_cp_user_passkey_neg_reply *cp = data;
2960
2961	BT_DBG("");
2962
2963	return user_pairing_resp(sk, hdev, &cp->addr,
2964				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2965				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2966}
2967
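/* Queue an HCI Write Local Name command carrying the currently stored
 * local name.
 */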
2968static void update_name(struct hci_request *req)
2969{
2970	struct hci_dev *hdev = req->hdev;
2971	struct hci_cp_write_local_name cp;
2972
2973	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2974
2975	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
2976}
2977
2978static void set_name_complete(struct hci_dev *hdev, u8 status)
2979{
2980	struct mgmt_cp_set_local_name *cp;
2981	struct pending_cmd *cmd;
2982
2983	BT_DBG("status 0x%02x", status);
2984
2985	hci_dev_lock(hdev);
2986
2987	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2988	if (!cmd)
2989		goto unlock;
2990
2991	cp = cmd->param;
2992
2993	if (status)
2994		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2995			   mgmt_status(status));
2996	else
2997		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2998			     cp, sizeof(*cp));
2999
3000	mgmt_pending_remove(cmd);
3001
3002unlock:
3003	hci_dev_unlock(hdev);
3004}
3005
3006static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3007			  u16 len)
3008{
3009	struct mgmt_cp_set_local_name *cp = data;
3010	struct pending_cmd *cmd;
3011	struct hci_request req;
3012	int err;
3013
3014	BT_DBG("");
3015
3016	hci_dev_lock(hdev);
3017
3018	/* If the old values are the same as the new ones, just return a
3019	 * direct command complete event.
3020	 */
3021	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3022	    !memcmp(hdev->short_name, cp->short_name,
3023		    sizeof(hdev->short_name))) {
3024		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3025				   data, len);
3026		goto failed;
3027	}
3028
3029	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3030
3031	if (!hdev_is_powered(hdev)) {
3032		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3033
3034		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3035				   data, len);
3036		if (err < 0)
3037			goto failed;
3038
3039		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3040				 sk);
3041
3042		goto failed;
3043	}
3044
3045	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3046	if (!cmd) {
3047		err = -ENOMEM;
3048		goto failed;
3049	}
3050
3051	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3052
3053	hci_req_init(&req, hdev);
3054
3055	if (lmp_bredr_capable(hdev)) {
3056		update_name(&req);
3057		update_eir(&req);
3058	}
3059
3060	/* The name is stored in the scan response data and so
3061	 * there is no need to update the advertising data here.
3062	 */
3063	if (lmp_le_capable(hdev))
3064		update_scan_rsp_data(&req);
3065
3066	err = hci_req_run(&req, set_name_complete);
3067	if (err < 0)
3068		mgmt_pending_remove(cmd);
3069
3070failed:
3071	hci_dev_unlock(hdev);
3072	return err;
3073}
3074
3075static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3076			       void *data, u16 data_len)
3077{
3078	struct pending_cmd *cmd;
3079	int err;
3080
3081	BT_DBG("%s", hdev->name);
3082
3083	hci_dev_lock(hdev);
3084
3085	if (!hdev_is_powered(hdev)) {
3086		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3087				 MGMT_STATUS_NOT_POWERED);
3088		goto unlock;
3089	}
3090
3091	if (!lmp_ssp_capable(hdev)) {
3092		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3093				 MGMT_STATUS_NOT_SUPPORTED);
3094		goto unlock;
3095	}
3096
3097	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3098		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3099				 MGMT_STATUS_BUSY);
3100		goto unlock;
3101	}
3102
3103	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3104	if (!cmd) {
3105		err = -ENOMEM;
3106		goto unlock;
3107	}
3108
3109	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3110		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3111				   0, NULL);
3112	else
3113		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3114
3115	if (err < 0)
3116		mgmt_pending_remove(cmd);
3117
3118unlock:
3119	hci_dev_unlock(hdev);
3120	return err;
3121}
3122
3123static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3124			       void *data, u16 len)
3125{
3126	int err;
3127
3128	BT_DBG("%s", hdev->name);
3129
3130	hci_dev_lock(hdev);
3131
3132	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3133		struct mgmt_cp_add_remote_oob_data *cp = data;
3134		u8 status;
3135
3136		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3137					      cp->hash, cp->randomizer);
3138		if (err < 0)
3139			status = MGMT_STATUS_FAILED;
3140		else
3141			status = MGMT_STATUS_SUCCESS;
3142
3143		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3144				   status, &cp->addr, sizeof(cp->addr));
3145	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3146		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3147		u8 status;
3148
3149		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3150						  cp->hash192,
3151						  cp->randomizer192,
3152						  cp->hash256,
3153						  cp->randomizer256);
3154		if (err < 0)
3155			status = MGMT_STATUS_FAILED;
3156		else
3157			status = MGMT_STATUS_SUCCESS;
3158
3159		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3160				   status, &cp->addr, sizeof(cp->addr));
3161	} else {
3162		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3163		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3164				 MGMT_STATUS_INVALID_PARAMS);
3165	}
3166
3167	hci_dev_unlock(hdev);
3168	return err;
3169}
3170
3171static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3172				  void *data, u16 len)
3173{
3174	struct mgmt_cp_remove_remote_oob_data *cp = data;
3175	u8 status;
3176	int err;
3177
3178	BT_DBG("%s", hdev->name);
3179
3180	hci_dev_lock(hdev);
3181
3182	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3183	if (err < 0)
3184		status = MGMT_STATUS_INVALID_PARAMS;
3185	else
3186		status = MGMT_STATUS_SUCCESS;
3187
3188	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3189			   status, &cp->addr, sizeof(cp->addr));
3190
3191	hci_dev_unlock(hdev);
3192	return err;
3193}
3194
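/* Reset the discovery state to stopped and complete any pending Start
 * Discovery command with the translated failure status.
 */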
3195static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3196{
3197	struct pending_cmd *cmd;
3198	u8 type;
3199	int err;
3200
3201	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3202
3203	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3204	if (!cmd)
3205		return -ENOENT;
3206
3207	type = hdev->discovery.type;
3208
3209	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3210			   &type, sizeof(type));
3211	mgmt_pending_remove(cmd);
3212
3213	return err;
3214}
3215
3216static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3217{
3218	BT_DBG("status %d", status);
3219
3220	if (status) {
3221		hci_dev_lock(hdev);
3222		mgmt_start_discovery_failed(hdev, status);
3223		hci_dev_unlock(hdev);
3224		return;
3225	}
3226
3227	hci_dev_lock(hdev);
3228	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3229	hci_dev_unlock(hdev);
3230
3231	switch (hdev->discovery.type) {
3232	case DISCOV_TYPE_LE:
3233		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3234				   DISCOV_LE_TIMEOUT);
3235		break;
3236
3237	case DISCOV_TYPE_INTERLEAVED:
3238		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3239				   DISCOV_INTERLEAVED_TIMEOUT);
3240		break;
3241
3242	case DISCOV_TYPE_BREDR:
3243		break;
3244
3245	default:
3246		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3247	}
3248}
3249
3250static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3251			   void *data, u16 len)
3252{
3253	struct mgmt_cp_start_discovery *cp = data;
3254	struct pending_cmd *cmd;
3255	struct hci_cp_le_set_scan_param param_cp;
3256	struct hci_cp_le_set_scan_enable enable_cp;
3257	struct hci_cp_inquiry inq_cp;
3258	struct hci_request req;
3259	/* General inquiry access code (GIAC) */
3260	u8 lap[3] = { 0x33, 0x8b, 0x9e };
3261	u8 status;
3262	int err;
3263
3264	BT_DBG("%s", hdev->name);
3265
3266	hci_dev_lock(hdev);
3267
3268	if (!hdev_is_powered(hdev)) {
3269		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3270				 MGMT_STATUS_NOT_POWERED);
3271		goto failed;
3272	}
3273
3274	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3275		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3276				 MGMT_STATUS_BUSY);
3277		goto failed;
3278	}
3279
3280	if (hdev->discovery.state != DISCOVERY_STOPPED) {
3281		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3282				 MGMT_STATUS_BUSY);
3283		goto failed;
3284	}
3285
3286	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3287	if (!cmd) {
3288		err = -ENOMEM;
3289		goto failed;
3290	}
3291
3292	hdev->discovery.type = cp->type;
3293
3294	hci_req_init(&req, hdev);
3295
3296	switch (hdev->discovery.type) {
3297	case DISCOV_TYPE_BREDR:
3298		status = mgmt_bredr_support(hdev);
3299		if (status) {
3300			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3301					 status);
3302			mgmt_pending_remove(cmd);
3303			goto failed;
3304		}
3305
3306		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3307			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3308					 MGMT_STATUS_BUSY);
3309			mgmt_pending_remove(cmd);
3310			goto failed;
3311		}
3312
3313		hci_inquiry_cache_flush(hdev);
3314
3315		memset(&inq_cp, 0, sizeof(inq_cp));
3316		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3317		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3318		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3319		break;
3320
3321	case DISCOV_TYPE_LE:
3322	case DISCOV_TYPE_INTERLEAVED:
3323		status = mgmt_le_support(hdev);
3324		if (status) {
3325			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3326					 status);
3327			mgmt_pending_remove(cmd);
3328			goto failed;
3329		}
3330
3331		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3332		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3333			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3334					 MGMT_STATUS_NOT_SUPPORTED);
3335			mgmt_pending_remove(cmd);
3336			goto failed;
3337		}
3338
3339		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3340			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3341					 MGMT_STATUS_REJECTED);
3342			mgmt_pending_remove(cmd);
3343			goto failed;
3344		}
3345
3346		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
3347			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3348					 MGMT_STATUS_BUSY);
3349			mgmt_pending_remove(cmd);
3350			goto failed;
3351		}
3352
3353		memset(&param_cp, 0, sizeof(param_cp));
3354		param_cp.type = LE_SCAN_ACTIVE;
3355		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3356		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3357		param_cp.own_address_type = hdev->own_addr_type;
3358		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3359			    &param_cp);
3360
3361		memset(&enable_cp, 0, sizeof(enable_cp));
3362		enable_cp.enable = LE_SCAN_ENABLE;
3363		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3364		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3365			    &enable_cp);
3366		break;
3367
3368	default:
3369		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3370				 MGMT_STATUS_INVALID_PARAMS);
3371		mgmt_pending_remove(cmd);
3372		goto failed;
3373	}
3374
3375	err = hci_req_run(&req, start_discovery_complete);
3376	if (err < 0)
3377		mgmt_pending_remove(cmd);
3378	else
3379		hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3380
3381failed:
3382	hci_dev_unlock(hdev);
3383	return err;
3384}
3385
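/* Complete any pending Stop Discovery command with the translated
 * failure status and the current discovery type.
 */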
3386static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3387{
3388	struct pending_cmd *cmd;
3389	int err;
3390
3391	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3392	if (!cmd)
3393		return -ENOENT;
3394
3395	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3396			   &hdev->discovery.type, sizeof(hdev->discovery.type));
3397	mgmt_pending_remove(cmd);
3398
3399	return err;
3400}
3401
3402static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3403{
3404	BT_DBG("status %d", status);
3405
3406	hci_dev_lock(hdev);
3407
3408	if (status) {
3409		mgmt_stop_discovery_failed(hdev, status);
3410		goto unlock;
3411	}
3412
3413	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3414
3415unlock:
3416	hci_dev_unlock(hdev);
3417}
3418
3419static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3420			  u16 len)
3421{
3422	struct mgmt_cp_stop_discovery *mgmt_cp = data;
3423	struct pending_cmd *cmd;
3424	struct hci_cp_remote_name_req_cancel cp;
3425	struct inquiry_entry *e;
3426	struct hci_request req;
3427	struct hci_cp_le_set_scan_enable enable_cp;
3428	int err;
3429
3430	BT_DBG("%s", hdev->name);
3431
3432	hci_dev_lock(hdev);
3433
3434	if (!hci_discovery_active(hdev)) {
3435		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3436				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
3437				   sizeof(mgmt_cp->type));
3438		goto unlock;
3439	}
3440
3441	if (hdev->discovery.type != mgmt_cp->type) {
3442		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3443				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3444				   sizeof(mgmt_cp->type));
3445		goto unlock;
3446	}
3447
3448	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3449	if (!cmd) {
3450		err = -ENOMEM;
3451		goto unlock;
3452	}
3453
3454	hci_req_init(&req, hdev);
3455
3456	switch (hdev->discovery.state) {
3457	case DISCOVERY_FINDING:
3458		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3459			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3460		} else {
3461			cancel_delayed_work(&hdev->le_scan_disable);
3462
3463			memset(&enable_cp, 0, sizeof(enable_cp));
3464			enable_cp.enable = LE_SCAN_DISABLE;
3465			hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
3466				    sizeof(enable_cp), &enable_cp);
3467		}
3468
3469		break;
3470
3471	case DISCOVERY_RESOLVING:
3472		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3473						     NAME_PENDING);
3474		if (!e) {
3475			mgmt_pending_remove(cmd);
3476			err = cmd_complete(sk, hdev->id,
3477					   MGMT_OP_STOP_DISCOVERY, 0,
3478					   &mgmt_cp->type,
3479					   sizeof(mgmt_cp->type));
3480			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3481			goto unlock;
3482		}
3483
3484		bacpy(&cp.bdaddr, &e->data.bdaddr);
3485		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3486			    &cp);
3487
3488		break;
3489
3490	default:
3491		BT_DBG("unknown discovery state %u", hdev->discovery.state);
3492
3493		mgmt_pending_remove(cmd);
3494		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3495				   MGMT_STATUS_FAILED, &mgmt_cp->type,
3496				   sizeof(mgmt_cp->type));
3497		goto unlock;
3498	}
3499
3500	err = hci_req_run(&req, stop_discovery_complete);
3501	if (err < 0)
3502		mgmt_pending_remove(cmd);
3503	else
3504		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3505
3506unlock:
3507	hci_dev_unlock(hdev);
3508	return err;
3509}
3510
3511static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3512			u16 len)
3513{
3514	struct mgmt_cp_confirm_name *cp = data;
3515	struct inquiry_entry *e;
3516	int err;
3517
3518	BT_DBG("%s", hdev->name);
3519
3520	hci_dev_lock(hdev);
3521
3522	if (!hci_discovery_active(hdev)) {
3523		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3524				 MGMT_STATUS_FAILED);
3525		goto failed;
3526	}
3527
3528	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3529	if (!e) {
3530		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3531				 MGMT_STATUS_INVALID_PARAMS);
3532		goto failed;
3533	}
3534
3535	if (cp->name_known) {
3536		e->name_state = NAME_KNOWN;
3537		list_del(&e->list);
3538	} else {
3539		e->name_state = NAME_NEEDED;
3540		hci_inquiry_cache_update_resolve(hdev, e);
3541	}
3542
3543	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3544			   sizeof(cp->addr));
3545
3546failed:
3547	hci_dev_unlock(hdev);
3548	return err;
3549}
3550
3551static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3552			u16 len)
3553{
3554	struct mgmt_cp_block_device *cp = data;
3555	u8 status;
3556	int err;
3557
3558	BT_DBG("%s", hdev->name);
3559
3560	if (!bdaddr_type_is_valid(cp->addr.type))
3561		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3562				    MGMT_STATUS_INVALID_PARAMS,
3563				    &cp->addr, sizeof(cp->addr));
3564
3565	hci_dev_lock(hdev);
3566
3567	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3568	if (err < 0)
3569		status = MGMT_STATUS_FAILED;
3570	else
3571		status = MGMT_STATUS_SUCCESS;
3572
3573	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3574			   &cp->addr, sizeof(cp->addr));
3575
3576	hci_dev_unlock(hdev);
3577
3578	return err;
3579}
3580
3581static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3582			  u16 len)
3583{
3584	struct mgmt_cp_unblock_device *cp = data;
3585	u8 status;
3586	int err;
3587
3588	BT_DBG("%s", hdev->name);
3589
3590	if (!bdaddr_type_is_valid(cp->addr.type))
3591		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3592				    MGMT_STATUS_INVALID_PARAMS,
3593				    &cp->addr, sizeof(cp->addr));
3594
3595	hci_dev_lock(hdev);
3596
3597	err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3598	if (err < 0)
3599		status = MGMT_STATUS_INVALID_PARAMS;
3600	else
3601		status = MGMT_STATUS_SUCCESS;
3602
3603	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3604			   &cp->addr, sizeof(cp->addr));
3605
3606	hci_dev_unlock(hdev);
3607
3608	return err;
3609}
3610
3611static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3612			 u16 len)
3613{
3614	struct mgmt_cp_set_device_id *cp = data;
3615	struct hci_request req;
3616	int err;
3617	__u16 source;
3618
3619	BT_DBG("%s", hdev->name);
3620
3621	source = __le16_to_cpu(cp->source);
3622
3623	if (source > 0x0002)
3624		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3625				  MGMT_STATUS_INVALID_PARAMS);
3626
3627	hci_dev_lock(hdev);
3628
3629	hdev->devid_source = source;
3630	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3631	hdev->devid_product = __le16_to_cpu(cp->product);
3632	hdev->devid_version = __le16_to_cpu(cp->version);
3633
3634	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3635
3636	hci_req_init(&req, hdev);
3637	update_eir(&req);
3638	hci_req_run(&req, NULL);
3639
3640	hci_dev_unlock(hdev);
3641
3642	return err;
3643}
3644
3645static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3646{
3647	struct cmd_lookup match = { NULL, hdev };
3648
3649	if (status) {
3650		u8 mgmt_err = mgmt_status(status);
3651
3652		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3653				     cmd_status_rsp, &mgmt_err);
3654		return;
3655	}
3656
3657	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3658			     &match);
3659
3660	new_settings(hdev, match.sk);
3661
3662	if (match.sk)
3663		sock_put(match.sk);
3664}
3665
3666static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3667			   u16 len)
3668{
3669	struct mgmt_mode *cp = data;
3670	struct pending_cmd *cmd;
3671	struct hci_request req;
3672	u8 val, enabled, status;
3673	int err;
3674
3675	BT_DBG("request for %s", hdev->name);
3676
3677	status = mgmt_le_support(hdev);
3678	if (status)
3679		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3680				  status);
3681
3682	if (cp->val != 0x00 && cp->val != 0x01)
3683		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3684				  MGMT_STATUS_INVALID_PARAMS);
3685
3686	hci_dev_lock(hdev);
3687
3688	val = !!cp->val;
3689	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3690
3691	/* The following conditions are ones under which no HCI
3692	 * communication is needed and we can directly send a mgmt
3693	 * response to user space (after toggling the flag if
3694	 * necessary).
3695	 */
3696	if (!hdev_is_powered(hdev) || val == enabled ||
3697	    hci_conn_num(hdev, LE_LINK) > 0) {
3698		bool changed = false;
3699
3700		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3701			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3702			changed = true;
3703		}
3704
3705		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3706		if (err < 0)
3707			goto unlock;
3708
3709		if (changed)
3710			err = new_settings(hdev, sk);
3711
3712		goto unlock;
3713	}
3714
3715	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3716	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3717		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3718				 MGMT_STATUS_BUSY);
3719		goto unlock;
3720	}
3721
3722	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3723	if (!cmd) {
3724		err = -ENOMEM;
3725		goto unlock;
3726	}
3727
3728	hci_req_init(&req, hdev);
3729
3730	if (val)
3731		enable_advertising(&req);
3732	else
3733		disable_advertising(&req);
3734
3735	err = hci_req_run(&req, set_advertising_complete);
3736	if (err < 0)
3737		mgmt_pending_remove(cmd);
3738
3739unlock:
3740	hci_dev_unlock(hdev);
3741	return err;
3742}
3743
3744static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3745			      void *data, u16 len)
3746{
3747	struct mgmt_cp_set_static_address *cp = data;
3748	int err;
3749
3750	BT_DBG("%s", hdev->name);
3751
3752	if (!lmp_le_capable(hdev))
3753		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3754				  MGMT_STATUS_NOT_SUPPORTED);
3755
3756	if (hdev_is_powered(hdev))
3757		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3758				  MGMT_STATUS_REJECTED);
3759
3760	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3761		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3762			return cmd_status(sk, hdev->id,
3763					  MGMT_OP_SET_STATIC_ADDRESS,
3764					  MGMT_STATUS_INVALID_PARAMS);
3765
3766		/* Two most significant bits shall be set */
3767		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3768			return cmd_status(sk, hdev->id,
3769					  MGMT_OP_SET_STATIC_ADDRESS,
3770					  MGMT_STATUS_INVALID_PARAMS);
3771	}
3772
3773	hci_dev_lock(hdev);
3774
3775	bacpy(&hdev->static_addr, &cp->bdaddr);
3776
3777	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3778
3779	hci_dev_unlock(hdev);
3780
3781	return err;
3782}
3783
3784static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3785			   void *data, u16 len)
3786{
3787	struct mgmt_cp_set_scan_params *cp = data;
3788	__u16 interval, window;
3789	int err;
3790
3791	BT_DBG("%s", hdev->name);
3792
3793	if (!lmp_le_capable(hdev))
3794		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3795				  MGMT_STATUS_NOT_SUPPORTED);
3796
3797	interval = __le16_to_cpu(cp->interval);
3798
3799	if (interval < 0x0004 || interval > 0x4000)
3800		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3801				  MGMT_STATUS_INVALID_PARAMS);
3802
3803	window = __le16_to_cpu(cp->window);
3804
3805	if (window < 0x0004 || window > 0x4000)
3806		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3807				  MGMT_STATUS_INVALID_PARAMS);
3808
3809	if (window > interval)
3810		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3811				  MGMT_STATUS_INVALID_PARAMS);
3812
3813	hci_dev_lock(hdev);
3814
3815	hdev->le_scan_interval = interval;
3816	hdev->le_scan_window = window;
3817
3818	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3819
3820	hci_dev_unlock(hdev);
3821
3822	return err;
3823}
3824
3825static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3826{
3827	struct pending_cmd *cmd;
3828
3829	BT_DBG("status 0x%02x", status);
3830
3831	hci_dev_lock(hdev);
3832
3833	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3834	if (!cmd)
3835		goto unlock;
3836
3837	if (status) {
3838		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3839			   mgmt_status(status));
3840	} else {
3841		struct mgmt_mode *cp = cmd->param;
3842
3843		if (cp->val)
3844			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3845		else
3846			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3847
3848		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3849		new_settings(hdev, cmd->sk);
3850	}
3851
3852	mgmt_pending_remove(cmd);
3853
3854unlock:
3855	hci_dev_unlock(hdev);
3856}
3857
3858static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3859				void *data, u16 len)
3860{
3861	struct mgmt_mode *cp = data;
3862	struct pending_cmd *cmd;
3863	struct hci_request req;
3864	int err;
3865
3866	BT_DBG("%s", hdev->name);
3867
3868	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
3869	    hdev->hci_ver < BLUETOOTH_VER_1_2)
3870		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3871				  MGMT_STATUS_NOT_SUPPORTED);
3872
3873	if (cp->val != 0x00 && cp->val != 0x01)
3874		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3875				  MGMT_STATUS_INVALID_PARAMS);
3876
3877	if (!hdev_is_powered(hdev))
3878		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3879				  MGMT_STATUS_NOT_POWERED);
3880
3881	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3882		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3883				  MGMT_STATUS_REJECTED);
3884
3885	hci_dev_lock(hdev);
3886
3887	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3888		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3889				 MGMT_STATUS_BUSY);
3890		goto unlock;
3891	}
3892
3893	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3894		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3895					hdev);
3896		goto unlock;
3897	}
3898
3899	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3900			       data, len);
3901	if (!cmd) {
3902		err = -ENOMEM;
3903		goto unlock;
3904	}
3905
3906	hci_req_init(&req, hdev);
3907
3908	write_fast_connectable(&req, cp->val);
3909
3910	err = hci_req_run(&req, fast_connectable_complete);
3911	if (err < 0) {
3912		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3913				 MGMT_STATUS_FAILED);
3914		mgmt_pending_remove(cmd);
3915	}
3916
3917unlock:
3918	hci_dev_unlock(hdev);
3919
3920	return err;
3921}
3922
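/* Enable page scan and/or inquiry scan according to the current
 * connectable and discoverable settings.
 */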
3923static void set_bredr_scan(struct hci_request *req)
3924{
3925	struct hci_dev *hdev = req->hdev;
3926	u8 scan = 0;
3927
3928	/* Ensure that fast connectable is disabled. This function will
3929	 * not do anything if the page scan parameters are already what
3930	 * they should be.
3931	 */
3932	write_fast_connectable(req, false);
3933
3934	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3935		scan |= SCAN_PAGE;
3936	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3937		scan |= SCAN_INQUIRY;
3938
3939	if (scan)
3940		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3941}
3942
3943static void set_bredr_complete(struct hci_dev *hdev, u8 status)
3944{
3945	struct pending_cmd *cmd;
3946
3947	BT_DBG("status 0x%02x", status);
3948
3949	hci_dev_lock(hdev);
3950
3951	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
3952	if (!cmd)
3953		goto unlock;
3954
3955	if (status) {
3956		u8 mgmt_err = mgmt_status(status);
3957
3958		/* We need to restore the flag if related HCI commands
3959		 * failed.
3960		 */
3961		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3962
3963		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
3964	} else {
3965		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
3966		new_settings(hdev, cmd->sk);
3967	}
3968
3969	mgmt_pending_remove(cmd);
3970
3971unlock:
3972	hci_dev_unlock(hdev);
3973}
3974
3975static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
3976{
3977	struct mgmt_mode *cp = data;
3978	struct pending_cmd *cmd;
3979	struct hci_request req;
3980	int err;
3981
3982	BT_DBG("request for %s", hdev->name);
3983
3984	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
3985		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3986				  MGMT_STATUS_NOT_SUPPORTED);
3987
3988	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3989		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3990				  MGMT_STATUS_REJECTED);
3991
3992	if (cp->val != 0x00 && cp->val != 0x01)
3993		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3994				  MGMT_STATUS_INVALID_PARAMS);
3995
3996	hci_dev_lock(hdev);
3997
3998	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3999		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4000		goto unlock;
4001	}
4002
4003	if (!hdev_is_powered(hdev)) {
4004		if (!cp->val) {
4005			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4006			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4007			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4008			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4009			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4010		}
4011
4012		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4013
4014		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4015		if (err < 0)
4016			goto unlock;
4017
4018		err = new_settings(hdev, sk);
4019		goto unlock;
4020	}
4021
4022	/* Reject disabling when powered on */
4023	if (!cp->val) {
4024		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4025				 MGMT_STATUS_REJECTED);
4026		goto unlock;
4027	}
4028
4029	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4030		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4031				 MGMT_STATUS_BUSY);
4032		goto unlock;
4033	}
4034
4035	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4036	if (!cmd) {
4037		err = -ENOMEM;
4038		goto unlock;
4039	}
4040
4041	/* We need to flip the bit already here so that update_adv_data
4042	 * generates the correct flags.
4043	 */
4044	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4045
4046	hci_req_init(&req, hdev);
4047
4048	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4049		set_bredr_scan(&req);
4050
4051	/* Since only the advertising data flags will change, there
4052	 * is no need to update the scan response data.
4053	 */
4054	update_adv_data(&req);
4055
4056	err = hci_req_run(&req, set_bredr_complete);
4057	if (err < 0)
4058		mgmt_pending_remove(cmd);
4059
4060unlock:
4061	hci_dev_unlock(hdev);
4062	return err;
4063}
4064
4065static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4066			   void *data, u16 len)
4067{
4068	struct mgmt_mode *cp = data;
4069	struct pending_cmd *cmd;
4070	u8 val, status;
4071	int err;
4072
4073	BT_DBG("request for %s", hdev->name);
4074
4075	status = mgmt_bredr_support(hdev);
4076	if (status)
4077		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4078				  status);
4079
4080	if (!lmp_sc_capable(hdev) &&
4081	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
4082		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4083				  MGMT_STATUS_NOT_SUPPORTED);
4084
4085	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4086		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4087				  MGMT_STATUS_INVALID_PARAMS);
4088
4089	hci_dev_lock(hdev);
4090
4091	if (!hdev_is_powered(hdev)) {
4092		bool changed;
4093
4094		if (cp->val) {
4095			changed = !test_and_set_bit(HCI_SC_ENABLED,
4096						    &hdev->dev_flags);
4097			if (cp->val == 0x02)
4098				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4099			else
4100				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4101		} else {
4102			changed = test_and_clear_bit(HCI_SC_ENABLED,
4103						     &hdev->dev_flags);
4104			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4105		}
4106
4107		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4108		if (err < 0)
4109			goto failed;
4110
4111		if (changed)
4112			err = new_settings(hdev, sk);
4113
4114		goto failed;
4115	}
4116
4117	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4118		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4119				 MGMT_STATUS_BUSY);
4120		goto failed;
4121	}
4122
4123	val = !!cp->val;
4124
4125	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4126	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4127		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4128		goto failed;
4129	}
4130
4131	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4132	if (!cmd) {
4133		err = -ENOMEM;
4134		goto failed;
4135	}
4136
4137	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4138	if (err < 0) {
4139		mgmt_pending_remove(cmd);
4140		goto failed;
4141	}
4142
4143	if (cp->val == 0x02)
4144		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4145	else
4146		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4147
4148failed:
4149	hci_dev_unlock(hdev);
4150	return err;
4151}
4152
4153static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4154			  void *data, u16 len)
4155{
4156	struct mgmt_mode *cp = data;
4157	bool changed;
4158	int err;
4159
4160	BT_DBG("request for %s", hdev->name);
4161
4162	if (cp->val != 0x00 && cp->val != 0x01)
4163		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4164				  MGMT_STATUS_INVALID_PARAMS);
4165
4166	hci_dev_lock(hdev);
4167
4168	if (cp->val)
4169		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4170	else
4171		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4172
4173	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4174	if (err < 0)
4175		goto unlock;
4176
4177	if (changed)
4178		err = new_settings(hdev, sk);
4179
4180unlock:
4181	hci_dev_unlock(hdev);
4182	return err;
4183}
4184
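/* Only LE public addresses and LE static random addresses (two most
 * significant bits set) are acceptable identity addresses for an IRK.
 */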
4185static bool irk_is_valid(struct mgmt_irk_info *irk)
4186{
4187	switch (irk->addr.type) {
4188	case BDADDR_LE_PUBLIC:
4189		return true;
4190
4191	case BDADDR_LE_RANDOM:
4192		/* Two most significant bits shall be set */
4193		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4194			return false;
4195		return true;
4196	}
4197
4198	return false;
4199}
4200
4201static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4202		     u16 len)
4203{
4204	struct mgmt_cp_load_irks *cp = cp_data;
4205	u16 irk_count, expected_len;
4206	int i, err;
4207
4208	BT_DBG("request for %s", hdev->name);
4209
4210	if (!lmp_le_capable(hdev))
4211		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4212				  MGMT_STATUS_NOT_SUPPORTED);
4213
4214	irk_count = __le16_to_cpu(cp->irk_count);
4215
4216	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4217	if (expected_len != len) {
4218		BT_ERR("load_irks: expected %u bytes, got %u bytes",
4219		       expected_len, len);
4220		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4221				  MGMT_STATUS_INVALID_PARAMS);
4222	}
4223
4224	BT_DBG("%s irk_count %u", hdev->name, irk_count);
4225
4226	for (i = 0; i < irk_count; i++) {
4227		struct mgmt_irk_info *key = &cp->irks[i];
4228
4229		if (!irk_is_valid(key))
4230			return cmd_status(sk, hdev->id,
4231					  MGMT_OP_LOAD_IRKS,
4232					  MGMT_STATUS_INVALID_PARAMS);
4233	}
4234
4235	hci_dev_lock(hdev);
4236
4237	hci_smp_irks_clear(hdev);
4238
4239	for (i = 0; i < irk_count; i++) {
4240		struct mgmt_irk_info *irk = &cp->irks[i];
4241		u8 addr_type;
4242
4243		if (irk->addr.type == BDADDR_LE_PUBLIC)
4244			addr_type = ADDR_LE_DEV_PUBLIC;
4245		else
4246			addr_type = ADDR_LE_DEV_RANDOM;
4247
4248		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4249			    BDADDR_ANY);
4250	}
4251
4252	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4253
4254	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4255
4256	hci_dev_unlock(hdev);
4257
4258	return err;
4259}
4260
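/* An LTK entry is only acceptable if its master field is 0x00 or 0x01
 * and its address is either public or static random.
 */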
4261static bool ltk_is_valid(struct mgmt_ltk_info *key)
4262{
4263	if (key->master != 0x00 && key->master != 0x01)
4264		return false;
4265
4266	switch (key->addr.type) {
4267	case BDADDR_LE_PUBLIC:
4268		return true;
4269
4270	case BDADDR_LE_RANDOM:
4271		/* Two most significant bits shall be set */
4272		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4273			return false;
4274		return true;
4275	}
4276
4277	return false;
4278}
4279
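/* Load Long Term Keys follows the same two-pass pattern as Load IRKs:
 * validate the length and every entry first, then replace the stored
 * LTKs in one go under the device lock.
 */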
4280static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4281			       void *cp_data, u16 len)
4282{
4283	struct mgmt_cp_load_long_term_keys *cp = cp_data;
4284	u16 key_count, expected_len;
4285	int i, err;
4286
4287	BT_DBG("request for %s", hdev->name);
4288
4289	if (!lmp_le_capable(hdev))
4290		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4291				  MGMT_STATUS_NOT_SUPPORTED);
4292
4293	key_count = __le16_to_cpu(cp->key_count);
4294
4295	expected_len = sizeof(*cp) + key_count *
4296					sizeof(struct mgmt_ltk_info);
4297	if (expected_len != len) {
4298		BT_ERR("load_keys: expected %u bytes, got %u bytes",
4299		       expected_len, len);
4300		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4301				  MGMT_STATUS_INVALID_PARAMS);
4302	}
4303
4304	BT_DBG("%s key_count %u", hdev->name, key_count);
4305
4306	for (i = 0; i < key_count; i++) {
4307		struct mgmt_ltk_info *key = &cp->keys[i];
4308
4309		if (!ltk_is_valid(key))
4310			return cmd_status(sk, hdev->id,
4311					  MGMT_OP_LOAD_LONG_TERM_KEYS,
4312					  MGMT_STATUS_INVALID_PARAMS);
4313	}
4314
4315	hci_dev_lock(hdev);
4316
4317	hci_smp_ltks_clear(hdev);
4318
4319	for (i = 0; i < key_count; i++) {
4320		struct mgmt_ltk_info *key = &cp->keys[i];
4321		u8 type, addr_type;
4322
4323		if (key->addr.type == BDADDR_LE_PUBLIC)
4324			addr_type = ADDR_LE_DEV_PUBLIC;
4325		else
4326			addr_type = ADDR_LE_DEV_RANDOM;
4327
4328		if (key->master)
4329			type = HCI_SMP_LTK;
4330		else
4331			type = HCI_SMP_LTK_SLAVE;
4332
4333		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
4334			    type, 0, key->type, key->val,
4335			    key->enc_size, key->ediv, key->rand);
4336	}
4337
4338	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4339			   NULL, 0);
4340
4341	hci_dev_unlock(hdev);
4342
4343	return err;
4344}
4345
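/* Command handler table, indexed by mgmt opcode. data_len is the exact
 * expected parameter length for fixed-size commands; for commands with
 * var_len set it is the minimum length (see the size check in
 * mgmt_control() below).
 */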
4346static const struct mgmt_handler {
4347	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4348		     u16 data_len);
4349	bool var_len;
4350	size_t data_len;
4351} mgmt_handlers[] = {
4352	{ NULL }, /* 0x0000 (no command) */
4353	{ read_version,           false, MGMT_READ_VERSION_SIZE },
4354	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
4355	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
4356	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
4357	{ set_powered,            false, MGMT_SETTING_SIZE },
4358	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
4359	{ set_connectable,        false, MGMT_SETTING_SIZE },
4360	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
4361	{ set_pairable,           false, MGMT_SETTING_SIZE },
4362	{ set_link_security,      false, MGMT_SETTING_SIZE },
4363	{ set_ssp,                false, MGMT_SETTING_SIZE },
4364	{ set_hs,                 false, MGMT_SETTING_SIZE },
4365	{ set_le,                 false, MGMT_SETTING_SIZE },
4366	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
4367	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
4368	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
4369	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
4370	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
4371	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4372	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
4373	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
4374	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
4375	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4376	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
4377	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
4378	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4379	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
4380	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
4381	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4382	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
4383	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4384	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4385	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4386	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4387	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
4388	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
4389	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
4390	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
4391	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
4392	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
4393	{ set_advertising,        false, MGMT_SETTING_SIZE },
4394	{ set_bredr,              false, MGMT_SETTING_SIZE },
4395	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
4396	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
4397	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
4398	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
4399	{ }, /* 0x002f (no handler) */
4400	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
4401};
4402
4403
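/* Entry point for commands received on the HCI control channel. Each
 * message starts with a struct mgmt_hdr (little-endian opcode, index
 * and parameter length) followed by the command parameters. The header
 * is validated, the addressed controller is looked up (unless the index
 * is MGMT_INDEX_NONE) and the command is dispatched through the
 * mgmt_handlers table above.
 */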
4404int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4405{
4406	void *buf;
4407	u8 *cp;
4408	struct mgmt_hdr *hdr;
4409	u16 opcode, index, len;
4410	struct hci_dev *hdev = NULL;
4411	const struct mgmt_handler *handler;
4412	int err;
4413
4414	BT_DBG("got %zu bytes", msglen);
4415
4416	if (msglen < sizeof(*hdr))
4417		return -EINVAL;
4418
4419	buf = kmalloc(msglen, GFP_KERNEL);
4420	if (!buf)
4421		return -ENOMEM;
4422
4423	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
4424		err = -EFAULT;
4425		goto done;
4426	}
4427
4428	hdr = buf;
4429	opcode = __le16_to_cpu(hdr->opcode);
4430	index = __le16_to_cpu(hdr->index);
4431	len = __le16_to_cpu(hdr->len);
4432
4433	if (len != msglen - sizeof(*hdr)) {
4434		err = -EINVAL;
4435		goto done;
4436	}
4437
4438	if (index != MGMT_INDEX_NONE) {
4439		hdev = hci_dev_get(index);
4440		if (!hdev) {
4441			err = cmd_status(sk, index, opcode,
4442					 MGMT_STATUS_INVALID_INDEX);
4443			goto done;
4444		}
4445
4446		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4447		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4448			err = cmd_status(sk, index, opcode,
4449					 MGMT_STATUS_INVALID_INDEX);
4450			goto done;
4451		}
4452	}
4453
4454	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4455	    mgmt_handlers[opcode].func == NULL) {
4456		BT_DBG("Unknown op %u", opcode);
4457		err = cmd_status(sk, index, opcode,
4458				 MGMT_STATUS_UNKNOWN_COMMAND);
4459		goto done;
4460	}
4461
4462	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4463	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4464		err = cmd_status(sk, index, opcode,
4465				 MGMT_STATUS_INVALID_INDEX);
4466		goto done;
4467	}
4468
4469	handler = &mgmt_handlers[opcode];
4470
4471	if ((handler->var_len && len < handler->data_len) ||
4472	    (!handler->var_len && len != handler->data_len)) {
4473		err = cmd_status(sk, index, opcode,
4474				 MGMT_STATUS_INVALID_PARAMS);
4475		goto done;
4476	}
4477
4478	if (hdev)
4479		mgmt_init_hdev(sk, hdev);
4480
4481	cp = buf + sizeof(*hdr);
4482
4483	err = handler->func(sk, hdev, cp, len);
4484	if (err < 0)
4485		goto done;
4486
4487	err = msglen;
4488
4489done:
4490	if (hdev)
4491		hci_dev_put(hdev);
4492
4493	kfree(buf);
4494	return err;
4495}
4496
4497void mgmt_index_added(struct hci_dev *hdev)
4498{
4499	if (hdev->dev_type != HCI_BREDR)
4500		return;
4501
4502	mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4503}
4504
4505void mgmt_index_removed(struct hci_dev *hdev)
4506{
4507	u8 status = MGMT_STATUS_INVALID_INDEX;
4508
4509	if (hdev->dev_type != HCI_BREDR)
4510		return;
4511
4512	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4513
4514	mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4515}
4516
4517static void powered_complete(struct hci_dev *hdev, u8 status)
4518{
4519	struct cmd_lookup match = { NULL, hdev };
4520
4521	BT_DBG("status 0x%02x", status);
4522
4523	hci_dev_lock(hdev);
4524
4525	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4526
4527	new_settings(hdev, match.sk);
4528
4529	hci_dev_unlock(hdev);
4530
4531	if (match.sk)
4532		sock_put(match.sk);
4533}
4534
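/* Build and run a single HCI request that brings a freshly powered
 * controller in line with the current mgmt settings: SSP mode, LE host
 * support, static address, advertising data, authentication, page scan,
 * class of device, local name and EIR. powered_complete() then sends
 * the responses and the New Settings event.
 */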
4535static int powered_update_hci(struct hci_dev *hdev)
4536{
4537	struct hci_request req;
4538	u8 link_sec;
4539
4540	hci_req_init(&req, hdev);
4541
4542	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
4543	    !lmp_host_ssp_capable(hdev)) {
4544		u8 ssp = 1;
4545
4546		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
4547	}
4548
4549	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4550	    lmp_bredr_capable(hdev)) {
4551		struct hci_cp_write_le_host_supported cp;
4552
4553		cp.le = 1;
4554		cp.simul = lmp_le_br_capable(hdev);
4555
4556		/* Check first if we already have the right
4557		 * host state (host features set)
4558		 */
4559		if (cp.le != lmp_host_le_capable(hdev) ||
4560		    cp.simul != lmp_host_le_br_capable(hdev))
4561			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4562				    sizeof(cp), &cp);
4563	}
4564
4565	if (lmp_le_capable(hdev)) {
4566		/* Set random address to static address if configured */
4567		if (bacmp(&hdev->static_addr, BDADDR_ANY))
4568			hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
4569				    &hdev->static_addr);
4570
4571		/* Make sure the controller has a good default for
4572		 * advertising data. This also applies to the case
4573		 * where BR/EDR was toggled during the AUTO_OFF phase.
4574		 */
4575		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
4576			update_adv_data(&req);
4577			update_scan_rsp_data(&req);
4578		}
4579
4580		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
4581			enable_advertising(&req);
4582	}
4583
4584	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4585	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
4586		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
4587			    sizeof(link_sec), &link_sec);
4588
4589	if (lmp_bredr_capable(hdev)) {
4590		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
4591			set_bredr_scan(&req);
4592		update_class(&req);
4593		update_name(&req);
4594		update_eir(&req);
4595	}
4596
4597	return hci_req_run(&req, powered_complete);
4598}
4599
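/* Called on power state changes. When powering on, the controller is
 * first updated via powered_update_hci() and, if that request could be
 * queued, the notifications are deferred to powered_complete(). When
 * powering off, pending commands are failed with
 * MGMT_STATUS_NOT_POWERED and a zeroed class of device is announced
 * before New Settings is sent.
 */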
4600int mgmt_powered(struct hci_dev *hdev, u8 powered)
4601{
4602	struct cmd_lookup match = { NULL, hdev };
4603	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
4604	u8 zero_cod[] = { 0, 0, 0 };
4605	int err;
4606
4607	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4608		return 0;
4609
4610	if (powered) {
4611		if (powered_update_hci(hdev) == 0)
4612			return 0;
4613
4614		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
4615				     &match);
4616		goto new_settings;
4617	}
4618
4619	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4620	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
4621
4622	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
4623		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
4624			   zero_cod, sizeof(zero_cod), NULL);
4625
4626new_settings:
4627	err = new_settings(hdev, match.sk);
4628
4629	if (match.sk)
4630		sock_put(match.sk);
4631
4632	return err;
4633}
4634
4635void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4636{
4637	struct pending_cmd *cmd;
4638	u8 status;
4639
4640	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4641	if (!cmd)
4642		return;
4643
4644	if (err == -ERFKILL)
4645		status = MGMT_STATUS_RFKILLED;
4646	else
4647		status = MGMT_STATUS_FAILED;
4648
4649	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4650
4651	mgmt_pending_remove(cmd);
4652}
4653
4654void mgmt_discoverable_timeout(struct hci_dev *hdev)
4655{
4656	struct hci_request req;
4657
4658	hci_dev_lock(hdev);
4659
4660	/* When the discoverable timeout triggers, just make sure the
4661	 * limited discoverable flag is cleared. Even when the timeout
4662	 * was triggered from general discoverable mode, it is safe to
4663	 * unconditionally clear the flag.
4664	 */
4665	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4666	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4667
4668	hci_req_init(&req, hdev);
4669	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4670		u8 scan = SCAN_PAGE;
4671		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
4672			    sizeof(scan), &scan);
4673	}
4674	update_class(&req);
4675	update_adv_data(&req);
4676	hci_req_run(&req, NULL);
4677
4678	hdev->discov_timeout = 0;
4679
4680	new_settings(hdev, NULL);
4681
4682	hci_dev_unlock(hdev);
4683}
4684
4685void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4686{
4687	bool changed;
4688
4689	/* Nothing needed here if there's a pending command since that
4690	 * command's request completion callback takes care of everything
4691	 * necessary.
4692	 */
4693	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4694		return;
4695
4696	if (discoverable) {
4697		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4698	} else {
4699		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4700		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4701	}
4702
4703	if (changed) {
4704		struct hci_request req;
4705
4706		/* If this change in discoverability was triggered by
4707		 * disabling connectable mode, the advertising flags may
4708		 * need to be updated.
4709		 */
4710		hci_req_init(&req, hdev);
4711		update_adv_data(&req);
4712		hci_req_run(&req, NULL);
4713
4714		new_settings(hdev, NULL);
4715	}
4716}
4717
4718void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4719{
4720	bool changed;
4721
4722	/* Nothing needed here if there's a pending command since that
4723	 * command's request completion callback takes care of everything
4724	 * necessary.
4725	 */
4726	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4727		return;
4728
4729	if (connectable)
4730		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4731	else
4732		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4733
4734	if (changed)
4735		new_settings(hdev, NULL);
4736}
4737
4738void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4739{
4740	u8 mgmt_err = mgmt_status(status);
4741
4742	if (scan & SCAN_PAGE)
4743		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4744				     cmd_status_rsp, &mgmt_err);
4745
4746	if (scan & SCAN_INQUIRY)
4747		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4748				     cmd_status_rsp, &mgmt_err);
4749}
4750
4751void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4752		       bool persistent)
4753{
4754	struct mgmt_ev_new_link_key ev;
4755
4756	memset(&ev, 0, sizeof(ev));
4757
4758	ev.store_hint = persistent;
4759	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4760	ev.key.addr.type = BDADDR_BREDR;
4761	ev.key.type = key->type;
4762	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4763	ev.key.pin_len = key->pin_len;
4764
4765	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4766}
4767
4768void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
4769{
4770	struct mgmt_ev_new_long_term_key ev;
4771
4772	memset(&ev, 0, sizeof(ev));
4773
4774	ev.store_hint = persistent;
4775	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4776	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
4777	ev.key.type = key->authenticated;
4778	ev.key.enc_size = key->enc_size;
4779	ev.key.ediv = key->ediv;
4780
4781	if (key->type == HCI_SMP_LTK)
4782		ev.key.master = 1;
4783
4784	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
4785	memcpy(ev.key.val, key->val, sizeof(key->val));
4786
4787	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
4788}
4789
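/* Append a single EIR field to the buffer at offset eir_len: a length
 * octet covering type plus data, the type octet and the data itself.
 * Returns the new total length.
 */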
4790static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4791				  u8 data_len)
4792{
4793	eir[eir_len++] = sizeof(type) + data_len;
4794	eir[eir_len++] = type;
4795	memcpy(&eir[eir_len], data, data_len);
4796	eir_len += data_len;
4797
4798	return eir_len;
4799}
4800
4801void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4802			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
4803			   u8 *dev_class)
4804{
4805	char buf[512];
4806	struct mgmt_ev_device_connected *ev = (void *) buf;
4807	u16 eir_len = 0;
4808
4809	bacpy(&ev->addr.bdaddr, bdaddr);
4810	ev->addr.type = link_to_bdaddr(link_type, addr_type);
4811
4812	ev->flags = __cpu_to_le32(flags);
4813
4814	if (name_len > 0)
4815		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
4816					  name, name_len);
4817
4818	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
4819		eir_len = eir_append_data(ev->eir, eir_len,
4820					  EIR_CLASS_OF_DEV, dev_class, 3);
4821
4822	ev->eir_len = cpu_to_le16(eir_len);
4823
4824	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
4825		    sizeof(*ev) + eir_len, NULL);
4826}
4827
4828static void disconnect_rsp(struct pending_cmd *cmd, void *data)
4829{
4830	struct mgmt_cp_disconnect *cp = cmd->param;
4831	struct sock **sk = data;
4832	struct mgmt_rp_disconnect rp;
4833
4834	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4835	rp.addr.type = cp->addr.type;
4836
4837	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
4838		     sizeof(rp));
4839
4840	*sk = cmd->sk;
4841	sock_hold(*sk);
4842
4843	mgmt_pending_remove(cmd);
4844}
4845
4846static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
4847{
4848	struct hci_dev *hdev = data;
4849	struct mgmt_cp_unpair_device *cp = cmd->param;
4850	struct mgmt_rp_unpair_device rp;
4851
4852	memset(&rp, 0, sizeof(rp));
4853	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4854	rp.addr.type = cp->addr.type;
4855
4856	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
4857
4858	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
4859
4860	mgmt_pending_remove(cmd);
4861}
4862
4863void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
4864			      u8 link_type, u8 addr_type, u8 reason)
4865{
4866	struct mgmt_ev_device_disconnected ev;
4867	struct sock *sk = NULL;
4868
4869	if (link_type != ACL_LINK && link_type != LE_LINK)
4870		return;
4871
4872	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
4873
4874	bacpy(&ev.addr.bdaddr, bdaddr);
4875	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4876	ev.reason = reason;
4877
4878	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
4879
4880	if (sk)
4881		sock_put(sk);
4882
4883	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
4884			     hdev);
4885}
4886
4887void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
4888			    u8 link_type, u8 addr_type, u8 status)
4889{
4890	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
4891	struct mgmt_cp_disconnect *cp;
4892	struct mgmt_rp_disconnect rp;
4893	struct pending_cmd *cmd;
4894
4895	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
4896			     hdev);
4897
4898	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
4899	if (!cmd)
4900		return;
4901
4902	cp = cmd->param;
4903
4904	if (bacmp(bdaddr, &cp->addr.bdaddr))
4905		return;
4906
4907	if (cp->addr.type != bdaddr_type)
4908		return;
4909
4910	bacpy(&rp.addr.bdaddr, bdaddr);
4911	rp.addr.type = bdaddr_type;
4912
4913	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
4914		     mgmt_status(status), &rp, sizeof(rp));
4915
4916	mgmt_pending_remove(cmd);
4917}
4918
4919void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4920			 u8 addr_type, u8 status)
4921{
4922	struct mgmt_ev_connect_failed ev;
4923
4924	bacpy(&ev.addr.bdaddr, bdaddr);
4925	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4926	ev.status = mgmt_status(status);
4927
4928	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
4929}
4930
4931void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
4932{
4933	struct mgmt_ev_pin_code_request ev;
4934
4935	bacpy(&ev.addr.bdaddr, bdaddr);
4936	ev.addr.type = BDADDR_BREDR;
4937	ev.secure = secure;
4938
4939	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
4940}
4941
4942void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4943				  u8 status)
4944{
4945	struct pending_cmd *cmd;
4946	struct mgmt_rp_pin_code_reply rp;
4947
4948	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
4949	if (!cmd)
4950		return;
4951
4952	bacpy(&rp.addr.bdaddr, bdaddr);
4953	rp.addr.type = BDADDR_BREDR;
4954
4955	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
4956		     mgmt_status(status), &rp, sizeof(rp));
4957
4958	mgmt_pending_remove(cmd);
4959}
4960
4961void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4962				      u8 status)
4963{
4964	struct pending_cmd *cmd;
4965	struct mgmt_rp_pin_code_reply rp;
4966
4967	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
4968	if (!cmd)
4969		return;
4970
4971	bacpy(&rp.addr.bdaddr, bdaddr);
4972	rp.addr.type = BDADDR_BREDR;
4973
4974	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
4975		     mgmt_status(status), &rp, sizeof(rp));
4976
4977	mgmt_pending_remove(cmd);
4978}
4979
4980int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4981			      u8 link_type, u8 addr_type, __le32 value,
4982			      u8 confirm_hint)
4983{
4984	struct mgmt_ev_user_confirm_request ev;
4985
4986	BT_DBG("%s", hdev->name);
4987
4988	bacpy(&ev.addr.bdaddr, bdaddr);
4989	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4990	ev.confirm_hint = confirm_hint;
4991	ev.value = value;
4992
4993	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
4994			  NULL);
4995}
4996
4997int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4998			      u8 link_type, u8 addr_type)
4999{
5000	struct mgmt_ev_user_passkey_request ev;
5001
5002	BT_DBG("%s", hdev->name);
5003
5004	bacpy(&ev.addr.bdaddr, bdaddr);
5005	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5006
5007	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5008			  NULL);
5009}
5010
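/* Common completion handler for the user confirmation and passkey
 * replies: find the matching pending command, complete it with the
 * remote address and status, and drop the pending entry.
 */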
5011static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5012				      u8 link_type, u8 addr_type, u8 status,
5013				      u8 opcode)
5014{
5015	struct pending_cmd *cmd;
5016	struct mgmt_rp_user_confirm_reply rp;
5017	int err;
5018
5019	cmd = mgmt_pending_find(opcode, hdev);
5020	if (!cmd)
5021		return -ENOENT;
5022
5023	bacpy(&rp.addr.bdaddr, bdaddr);
5024	rp.addr.type = link_to_bdaddr(link_type, addr_type);
5025	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
5026			   &rp, sizeof(rp));
5027
5028	mgmt_pending_remove(cmd);
5029
5030	return err;
5031}
5032
5033int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5034				     u8 link_type, u8 addr_type, u8 status)
5035{
5036	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5037					  status, MGMT_OP_USER_CONFIRM_REPLY);
5038}
5039
5040int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5041					 u8 link_type, u8 addr_type, u8 status)
5042{
5043	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5044					  status,
5045					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
5046}
5047
5048int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5049				     u8 link_type, u8 addr_type, u8 status)
5050{
5051	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5052					  status, MGMT_OP_USER_PASSKEY_REPLY);
5053}
5054
5055int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5056					 u8 link_type, u8 addr_type, u8 status)
5057{
5058	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5059					  status,
5060					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
5061}
5062
5063int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5064			     u8 link_type, u8 addr_type, u32 passkey,
5065			     u8 entered)
5066{
5067	struct mgmt_ev_passkey_notify ev;
5068
5069	BT_DBG("%s", hdev->name);
5070
5071	bacpy(&ev.addr.bdaddr, bdaddr);
5072	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5073	ev.passkey = __cpu_to_le32(passkey);
5074	ev.entered = entered;
5075
5076	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5077}
5078
5079void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5080		      u8 addr_type, u8 status)
5081{
5082	struct mgmt_ev_auth_failed ev;
5083
5084	bacpy(&ev.addr.bdaddr, bdaddr);
5085	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5086	ev.status = mgmt_status(status);
5087
5088	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5089}
5090
5091void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
5092{
5093	struct cmd_lookup match = { NULL, hdev };
5094	bool changed;
5095
5096	if (status) {
5097		u8 mgmt_err = mgmt_status(status);
5098		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
5099				     cmd_status_rsp, &mgmt_err);
5100		return;
5101	}
5102
5103	if (test_bit(HCI_AUTH, &hdev->flags))
5104		changed = !test_and_set_bit(HCI_LINK_SECURITY,
5105					    &hdev->dev_flags);
5106	else
5107		changed = test_and_clear_bit(HCI_LINK_SECURITY,
5108					     &hdev->dev_flags);
5109
5110	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
5111			     &match);
5112
5113	if (changed)
5114		new_settings(hdev, match.sk);
5115
5116	if (match.sk)
5117		sock_put(match.sk);
5118}
5119
5120static void clear_eir(struct hci_request *req)
5121{
5122	struct hci_dev *hdev = req->hdev;
5123	struct hci_cp_write_eir cp;
5124
5125	if (!lmp_ext_inq_capable(hdev))
5126		return;
5127
5128	memset(hdev->eir, 0, sizeof(hdev->eir));
5129
5130	memset(&cp, 0, sizeof(cp));
5131
5132	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
5133}
5134
5135void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5136{
5137	struct cmd_lookup match = { NULL, hdev };
5138	struct hci_request req;
5139	bool changed = false;
5140
5141	if (status) {
5142		u8 mgmt_err = mgmt_status(status);
5143
5144		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
5145						 &hdev->dev_flags)) {
5146			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5147			new_settings(hdev, NULL);
5148		}
5149
5150		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
5151				     &mgmt_err);
5152		return;
5153	}
5154
5155	if (enable) {
5156		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
5157	} else {
5158		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
5159		if (!changed)
5160			changed = test_and_clear_bit(HCI_HS_ENABLED,
5161						     &hdev->dev_flags);
5162		else
5163			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5164	}
5165
5166	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
5167
5168	if (changed)
5169		new_settings(hdev, match.sk);
5170
5171	if (match.sk)
5172		sock_put(match.sk);
5173
5174	hci_req_init(&req, hdev);
5175
5176	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
5177		update_eir(&req);
5178	else
5179		clear_eir(&req);
5180
5181	hci_req_run(&req, NULL);
5182}
5183
5184void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5185{
5186	struct cmd_lookup match = { NULL, hdev };
5187	bool changed = false;
5188
5189	if (status) {
5190		u8 mgmt_err = mgmt_status(status);
5191
5192		if (enable) {
5193			if (test_and_clear_bit(HCI_SC_ENABLED,
5194					       &hdev->dev_flags))
5195				new_settings(hdev, NULL);
5196			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5197		}
5198
5199		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5200				     cmd_status_rsp, &mgmt_err);
5201		return;
5202	}
5203
5204	if (enable) {
5205		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5206	} else {
5207		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5208		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5209	}
5210
5211	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5212			     settings_rsp, &match);
5213
5214	if (changed)
5215		new_settings(hdev, match.sk);
5216
5217	if (match.sk)
5218		sock_put(match.sk);
5219}
5220
5221static void sk_lookup(struct pending_cmd *cmd, void *data)
5222{
5223	struct cmd_lookup *match = data;
5224
5225	if (match->sk == NULL) {
5226		match->sk = cmd->sk;
5227		sock_hold(match->sk);
5228	}
5229}
5230
5231void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
5232				    u8 status)
5233{
5234	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
5235
5236	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
5237	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
5238	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
5239
5240	if (!status)
5241		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
5242			   NULL);
5243
5244	if (match.sk)
5245		sock_put(match.sk);
5246}
5247
5248void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
5249{
5250	struct mgmt_cp_set_local_name ev;
5251	struct pending_cmd *cmd;
5252
5253	if (status)
5254		return;
5255
5256	memset(&ev, 0, sizeof(ev));
5257	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
5258	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
5259
5260	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
5261	if (!cmd) {
5262		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
5263
5264		/* If this is an HCI command related to powering on the
5265		 * HCI dev, don't send any mgmt signals.
5266		 */
5267		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5268			return;
5269	}
5270
5271	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
5272		   cmd ? cmd->sk : NULL);
5273}
5274
5275void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
5276				       u8 *randomizer192, u8 *hash256,
5277				       u8 *randomizer256, u8 status)
5278{
5279	struct pending_cmd *cmd;
5280
5281	BT_DBG("%s status %u", hdev->name, status);
5282
5283	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
5284	if (!cmd)
5285		return;
5286
5287	if (status) {
5288		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5289			   mgmt_status(status));
5290	} else {
5291		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
5292		    hash256 && randomizer256) {
5293			struct mgmt_rp_read_local_oob_ext_data rp;
5294
5295			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
5296			memcpy(rp.randomizer192, randomizer192,
5297			       sizeof(rp.randomizer192));
5298
5299			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
5300			memcpy(rp.randomizer256, randomizer256,
5301			       sizeof(rp.randomizer256));
5302
5303			cmd_complete(cmd->sk, hdev->id,
5304				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5305				     &rp, sizeof(rp));
5306		} else {
5307			struct mgmt_rp_read_local_oob_data rp;
5308
5309			memcpy(rp.hash, hash192, sizeof(rp.hash));
5310			memcpy(rp.randomizer, randomizer192,
5311			       sizeof(rp.randomizer));
5312
5313			cmd_complete(cmd->sk, hdev->id,
5314				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5315				     &rp, sizeof(rp));
5316		}
5317	}
5318
5319	mgmt_pending_remove(cmd);
5320}
5321
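/* Forward an inquiry/scan result to user space as a Device Found event.
 * Results are dropped when no discovery is active. The flags signal
 * whether the name still needs to be confirmed and whether the remote
 * device lacks SSP support (legacy pairing), and the class of device is
 * appended to the EIR data if not already present there.
 */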
5322void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5323		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
5324		       u8 ssp, u8 *eir, u16 eir_len)
5325{
5326	char buf[512];
5327	struct mgmt_ev_device_found *ev = (void *) buf;
5328	size_t ev_size;
5329
5330	if (!hci_discovery_active(hdev))
5331		return;
5332
5333	/* Leave 5 bytes for a potential CoD field */
5334	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
5335		return;
5336
5337	memset(buf, 0, sizeof(buf));
5338
5339	bacpy(&ev->addr.bdaddr, bdaddr);
5340	ev->addr.type = link_to_bdaddr(link_type, addr_type);
5341	ev->rssi = rssi;
5342	if (cfm_name)
5343		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
5344	if (!ssp)
5345		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
5346
5347	if (eir_len > 0)
5348		memcpy(ev->eir, eir, eir_len);
5349
5350	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
5351		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
5352					  dev_class, 3);
5353
5354	ev->eir_len = cpu_to_le16(eir_len);
5355	ev_size = sizeof(*ev) + eir_len;
5356
5357	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
5358}
5359
5360void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5361		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5362{
5363	struct mgmt_ev_device_found *ev;
5364	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5365	u16 eir_len;
5366
5367	ev = (struct mgmt_ev_device_found *) buf;
5368
5369	memset(buf, 0, sizeof(buf));
5370
5371	bacpy(&ev->addr.bdaddr, bdaddr);
5372	ev->addr.type = link_to_bdaddr(link_type, addr_type);
5373	ev->rssi = rssi;
5374
5375	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5376				  name_len);
5377
5378	ev->eir_len = cpu_to_le16(eir_len);
5379
5380	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5381}
5382
5383void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5384{
5385	struct mgmt_ev_discovering ev;
5386	struct pending_cmd *cmd;
5387
5388	BT_DBG("%s discovering %u", hdev->name, discovering);
5389
5390	if (discovering)
5391		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5392	else
5393		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5394
5395	if (cmd != NULL) {
5396		u8 type = hdev->discovery.type;
5397
5398		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
5399			     sizeof(type));
5400		mgmt_pending_remove(cmd);
5401	}
5402
5403	memset(&ev, 0, sizeof(ev));
5404	ev.type = hdev->discovery.type;
5405	ev.discovering = discovering;
5406
5407	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
5408}
5409
5410int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5411{
5412	struct pending_cmd *cmd;
5413	struct mgmt_ev_device_blocked ev;
5414
5415	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5416
5417	bacpy(&ev.addr.bdaddr, bdaddr);
5418	ev.addr.type = type;
5419
5420	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5421			  cmd ? cmd->sk : NULL);
5422}
5423
5424int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5425{
5426	struct pending_cmd *cmd;
5427	struct mgmt_ev_device_unblocked ev;
5428
5429	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5430
5431	bacpy(&ev.addr.bdaddr, bdaddr);
5432	ev.addr.type = type;
5433
5434	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5435			  cmd ? cmd->sk : NULL);
5436}
5437
5438static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5439{
5440	BT_DBG("%s status %u", hdev->name, status);
5441
5442	/* Clear the advertising mgmt setting if we failed to re-enable it */
5443	if (status) {
5444		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5445		new_settings(hdev, NULL);
5446	}
5447}
5448
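/* Re-enable advertising once no LE connections remain, but only if the
 * Advertising setting is still enabled. If the request cannot be run,
 * the setting is cleared and user space is told via New Settings.
 */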
5449void mgmt_reenable_advertising(struct hci_dev *hdev)
5450{
5451	struct hci_request req;
5452
5453	if (hci_conn_num(hdev, LE_LINK) > 0)
5454		return;
5455
5456	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5457		return;
5458
5459	hci_req_init(&req, hdev);
5460	enable_advertising(&req);
5461
5462	/* If this fails we have no option but to let user space know
5463	 * that we've disabled advertising.
5464	 */
5465	if (hci_req_run(&req, adv_enable_complete) < 0) {
5466		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5467		new_settings(hdev, NULL);
5468	}
5469}
5470