mgmt.c revision 628531c9e971f1bd023d9fbd00faff014ca22440
1/*
2   BlueZ - Bluetooth protocol stack for Linux
3
4   Copyright (C) 2010  Nokia Corporation
5   Copyright (C) 2011-2012 Intel Corporation
6
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License version 2 as
9   published by the Free Software Foundation;
10
11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22   SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI Management interface */
26
27#include <linux/module.h>
28#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
32#include <net/bluetooth/l2cap.h>
33#include <net/bluetooth/mgmt.h>
34
35#include "smp.h"
36
/* Version/revision of the mgmt interface reported by Read Version */
#define MGMT_VERSION	1
#define MGMT_REVISION	7
39
/* Mgmt command opcodes implemented here; reported to user space via
 * the Read Commands reply built in read_commands().
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
};
97
/* Mgmt event opcodes this implementation can emit; reported to user
 * space via the Read Commands reply built in read_commands().
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
};
129
/* Service cache validity period: 2 seconds, expressed in jiffies */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* Powered from mgmt's point of view: HCI is up and the controller is
 * not merely in the temporary HCI_AUTO_OFF state.
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

/* Book-keeping for a mgmt command whose completion is still pending */
struct pending_cmd {
	struct list_head list;	/* linked into hdev->mgmt_pending */
	u16 opcode;		/* mgmt command opcode */
	int index;		/* controller id (hdev->id) */
	void *param;		/* copied command parameters */
	struct sock *sk;	/* originating socket, reference held */
	void *user_data;	/* opaque per-command context */
};
143
/* HCI to MGMT error code conversion table, indexed directly by the HCI
 * status code. Status codes beyond the end of the table are mapped to
 * MGMT_STATUS_FAILED by mgmt_status().
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
208
209static u8 mgmt_status(u8 hci_status)
210{
211	if (hci_status < ARRAY_SIZE(mgmt_status_table))
212		return mgmt_status_table[hci_status];
213
214	return MGMT_STATUS_FAILED;
215}
216
217static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
218		      struct sock *skip_sk)
219{
220	struct sk_buff *skb;
221	struct mgmt_hdr *hdr;
222
223	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
224	if (!skb)
225		return -ENOMEM;
226
227	hdr = (void *) skb_put(skb, sizeof(*hdr));
228	hdr->opcode = cpu_to_le16(event);
229	if (hdev)
230		hdr->index = cpu_to_le16(hdev->id);
231	else
232		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
233	hdr->len = cpu_to_le16(data_len);
234
235	if (data)
236		memcpy(skb_put(skb, data_len), data, data_len);
237
238	/* Time stamp */
239	__net_timestamp(skb);
240
241	hci_send_to_control(skb, skip_sk);
242	kfree_skb(skb);
243
244	return 0;
245}
246
247static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
248{
249	struct sk_buff *skb;
250	struct mgmt_hdr *hdr;
251	struct mgmt_ev_cmd_status *ev;
252	int err;
253
254	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
255
256	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
257	if (!skb)
258		return -ENOMEM;
259
260	hdr = (void *) skb_put(skb, sizeof(*hdr));
261
262	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
263	hdr->index = cpu_to_le16(index);
264	hdr->len = cpu_to_le16(sizeof(*ev));
265
266	ev = (void *) skb_put(skb, sizeof(*ev));
267	ev->status = status;
268	ev->opcode = cpu_to_le16(cmd);
269
270	err = sock_queue_rcv_skb(sk, skb);
271	if (err < 0)
272		kfree_skb(skb);
273
274	return err;
275}
276
277static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
278			void *rp, size_t rp_len)
279{
280	struct sk_buff *skb;
281	struct mgmt_hdr *hdr;
282	struct mgmt_ev_cmd_complete *ev;
283	int err;
284
285	BT_DBG("sock %p", sk);
286
287	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
288	if (!skb)
289		return -ENOMEM;
290
291	hdr = (void *) skb_put(skb, sizeof(*hdr));
292
293	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
294	hdr->index = cpu_to_le16(index);
295	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
296
297	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
298	ev->opcode = cpu_to_le16(cmd);
299	ev->status = status;
300
301	if (rp)
302		memcpy(ev->data, rp, rp_len);
303
304	err = sock_queue_rcv_skb(sk, skb);
305	if (err < 0)
306		kfree_skb(skb);
307
308	return err;
309}
310
311static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
312			u16 data_len)
313{
314	struct mgmt_rp_read_version rp;
315
316	BT_DBG("sock %p", sk);
317
318	rp.version = MGMT_VERSION;
319	rp.revision = cpu_to_le16(MGMT_REVISION);
320
321	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
322			    sizeof(rp));
323}
324
325static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
326			 u16 data_len)
327{
328	struct mgmt_rp_read_commands *rp;
329	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
330	const u16 num_events = ARRAY_SIZE(mgmt_events);
331	__le16 *opcode;
332	size_t rp_size;
333	int i, err;
334
335	BT_DBG("sock %p", sk);
336
337	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
338
339	rp = kmalloc(rp_size, GFP_KERNEL);
340	if (!rp)
341		return -ENOMEM;
342
343	rp->num_commands = cpu_to_le16(num_commands);
344	rp->num_events = cpu_to_le16(num_events);
345
346	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
347		put_unaligned_le16(mgmt_commands[i], opcode);
348
349	for (i = 0; i < num_events; i++, opcode++)
350		put_unaligned_le16(mgmt_events[i], opcode);
351
352	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
353			   rp_size);
354	kfree(rp);
355
356	return err;
357}
358
/* Handle the Read Controller Index List command: reply with the ids of
 * all configured BR/EDR controllers.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on how many indexes the reply needs */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the ids, additionally skipping controllers
	 * still in setup/config or claimed by a user channel. This only
	 * narrows the first pass, so the buffer is always large enough.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final (possibly smaller) count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
418
/* Handle the Read Unconfigured Controller Index List command: same
 * shape as read_index_list() but selecting controllers that still have
 * HCI_UNCONFIGURED set.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on how many indexes the reply needs */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the ids, additionally skipping controllers
	 * still in setup/config or claimed by a user channel. This only
	 * narrows the first pass, so the buffer is always large enough.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final (possibly smaller) count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
			   0, rp, rp_len);

	kfree(rp);

	return err;
}
478
479static bool is_configured(struct hci_dev *hdev)
480{
481	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
483		return false;
484
485	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486	    !bacmp(&hdev->public_addr, BDADDR_ANY))
487		return false;
488
489	return true;
490}
491
492static __le32 get_missing_options(struct hci_dev *hdev)
493{
494	u32 options = 0;
495
496	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498		options |= MGMT_OPTION_EXTERNAL_CONFIG;
499
500	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501	    !bacmp(&hdev->public_addr, BDADDR_ANY))
502		options |= MGMT_OPTION_PUBLIC_ADDRESS;
503
504	return cpu_to_le32(options);
505}
506
507static int new_options(struct hci_dev *hdev, struct sock *skip)
508{
509	__le32 options = get_missing_options(hdev);
510
511	return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512			  sizeof(options), skip);
513}
514
515static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
516{
517	__le32 options = get_missing_options(hdev);
518
519	return cmd_complete(sk, hdev->id, opcode, 0, &options,
520			    sizeof(options));
521}
522
523static int read_config_info(struct sock *sk, struct hci_dev *hdev,
524			    void *data, u16 data_len)
525{
526	struct mgmt_rp_read_config_info rp;
527	u32 options = 0;
528
529	BT_DBG("sock %p %s", sk, hdev->name);
530
531	hci_dev_lock(hdev);
532
533	memset(&rp, 0, sizeof(rp));
534	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
535
536	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
537		options |= MGMT_OPTION_EXTERNAL_CONFIG;
538
539	if (hdev->set_bdaddr)
540		options |= MGMT_OPTION_PUBLIC_ADDRESS;
541
542	rp.supported_options = cpu_to_le32(options);
543	rp.missing_options = get_missing_options(hdev);
544
545	hci_dev_unlock(hdev);
546
547	return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
548			    sizeof(rp));
549}
550
551static u32 get_supported_settings(struct hci_dev *hdev)
552{
553	u32 settings = 0;
554
555	settings |= MGMT_SETTING_POWERED;
556	settings |= MGMT_SETTING_PAIRABLE;
557	settings |= MGMT_SETTING_DEBUG_KEYS;
558	settings |= MGMT_SETTING_CONNECTABLE;
559	settings |= MGMT_SETTING_DISCOVERABLE;
560
561	if (lmp_bredr_capable(hdev)) {
562		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
563			settings |= MGMT_SETTING_FAST_CONNECTABLE;
564		settings |= MGMT_SETTING_BREDR;
565		settings |= MGMT_SETTING_LINK_SECURITY;
566
567		if (lmp_ssp_capable(hdev)) {
568			settings |= MGMT_SETTING_SSP;
569			settings |= MGMT_SETTING_HS;
570		}
571
572		if (lmp_sc_capable(hdev) ||
573		    test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
574			settings |= MGMT_SETTING_SECURE_CONN;
575	}
576
577	if (lmp_le_capable(hdev)) {
578		settings |= MGMT_SETTING_LE;
579		settings |= MGMT_SETTING_ADVERTISING;
580		settings |= MGMT_SETTING_PRIVACY;
581	}
582
583	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
584	    hdev->set_bdaddr)
585		settings |= MGMT_SETTING_CONFIGURATION;
586
587	return settings;
588}
589
590static u32 get_current_settings(struct hci_dev *hdev)
591{
592	u32 settings = 0;
593
594	if (hdev_is_powered(hdev))
595		settings |= MGMT_SETTING_POWERED;
596
597	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
598		settings |= MGMT_SETTING_CONNECTABLE;
599
600	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
601		settings |= MGMT_SETTING_FAST_CONNECTABLE;
602
603	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
604		settings |= MGMT_SETTING_DISCOVERABLE;
605
606	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
607		settings |= MGMT_SETTING_PAIRABLE;
608
609	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
610		settings |= MGMT_SETTING_BREDR;
611
612	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
613		settings |= MGMT_SETTING_LE;
614
615	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
616		settings |= MGMT_SETTING_LINK_SECURITY;
617
618	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
619		settings |= MGMT_SETTING_SSP;
620
621	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
622		settings |= MGMT_SETTING_HS;
623
624	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
625		settings |= MGMT_SETTING_ADVERTISING;
626
627	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
628		settings |= MGMT_SETTING_SECURE_CONN;
629
630	if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
631		settings |= MGMT_SETTING_DEBUG_KEYS;
632
633	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
634		settings |= MGMT_SETTING_PRIVACY;
635
636	return settings;
637}
638
/* 16-bit UUID of the PnP Information service class; excluded from the
 * EIR UUID list in create_uuid16_list().
 */
#define PNP_INFO_SVCLASS_ID		0x1200
640
/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data (at most @len bytes). UUIDs below 0x1100 and PnP Information
 * are skipped. If not everything fits, the field type is downgraded
 * from "complete" to "some". Returns a pointer just past the bytes
 * written (== @data when nothing was written).
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for length + type + at least one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit alias sits in bytes 12-13 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			/* Emit the field header lazily on the first match */
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		/* Little-endian encoding of the UUID value */
		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
682
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * @data (at most @len bytes). If not everything fits, the field type
 * is downgraded from "complete" to "some". Returns a pointer just
 * past the bytes written.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for length + type + at least one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			/* Emit the field header lazily on the first match */
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit value occupies bytes 12-15 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
715
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * @data (at most @len bytes). If not everything fits, the field type
 * is downgraded from "complete" to "some". Returns a pointer just
 * past the bytes written.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for length + type + at least one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			/* Emit the field header lazily on the first match */
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
748
749static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
750{
751	struct pending_cmd *cmd;
752
753	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
754		if (cmd->opcode == opcode)
755			return cmd;
756	}
757
758	return NULL;
759}
760
761static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
762						  struct hci_dev *hdev,
763						  const void *data)
764{
765	struct pending_cmd *cmd;
766
767	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
768		if (cmd->user_data != data)
769			continue;
770		if (cmd->opcode == opcode)
771			return cmd;
772	}
773
774	return NULL;
775}
776
/* Build LE scan response data carrying the local name, shortening it
 * when it does not fit. Returns the number of bytes written to @ptr.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Two bytes are reserved for the length/type header */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* Length byte covers the type byte plus the name itself */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
802
/* Queue an LE Set Scan Response Data command, but only when LE is
 * enabled and the data actually differs from what was sent last.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the command if the controller already has this data */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Remember what was sent so the next update can compare */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
827
828static u8 get_adv_discov_flags(struct hci_dev *hdev)
829{
830	struct pending_cmd *cmd;
831
832	/* If there's a pending mgmt command the flags will not yet have
833	 * their final values, so check for this first.
834	 */
835	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
836	if (cmd) {
837		struct mgmt_mode *cp = cmd->param;
838		if (cp->val == 0x01)
839			return LE_AD_GENERAL;
840		else if (cp->val == 0x02)
841			return LE_AD_LIMITED;
842	} else {
843		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
844			return LE_AD_LIMITED;
845		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
846			return LE_AD_GENERAL;
847	}
848
849	return 0;
850}
851
/* Build LE advertising data: a flags field (discoverability plus
 * no-BR/EDR) and, when valid, the advertising TX power. Returns the
 * number of bytes written to @ptr.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* The flags field is only emitted when at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
883
/* Queue an LE Set Advertising Data command, but only when LE is
 * enabled and the data actually differs from what was sent last.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the command if the controller already has this data */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Remember what was sent so the next update can compare */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
908
909int mgmt_update_adv_data(struct hci_dev *hdev)
910{
911	struct hci_request req;
912
913	hci_req_init(&req, hdev);
914	update_adv_data(&req);
915
916	return hci_req_run(&req, NULL);
917}
918
/* Assemble extended inquiry response data into @data: the local name
 * (shortened to 48 bytes if needed), inquiry TX power, Device ID and
 * finally the 16/32/128-bit service UUID lists in remaining space.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		/* Device ID: source, vendor, product, version (8 bytes) */
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Fill whatever space remains with the UUID lists */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
966
/* Queue a Write Extended Inquiry Response command when EIR applies
 * (powered, extended-inquiry capable, SSP enabled, service cache not
 * active) and the assembled data differs from what was last written.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Nothing to do if the controller already has this EIR data */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	/* Remember what was sent so the next update can compare */
	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
995
996static u8 get_service_classes(struct hci_dev *hdev)
997{
998	struct bt_uuid *uuid;
999	u8 val = 0;
1000
1001	list_for_each_entry(uuid, &hdev->uuids, list)
1002		val |= uuid->svc_hint;
1003
1004	return val;
1005}
1006
/* Queue a Write Class of Device command from the current major/minor
 * class and service-class hints; skipped when unpowered, BR/EDR is
 * disabled, the service cache is active, or the class is unchanged.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited discoverable mode sets bit 5 of the middle byte */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
1035
1036static bool get_connectable(struct hci_dev *hdev)
1037{
1038	struct pending_cmd *cmd;
1039
1040	/* If there's a pending mgmt command the flag will not yet have
1041	 * it's final value, so check for this first.
1042	 */
1043	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1044	if (cmd) {
1045		struct mgmt_mode *cp = cmd->param;
1046		return cp->val;
1047	}
1048
1049	return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1050}
1051
1052static void disable_advertising(struct hci_request *req)
1053{
1054	u8 enable = 0x00;
1055
1056	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1057}
1058
/* Queue the HCI commands to (re-)enable LE advertising: first disable
 * any running advertising, then set the parameters and switch it on.
 * Does nothing while LE connections exist or when no usable own
 * address can be determined.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Never (re-)start advertising while an LE link is up */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
	/* Connectable undirected vs. non-connectable advertising */
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1099
/* Delayed work: when the service cache period ends, push the real EIR
 * and class-of-device values to the controller in a single request.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act if the cache flag was actually set */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	/* Run the queued commands outside the dev lock */
	hci_req_run(&req, NULL);
}
1120
/* Delayed work run when the resolvable private address times out:
 * mark it expired and, when advertising is on, restart advertising so
 * a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
1141
1142static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1143{
1144	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1145		return;
1146
1147	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1148	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1149
1150	/* Non-mgmt controlled devices get this bit set
1151	 * implicitly so that pairing works for them, however
1152	 * for mgmt we require user-space to explicitly enable
1153	 * it
1154	 */
1155	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1156}
1157
1158static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1159				void *data, u16 data_len)
1160{
1161	struct mgmt_rp_read_info rp;
1162
1163	BT_DBG("sock %p %s", sk, hdev->name);
1164
1165	hci_dev_lock(hdev);
1166
1167	memset(&rp, 0, sizeof(rp));
1168
1169	bacpy(&rp.bdaddr, &hdev->bdaddr);
1170
1171	rp.version = hdev->hci_ver;
1172	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1173
1174	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1175	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1176
1177	memcpy(rp.dev_class, hdev->dev_class, 3);
1178
1179	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1180	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1181
1182	hci_dev_unlock(hdev);
1183
1184	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1185			    sizeof(rp));
1186}
1187
1188static void mgmt_pending_free(struct pending_cmd *cmd)
1189{
1190	sock_put(cmd->sk);
1191	kfree(cmd->param);
1192	kfree(cmd);
1193}
1194
1195static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1196					    struct hci_dev *hdev, void *data,
1197					    u16 len)
1198{
1199	struct pending_cmd *cmd;
1200
1201	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1202	if (!cmd)
1203		return NULL;
1204
1205	cmd->opcode = opcode;
1206	cmd->index = hdev->id;
1207
1208	cmd->param = kmalloc(len, GFP_KERNEL);
1209	if (!cmd->param) {
1210		kfree(cmd);
1211		return NULL;
1212	}
1213
1214	if (data)
1215		memcpy(cmd->param, data, len);
1216
1217	cmd->sk = sk;
1218	sock_hold(sk);
1219
1220	list_add(&cmd->list, &hdev->mgmt_pending);
1221
1222	return cmd;
1223}
1224
1225static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1226				 void (*cb)(struct pending_cmd *cmd,
1227					    void *data),
1228				 void *data)
1229{
1230	struct pending_cmd *cmd, *tmp;
1231
1232	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1233		if (opcode > 0 && cmd->opcode != opcode)
1234			continue;
1235
1236		cb(cmd, data);
1237	}
1238}
1239
1240static void mgmt_pending_remove(struct pending_cmd *cmd)
1241{
1242	list_del(&cmd->list);
1243	mgmt_pending_free(cmd);
1244}
1245
1246static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1247{
1248	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1249
1250	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1251			    sizeof(settings));
1252}
1253
1254static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1255{
1256	BT_DBG("%s status 0x%02x", hdev->name, status);
1257
1258	if (hci_conn_count(hdev) == 0) {
1259		cancel_delayed_work(&hdev->power_off);
1260		queue_work(hdev->req_workqueue, &hdev->power_off.work);
1261	}
1262}
1263
/* Queue the HCI commands needed to abort whatever discovery procedure
 * is currently active. Returns true if at least one command was
 * queued (i.e. there was something to stop), false otherwise.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Either BR/EDR inquiry or an LE scan is running;
		 * cancel whichever is active.
		 */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* Abort the in-flight remote name request, if any entry
		 * is still waiting for its name.
		 */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning: stop a background LE scan if one
		 * is running.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1305
/* Build and run the HCI request that brings the controller to a
 * quiescent state before power off: disable scanning and advertising,
 * abort any discovery, and disconnect/cancel/reject every connection.
 *
 * Returns the hci_req_run() result; -ENODATA means nothing needed to
 * be queued.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	/* Turn off page and inquiry scan if either is active. */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established links are disconnected. */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection attempts are cancelled. */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection requests are rejected. */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1364
/* Handle MGMT_OP_SET_POWERED: power the controller up or down on
 * behalf of user-space. The actual power transition completes
 * asynchronously via the power_on/power_off work items.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* The mode parameter is strictly boolean. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_POWERED operation may be in flight at a time. */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* If the device was about to auto-power-off, cancel that; when
	 * powering on, simply claim the already-powered device.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: just report the settings. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1430
1431static int new_settings(struct hci_dev *hdev, struct sock *skip)
1432{
1433	__le32 ev;
1434
1435	ev = cpu_to_le32(get_current_settings(hdev));
1436
1437	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1438}
1439
/* Exported helper: broadcast a New Settings event to all mgmt sockets. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1444
/* Shared state for mgmt_pending_foreach() callbacks that respond to
 * pending commands and remember one socket to skip when broadcasting.
 */
struct cmd_lookup {
	struct sock *sk;	/* first responding socket (reference held) */
	struct hci_dev *hdev;	/* device the pending commands belong to */
	u8 mgmt_status;		/* status to report, where applicable */
};
1450
1451static void settings_rsp(struct pending_cmd *cmd, void *data)
1452{
1453	struct cmd_lookup *match = data;
1454
1455	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1456
1457	list_del(&cmd->list);
1458
1459	if (match->sk == NULL) {
1460		match->sk = cmd->sk;
1461		sock_hold(match->sk);
1462	}
1463
1464	mgmt_pending_free(cmd);
1465}
1466
1467static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1468{
1469	u8 *status = data;
1470
1471	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1472	mgmt_pending_remove(cmd);
1473}
1474
1475static u8 mgmt_bredr_support(struct hci_dev *hdev)
1476{
1477	if (!lmp_bredr_capable(hdev))
1478		return MGMT_STATUS_NOT_SUPPORTED;
1479	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1480		return MGMT_STATUS_REJECTED;
1481	else
1482		return MGMT_STATUS_SUCCESS;
1483}
1484
1485static u8 mgmt_le_support(struct hci_dev *hdev)
1486{
1487	if (!lmp_le_capable(hdev))
1488		return MGMT_STATUS_NOT_SUPPORTED;
1489	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1490		return MGMT_STATUS_REJECTED;
1491	else
1492		return MGMT_STATUS_SUCCESS;
1493}
1494
/* Request-complete callback for set_discoverable(): finalize the
 * discoverable flags, reply to the pending mgmt command, arm the
 * discoverable timeout and refresh the class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the flag set optimistically in
		 * set_discoverable().
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timer that turns discoverable back off. */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1551
/* Handle MGMT_OP_SET_DISCOVERABLE: val 0x00 disables, 0x01 enables
 * general and 0x02 enables limited discoverable mode. A timeout may
 * only be given when enabling, and is mandatory for limited mode.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Discoverable requires at least one of BR/EDR or LE enabled. */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while the device is powered off. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable; refuse otherwise. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1716
1717static void write_fast_connectable(struct hci_request *req, bool enable)
1718{
1719	struct hci_dev *hdev = req->hdev;
1720	struct hci_cp_write_page_scan_activity acp;
1721	u8 type;
1722
1723	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1724		return;
1725
1726	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1727		return;
1728
1729	if (enable) {
1730		type = PAGE_SCAN_TYPE_INTERLACED;
1731
1732		/* 160 msec page scan interval */
1733		acp.interval = cpu_to_le16(0x0100);
1734	} else {
1735		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
1736
1737		/* default 1.28 sec page scan */
1738		acp.interval = cpu_to_le16(0x0800);
1739	}
1740
1741	acp.window = cpu_to_le16(0x0012);
1742
1743	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1744	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
1745		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1746			    sizeof(acp), &acp);
1747
1748	if (hdev->page_scan_type != type)
1749		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1750}
1751
/* Request-complete callback for set_connectable(): commit the
 * connectable (and, when disabling, discoverable) flags and notify
 * user-space of any change.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
		discov_changed = false;
	} else {
		/* Disabling connectable also disables discoverable. */
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1799
1800static int set_connectable_update_settings(struct hci_dev *hdev,
1801					   struct sock *sk, u8 val)
1802{
1803	bool changed = false;
1804	int err;
1805
1806	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1807		changed = true;
1808
1809	if (val) {
1810		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1811	} else {
1812		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1813		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1814	}
1815
1816	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1817	if (err < 0)
1818		return err;
1819
1820	if (changed) {
1821		hci_update_background_scan(hdev);
1822		return new_settings(hdev, sk);
1823	}
1824
1825	return 0;
1826}
1827
/* Handle MGMT_OP_SET_CONNECTABLE: toggle page scan (BR/EDR) and/or
 * connectable advertising (LE).
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one of BR/EDR or LE must be enabled. */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just update the stored settings. */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Leaving connectable also stops a pending
			 * discoverable timeout while inquiry scan is
			 * still active.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Update the advertising parameters if necessary */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA means no HCI command was queued; fall back
		 * to only updating the stored settings.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1920
1921static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1922			u16 len)
1923{
1924	struct mgmt_mode *cp = data;
1925	bool changed;
1926	int err;
1927
1928	BT_DBG("request for %s", hdev->name);
1929
1930	if (cp->val != 0x00 && cp->val != 0x01)
1931		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1932				  MGMT_STATUS_INVALID_PARAMS);
1933
1934	hci_dev_lock(hdev);
1935
1936	if (cp->val)
1937		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1938	else
1939		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1940
1941	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1942	if (err < 0)
1943		goto unlock;
1944
1945	if (changed)
1946		err = new_settings(hdev, sk);
1947
1948unlock:
1949	hci_dev_unlock(hdev);
1950	return err;
1951}
1952
/* Handle MGMT_OP_SET_LINK_SECURITY: toggle BR/EDR link level security
 * via HCI Write Auth Enable.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Link level security is a BR/EDR-only feature. */
	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored flag changes. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested state: no HCI needed. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* The mgmt reply is sent once the HCI command completes. */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2022
/* Handle MGMT_OP_SET_SSP: enable or disable Secure Simple Pairing.
 * Disabling SSP also disables High Speed support, which depends on it.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored flags change. */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			/* Dropping SSP drags HS down with it; report a
			 * change if either flag actually flipped.
			 */
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP also turns off debug keys if they were in use. */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2104
/* Handle MGMT_OP_SET_HS: toggle High Speed (AMP) support. This is a
 * pure host-side setting, so no HCI command is needed.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* High Speed requires SSP to be enabled first. */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only permitted while powered off. */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2155
/* Request-complete callback for set_le(): reply to all pending SET_LE
 * commands and, when LE got enabled, refresh the advertising data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	/* Skip the socket that originated the change when broadcasting. */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_update_background_scan(hdev);

		hci_dev_unlock(hdev);
	}
}
2195
/* Handle MGMT_OP_SET_LE: toggle Low Energy support via the HCI Write
 * LE Host Supported command.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Powered off, or controller already in the requested state:
	 * only the stored flags need adjusting.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Turning LE off also turns off LE advertising. */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Stop advertising before LE support is switched off. */
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2284
2285/* This is a helper function to test for pending mgmt commands that can
2286 * cause CoD or EIR HCI commands. We can only allow one such pending
2287 * mgmt command at a time since otherwise we cannot easily track what
2288 * the current values are, will be, and based on that calculate if a new
2289 * HCI command needs to be sent and if yes with what value.
2290 */
2291static bool pending_eir_or_class(struct hci_dev *hdev)
2292{
2293	struct pending_cmd *cmd;
2294
2295	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2296		switch (cmd->opcode) {
2297		case MGMT_OP_ADD_UUID:
2298		case MGMT_OP_REMOVE_UUID:
2299		case MGMT_OP_SET_DEV_CLASS:
2300		case MGMT_OP_SET_POWERED:
2301			return true;
2302		}
2303	}
2304
2305	return false;
2306}
2307
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored
 * in little-endian byte order; bytes 12-15 carry the shortened UUID
 * value for 16/32-bit UUIDs.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2312
2313static u8 get_uuid_size(const u8 *uuid)
2314{
2315	u32 val;
2316
2317	if (memcmp(uuid, bluetooth_base_uuid, 12))
2318		return 128;
2319
2320	val = get_unaligned_le32(&uuid[12]);
2321	if (val > 0xffff)
2322		return 32;
2323
2324	return 16;
2325}
2326
2327static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2328{
2329	struct pending_cmd *cmd;
2330
2331	hci_dev_lock(hdev);
2332
2333	cmd = mgmt_pending_find(mgmt_op, hdev);
2334	if (!cmd)
2335		goto unlock;
2336
2337	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2338		     hdev->dev_class, 3);
2339
2340	mgmt_pending_remove(cmd);
2341
2342unlock:
2343	hci_dev_unlock(hdev);
2344}
2345
/* hci_request completion callback for MGMT_OP_ADD_UUID; forwards the
 * HCI status to the generic class/EIR completion handler.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2352
/* Add a UUID to the device's service list and queue HCI commands to
 * refresh the Class of Device and EIR data accordingly. The mgmt reply
 * is deferred until the HCI request completes (add_uuid_complete).
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one pending CoD/EIR-changing command is allowed at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		/* -ENODATA is treated as success: no HCI commands were
		 * queued, so reply immediately with the current class.
		 */
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	/* Defer the reply until the HCI request completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2410
2411static bool enable_service_cache(struct hci_dev *hdev)
2412{
2413	if (!hdev_is_powered(hdev))
2414		return false;
2415
2416	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2417		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2418				   CACHE_TIMEOUT);
2419		return true;
2420	}
2421
2422	return false;
2423}
2424
/* hci_request completion callback for MGMT_OP_REMOVE_UUID; forwards the
 * HCI status to the generic class/EIR completion handler.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2431
/* Remove one UUID from the service list — or all of them when the
 * all-zero wildcard UUID is given — and refresh the Class of Device and
 * EIR data on the controller.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one pending CoD/EIR-changing command is allowed at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The all-zero UUID acts as a wildcard and clears the whole list */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If this call armed the service cache timer, the actual
		 * CoD/EIR update is deferred; reply immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* _safe variant because matching entries are deleted while walking */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA is treated as success: no HCI commands were
		 * queued, so reply immediately with the current class.
		 */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Defer the reply until the HCI request completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2509
/* hci_request completion callback for MGMT_OP_SET_DEV_CLASS; forwards
 * the HCI status to the generic class/EIR completion handler.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2516
/* Set the major/minor Class of Device and, when powered, push the new
 * class (plus refreshed EIR data if the service cache was active) to
 * the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one pending CoD/EIR-changing command is allowed at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The two lowest minor bits and three highest major bits must be
	 * zero (reserved/format bits of the Class of Device field).
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off just store the values; no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* The lock is dropped around the synchronous cancel —
		 * presumably because the service_cache work itself takes
		 * the hdev lock (TODO confirm), which would deadlock.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA is treated as success: no HCI commands were
		 * queued, so reply immediately with the current class.
		 */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Defer the reply until the HCI request completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2587
/* Replace the entire stored BR/EDR link key list with the keys supplied
 * by userspace and update the keep-debug-keys setting.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Largest key_count for which expected_len below still fits in u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry up front so the existing key list is not
	 * cleared when any entry turns out to be malformed.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	/* Broadcast new settings only when the flag actually flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2669
2670static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2671			   u8 addr_type, struct sock *skip_sk)
2672{
2673	struct mgmt_ev_device_unpaired ev;
2674
2675	bacpy(&ev.addr.bdaddr, bdaddr);
2676	ev.addr.type = addr_type;
2677
2678	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2679			  skip_sk);
2680}
2681
2682static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2683			 u16 len)
2684{
2685	struct mgmt_cp_unpair_device *cp = data;
2686	struct mgmt_rp_unpair_device rp;
2687	struct hci_cp_disconnect dc;
2688	struct pending_cmd *cmd;
2689	struct hci_conn *conn;
2690	int err;
2691
2692	memset(&rp, 0, sizeof(rp));
2693	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2694	rp.addr.type = cp->addr.type;
2695
2696	if (!bdaddr_type_is_valid(cp->addr.type))
2697		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2698				    MGMT_STATUS_INVALID_PARAMS,
2699				    &rp, sizeof(rp));
2700
2701	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2702		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2703				    MGMT_STATUS_INVALID_PARAMS,
2704				    &rp, sizeof(rp));
2705
2706	hci_dev_lock(hdev);
2707
2708	if (!hdev_is_powered(hdev)) {
2709		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2710				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2711		goto unlock;
2712	}
2713
2714	if (cp->addr.type == BDADDR_BREDR) {
2715		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2716	} else {
2717		u8 addr_type;
2718
2719		if (cp->addr.type == BDADDR_LE_PUBLIC)
2720			addr_type = ADDR_LE_DEV_PUBLIC;
2721		else
2722			addr_type = ADDR_LE_DEV_RANDOM;
2723
2724		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2725
2726		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2727
2728		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2729	}
2730
2731	if (err < 0) {
2732		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2733				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2734		goto unlock;
2735	}
2736
2737	if (cp->disconnect) {
2738		if (cp->addr.type == BDADDR_BREDR)
2739			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2740						       &cp->addr.bdaddr);
2741		else
2742			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2743						       &cp->addr.bdaddr);
2744	} else {
2745		conn = NULL;
2746	}
2747
2748	if (!conn) {
2749		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2750				   &rp, sizeof(rp));
2751		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2752		goto unlock;
2753	}
2754
2755	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2756			       sizeof(*cp));
2757	if (!cmd) {
2758		err = -ENOMEM;
2759		goto unlock;
2760	}
2761
2762	dc.handle = cpu_to_le16(conn->handle);
2763	dc.reason = 0x13; /* Remote User Terminated Connection */
2764	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2765	if (err < 0)
2766		mgmt_pending_remove(cmd);
2767
2768unlock:
2769	hci_dev_unlock(hdev);
2770	return err;
2771}
2772
/* Handle MGMT_OP_DISCONNECT: issue an HCI Disconnect for the addressed
 * ACL or LE link. The mgmt reply is deferred until the disconnect event
 * arrives (tracked via the pending command).
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no link to disconnect */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	/* Defer the reply until the disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2837
2838static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2839{
2840	switch (link_type) {
2841	case LE_LINK:
2842		switch (addr_type) {
2843		case ADDR_LE_DEV_PUBLIC:
2844			return BDADDR_LE_PUBLIC;
2845
2846		default:
2847			/* Fallback to LE Random address type */
2848			return BDADDR_LE_RANDOM;
2849		}
2850
2851	default:
2852		/* Fallback to BR/EDR type */
2853		return BDADDR_BREDR;
2854	}
2855}
2856
2857static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2858			   u16 data_len)
2859{
2860	struct mgmt_rp_get_connections *rp;
2861	struct hci_conn *c;
2862	size_t rp_len;
2863	int err;
2864	u16 i;
2865
2866	BT_DBG("");
2867
2868	hci_dev_lock(hdev);
2869
2870	if (!hdev_is_powered(hdev)) {
2871		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2872				 MGMT_STATUS_NOT_POWERED);
2873		goto unlock;
2874	}
2875
2876	i = 0;
2877	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2878		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2879			i++;
2880	}
2881
2882	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2883	rp = kmalloc(rp_len, GFP_KERNEL);
2884	if (!rp) {
2885		err = -ENOMEM;
2886		goto unlock;
2887	}
2888
2889	i = 0;
2890	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2891		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2892			continue;
2893		bacpy(&rp->addr[i].bdaddr, &c->dst);
2894		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2895		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2896			continue;
2897		i++;
2898	}
2899
2900	rp->conn_count = cpu_to_le16(i);
2901
2902	/* Recalculate length in case of filtered SCO connections, etc */
2903	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2904
2905	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2906			   rp_len);
2907
2908	kfree(rp);
2909
2910unlock:
2911	hci_dev_unlock(hdev);
2912	return err;
2913}
2914
/* Send an HCI PIN Code Negative Reply for the given address, tracking
 * it as a pending mgmt command so the eventual HCI completion can be
 * matched back to the requesting socket.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2933
/* Handle MGMT_OP_PIN_CODE_REPLY: forward the user-supplied PIN to the
 * controller. A connection requesting high security must use a full
 * 16-digit PIN; shorter PINs are rejected with a negative reply.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security demands a 16-digit PIN; reject anything shorter
	 * by sending a negative reply to the controller instead.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	/* Defer the reply until the HCI command completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2993
2994static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2995			     u16 len)
2996{
2997	struct mgmt_cp_set_io_capability *cp = data;
2998
2999	BT_DBG("");
3000
3001	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3002		return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3003				    MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3004
3005	hci_dev_lock(hdev);
3006
3007	hdev->io_capability = cp->io_capability;
3008
3009	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3010	       hdev->io_capability);
3011
3012	hci_dev_unlock(hdev);
3013
3014	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
3015			    0);
3016}
3017
3018static struct pending_cmd *find_pairing(struct hci_conn *conn)
3019{
3020	struct hci_dev *hdev = conn->hdev;
3021	struct pending_cmd *cmd;
3022
3023	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3024		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3025			continue;
3026
3027		if (cmd->user_data != conn)
3028			continue;
3029
3030		return cmd;
3031	}
3032
3033	return NULL;
3034}
3035
/* Finish a pending Pair Device command: send the reply with the given
 * status, detach the pairing callbacks from the connection, drop our
 * connection reference and free the pending command.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
3056
3057void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3058{
3059	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3060	struct pending_cmd *cmd;
3061
3062	cmd = find_pairing(conn);
3063	if (cmd)
3064		pairing_complete(cmd, status);
3065}
3066
3067static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3068{
3069	struct pending_cmd *cmd;
3070
3071	BT_DBG("status %u", status);
3072
3073	cmd = find_pairing(conn);
3074	if (!cmd)
3075		BT_DBG("Unable to find a pending command");
3076	else
3077		pairing_complete(cmd, mgmt_status(status));
3078}
3079
3080static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3081{
3082	struct pending_cmd *cmd;
3083
3084	BT_DBG("status %u", status);
3085
3086	if (!status)
3087		return;
3088
3089	cmd = find_pairing(conn);
3090	if (!cmd)
3091		BT_DBG("Unable to find a pending command");
3092	else
3093		pairing_complete(cmd, mgmt_status(status));
3094}
3095
/* Handle MGMT_OP_PAIR_DEVICE: initiate an ACL or LE connection to the
 * target with dedicated-bonding authentication and hook up callbacks
 * that complete the command when pairing finishes.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* Callbacks already installed means another pairing is using
	 * this connection; report busy and drop our reference.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* If already connected and secure enough, finish immediately */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type, true))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3214
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort the in-flight Pair Device
 * command (which is completed with MGMT_STATUS_CANCELLED) provided its
 * target address matches the one supplied here.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* Nothing to cancel if no pairing is pending */
	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The given address must match the pairing actually in progress */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3256
/* Common handler for the user pairing response commands (PIN negative
 * reply, user confirm/passkey replies and their negative variants).
 * LE responses are routed to SMP and completed synchronously; BR/EDR
 * responses are forwarded to the controller via the given HCI opcode
 * and completed when the HCI command does.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go through SMP, not the controller */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	/* Defer the reply until the HCI command completes */
	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3324
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY via the common pairing-response
 * helper (no passkey needed).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3336
/* Handle MGMT_OP_USER_CONFIRM_REPLY via the common pairing-response
 * helper. The payload length is checked explicitly here.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3352
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY via the common pairing-response
 * helper (no passkey needed).
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3364
/* Handle MGMT_OP_USER_PASSKEY_REPLY via the common pairing-response
 * helper, passing the user-entered passkey through.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3376
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY via the common pairing-response
 * helper (no passkey needed).
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3388
/* Queue an HCI Write Local Name command carrying the current dev_name */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3398
/* hci_request completion callback for MGMT_OP_SET_LOCAL_NAME: reply to
 * the pending command (echoing the requested parameters on success) and
 * remove it from the pending list.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3426
/* Handle MGMT_OP_SET_LOCAL_NAME: store the new long/short names and,
 * when powered, write them to the controller (local name, EIR and LE
 * scan response data as applicable).
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* When powered off just store the name and notify other mgmt
	 * sockets; no HCI traffic is possible.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	/* Defer the reply until the HCI request completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3495
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: request OOB pairing data from the
 * controller (the extended variant when Secure Connections is enabled).
 * The mgmt reply is deferred until the HCI command completes.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one OOB data read may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Secure Connections needs the extended (192+256 bit) OOB data */
	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3543
3544static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3545			       void *data, u16 len)
3546{
3547	int err;
3548
3549	BT_DBG("%s ", hdev->name);
3550
3551	hci_dev_lock(hdev);
3552
3553	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3554		struct mgmt_cp_add_remote_oob_data *cp = data;
3555		u8 status;
3556
3557		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3558					      cp->hash, cp->randomizer);
3559		if (err < 0)
3560			status = MGMT_STATUS_FAILED;
3561		else
3562			status = MGMT_STATUS_SUCCESS;
3563
3564		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3565				   status, &cp->addr, sizeof(cp->addr));
3566	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3567		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3568		u8 status;
3569
3570		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3571						  cp->hash192,
3572						  cp->randomizer192,
3573						  cp->hash256,
3574						  cp->randomizer256);
3575		if (err < 0)
3576			status = MGMT_STATUS_FAILED;
3577		else
3578			status = MGMT_STATUS_SUCCESS;
3579
3580		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3581				   status, &cp->addr, sizeof(cp->addr));
3582	} else {
3583		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3584		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3585				 MGMT_STATUS_INVALID_PARAMS);
3586	}
3587
3588	hci_dev_unlock(hdev);
3589	return err;
3590}
3591
3592static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3593				  void *data, u16 len)
3594{
3595	struct mgmt_cp_remove_remote_oob_data *cp = data;
3596	u8 status;
3597	int err;
3598
3599	BT_DBG("%s", hdev->name);
3600
3601	hci_dev_lock(hdev);
3602
3603	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3604	if (err < 0)
3605		status = MGMT_STATUS_INVALID_PARAMS;
3606	else
3607		status = MGMT_STATUS_SUCCESS;
3608
3609	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3610			   status, &cp->addr, sizeof(cp->addr));
3611
3612	hci_dev_unlock(hdev);
3613	return err;
3614}
3615
3616static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3617{
3618	struct pending_cmd *cmd;
3619	u8 type;
3620	int err;
3621
3622	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3623
3624	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3625	if (!cmd)
3626		return -ENOENT;
3627
3628	type = hdev->discovery.type;
3629
3630	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3631			   &type, sizeof(type));
3632	mgmt_pending_remove(cmd);
3633
3634	return err;
3635}
3636
/* HCI request completion handler for Start Discovery.
 *
 * On failure the pending mgmt command is completed with an error and
 * discovery returns to DISCOVERY_STOPPED. On success the state moves
 * to DISCOVERY_FINDING and, for the LE based discovery types, delayed
 * work is scheduled to disable the LE scan after the discovery timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;

	case DISCOV_TYPE_BREDR:
		/* BR/EDR inquiry has its own duration; no LE scan timer */
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	/* timeout remains zero for BR/EDR (and invalid) discovery types */
	if (!timeout)
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
3675
/* Start Discovery (MGMT_OP_START_DISCOVERY)
 *
 * Builds and runs an HCI request to start device discovery of the
 * requested type (BR/EDR inquiry, LE scan, or interleaved). Completion
 * of the request is reported asynchronously via the pending command
 * added here and handled by start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry conflicts with mgmt driven discovery */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Reject if a discovery procedure is already in progress */
	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* An inquiry may already be running outside of mgmt */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery also needs BR/EDR to be enabled */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				err = cmd_status(sk, hdev->id,
						 MGMT_OP_START_DISCOVERY,
						 MGMT_STATUS_REJECTED);
				mgmt_pending_remove(cmd);
				goto failed;
			}

			disable_advertising(&req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3834
/* Complete a pending Stop Discovery command with the given HCI status
 * translated to a mgmt status. Returns -ENOENT when no Stop Discovery
 * command is pending.
 */
static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &hdev->discovery.type, sizeof(hdev->discovery.type));
	mgmt_pending_remove(cmd);

	return err;
}
3850
3851static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3852{
3853	BT_DBG("status %d", status);
3854
3855	hci_dev_lock(hdev);
3856
3857	if (status) {
3858		mgmt_stop_discovery_failed(hdev, status);
3859		goto unlock;
3860	}
3861
3862	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3863
3864unlock:
3865	hci_dev_unlock(hdev);
3866}
3867
/* Stop Discovery (MGMT_OP_STOP_DISCOVERY)
 *
 * Stops a running discovery procedure of the given type. When HCI
 * commands need to be sent, the reply is deferred to
 * stop_discovery_complete(); when nothing needs to be done on the
 * controller the command is completed immediately.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the running discovery type */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3923
/* Confirm Name (MGMT_OP_CONFIRM_NAME)
 *
 * User space reply for a device found during discovery, indicating
 * whether the device name is already known. If it isn't, the inquiry
 * cache entry is queued for remote name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Name confirmation only makes sense while discovery is active */
	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_FAILED, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known; no resolution necessary */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Queue the entry for remote name resolution */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3965
3966static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3967			u16 len)
3968{
3969	struct mgmt_cp_block_device *cp = data;
3970	u8 status;
3971	int err;
3972
3973	BT_DBG("%s", hdev->name);
3974
3975	if (!bdaddr_type_is_valid(cp->addr.type))
3976		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3977				    MGMT_STATUS_INVALID_PARAMS,
3978				    &cp->addr, sizeof(cp->addr));
3979
3980	hci_dev_lock(hdev);
3981
3982	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
3983				  cp->addr.type);
3984	if (err < 0) {
3985		status = MGMT_STATUS_FAILED;
3986		goto done;
3987	}
3988
3989	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3990		   sk);
3991	status = MGMT_STATUS_SUCCESS;
3992
3993done:
3994	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3995			   &cp->addr, sizeof(cp->addr));
3996
3997	hci_dev_unlock(hdev);
3998
3999	return err;
4000}
4001
4002static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4003			  u16 len)
4004{
4005	struct mgmt_cp_unblock_device *cp = data;
4006	u8 status;
4007	int err;
4008
4009	BT_DBG("%s", hdev->name);
4010
4011	if (!bdaddr_type_is_valid(cp->addr.type))
4012		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4013				    MGMT_STATUS_INVALID_PARAMS,
4014				    &cp->addr, sizeof(cp->addr));
4015
4016	hci_dev_lock(hdev);
4017
4018	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4019				  cp->addr.type);
4020	if (err < 0) {
4021		status = MGMT_STATUS_INVALID_PARAMS;
4022		goto done;
4023	}
4024
4025	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4026		   sk);
4027	status = MGMT_STATUS_SUCCESS;
4028
4029done:
4030	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4031			   &cp->addr, sizeof(cp->addr));
4032
4033	hci_dev_unlock(hdev);
4034
4035	return err;
4036}
4037
4038static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4039			 u16 len)
4040{
4041	struct mgmt_cp_set_device_id *cp = data;
4042	struct hci_request req;
4043	int err;
4044	__u16 source;
4045
4046	BT_DBG("%s", hdev->name);
4047
4048	source = __le16_to_cpu(cp->source);
4049
4050	if (source > 0x0002)
4051		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4052				  MGMT_STATUS_INVALID_PARAMS);
4053
4054	hci_dev_lock(hdev);
4055
4056	hdev->devid_source = source;
4057	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4058	hdev->devid_product = __le16_to_cpu(cp->product);
4059	hdev->devid_version = __le16_to_cpu(cp->version);
4060
4061	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4062
4063	hci_req_init(&req, hdev);
4064	update_eir(&req);
4065	hci_req_run(&req, NULL);
4066
4067	hci_dev_unlock(hdev);
4068
4069	return err;
4070}
4071
/* HCI request completion handler for Set Advertising.
 *
 * On failure every pending Set Advertising command is answered with an
 * error status. On success the HCI_ADVERTISING setting is synced with
 * the actual controller state (HCI_LE_ADV) before the pending commands
 * are completed and New Settings is broadcast.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's advertising state in the mgmt setting */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
	else
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* Release the socket reference saved by settings_rsp, if any */
	if (match.sk)
		sock_put(match.sk);
}
4097
/* Set Advertising (MGMT_OP_SET_ADVERTISING)
 *
 * Enables or disables LE advertising. In several situations the flag
 * is only toggled in software and no HCI traffic happens; otherwise an
 * HCI request is queued and the reply is deferred to
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	/* Only boolean values are accepted */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if the flag actually flipped */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* A concurrent Set Advertising or Set LE operation is in flight */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4177
/* Set Static Address (MGMT_OP_SET_STATIC_ADDRESS)
 *
 * Stores the LE static random address. Only allowed while the
 * controller is powered off; the address must either be BDADDR_ANY
 * (to clear it) or a valid static random address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* The address can only be changed while powered off */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4217
/* Set Scan Parameters (MGMT_OP_SET_SCAN_PARAMS)
 *
 * Stores new LE scan interval and window values (each must be in the
 * range 0x0004-0x4000 and the window must not exceed the interval).
 * If background scanning is currently active it is restarted so the
 * new parameters take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	/* The scan window must fit within the scan interval */
	if (window > interval)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
4273
/* HCI request completion handler for Set Fast Connectable.
 *
 * On success the HCI_FAST_CONNECTABLE setting is updated to match the
 * requested value, the pending command is answered and New Settings is
 * broadcast; on failure only an error status is sent.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		/* The requested value was stored with the pending command */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4306
/* Set Fast Connectable (MGMT_OP_SET_FAST_CONNECTABLE)
 *
 * Toggles fast connectable page scan parameters. Requires BR/EDR to
 * be enabled, a controller of at least Bluetooth 1.2, the controller
 * to be powered and the connectable setting to be on. The reply is
 * deferred to fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean values are accepted */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	/* Fast connectable only makes sense when connectable is enabled */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No HCI traffic needed when the setting is already as requested */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4371
4372static void set_bredr_scan(struct hci_request *req)
4373{
4374	struct hci_dev *hdev = req->hdev;
4375	u8 scan = 0;
4376
4377	/* Ensure that fast connectable is disabled. This function will
4378	 * not do anything if the page scan parameters are already what
4379	 * they should be.
4380	 */
4381	write_fast_connectable(req, false);
4382
4383	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
4384	    !list_empty(&hdev->whitelist))
4385		scan |= SCAN_PAGE;
4386	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4387		scan |= SCAN_INQUIRY;
4388
4389	if (scan)
4390		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4391}
4392
/* HCI request completion handler for Set BR/EDR.
 *
 * On failure the HCI_BREDR_ENABLED flag (which was optimistically set
 * by set_bredr() before running the request) is rolled back and an
 * error status is sent; on success the pending command is answered and
 * New Settings is broadcast.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4424
/* Set BR/EDR (MGMT_OP_SET_BREDR)
 *
 * Enables or disables BR/EDR support on a dual-mode controller.
 * Disabling is only possible while powered off; when powered on, only
 * enabling is allowed and the reply is deferred to
 * set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only meaningful on dual-mode (BR/EDR + LE) controllers */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay enabled, so BR/EDR can't be toggled without it */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	/* Only boolean values are accepted */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR clears all BR/EDR-only settings */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
	    !list_empty(&hdev->whitelist))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4515
/* Set Secure Connections (MGMT_OP_SET_SECURE_CONN)
 *
 * Controls the Secure Connections support setting. Accepted values
 * are 0x00 (off), 0x01 (on) and 0x02 (SC-only mode). When powered
 * off only the flags are toggled; when powered on the Write SC
 * Support HCI command is sent and the reply comes via the pending
 * command's completion path.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* Requires SC capable hardware unless forced via debugfs */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			/* 0x02 selects SC-only mode */
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No HCI traffic needed when both flags already match */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* SC-only selection is tracked in software only */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4603
/* Set Debug Keys (MGMT_OP_SET_DEBUG_KEYS)
 *
 * Controls debug key handling. Accepted values: 0x00 (off), 0x01
 * (keep debug keys) and 0x02 (also use SSP debug mode). When the use
 * mode changes on a powered controller with SSP enabled, the SSP
 * debug mode HCI command is sent.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Any non-zero value enables keeping of debug keys */
	if (cp->val)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	/* Only 0x02 additionally enables use of debug keys */
	if (cp->val == 0x02)
		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
						&hdev->dev_flags);
	else
		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
						 &hdev->dev_flags);

	if (hdev_is_powered(hdev) && use_changed &&
	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4651
/* Set Privacy (MGMT_OP_SET_PRIVACY)
 *
 * Enables or disables LE privacy and stores the local IRK. Only
 * allowed while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean privacy values are accepted */
	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	/* The setting can only be changed while powered off */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next use */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4701
4702static bool irk_is_valid(struct mgmt_irk_info *irk)
4703{
4704	switch (irk->addr.type) {
4705	case BDADDR_LE_PUBLIC:
4706		return true;
4707
4708	case BDADDR_LE_RANDOM:
4709		/* Two most significant bits shall be set */
4710		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4711			return false;
4712		return true;
4713	}
4714
4715	return false;
4716}
4717
4718static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4719		     u16 len)
4720{
4721	struct mgmt_cp_load_irks *cp = cp_data;
4722	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4723				   sizeof(struct mgmt_irk_info));
4724	u16 irk_count, expected_len;
4725	int i, err;
4726
4727	BT_DBG("request for %s", hdev->name);
4728
4729	if (!lmp_le_capable(hdev))
4730		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4731				  MGMT_STATUS_NOT_SUPPORTED);
4732
4733	irk_count = __le16_to_cpu(cp->irk_count);
4734	if (irk_count > max_irk_count) {
4735		BT_ERR("load_irks: too big irk_count value %u", irk_count);
4736		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4737				  MGMT_STATUS_INVALID_PARAMS);
4738	}
4739
4740	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4741	if (expected_len != len) {
4742		BT_ERR("load_irks: expected %u bytes, got %u bytes",
4743		       expected_len, len);
4744		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4745				  MGMT_STATUS_INVALID_PARAMS);
4746	}
4747
4748	BT_DBG("%s irk_count %u", hdev->name, irk_count);
4749
4750	for (i = 0; i < irk_count; i++) {
4751		struct mgmt_irk_info *key = &cp->irks[i];
4752
4753		if (!irk_is_valid(key))
4754			return cmd_status(sk, hdev->id,
4755					  MGMT_OP_LOAD_IRKS,
4756					  MGMT_STATUS_INVALID_PARAMS);
4757	}
4758
4759	hci_dev_lock(hdev);
4760
4761	hci_smp_irks_clear(hdev);
4762
4763	for (i = 0; i < irk_count; i++) {
4764		struct mgmt_irk_info *irk = &cp->irks[i];
4765		u8 addr_type;
4766
4767		if (irk->addr.type == BDADDR_LE_PUBLIC)
4768			addr_type = ADDR_LE_DEV_PUBLIC;
4769		else
4770			addr_type = ADDR_LE_DEV_RANDOM;
4771
4772		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4773			    BDADDR_ANY);
4774	}
4775
4776	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4777
4778	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4779
4780	hci_dev_unlock(hdev);
4781
4782	return err;
4783}
4784
4785static bool ltk_is_valid(struct mgmt_ltk_info *key)
4786{
4787	if (key->master != 0x00 && key->master != 0x01)
4788		return false;
4789
4790	switch (key->addr.type) {
4791	case BDADDR_LE_PUBLIC:
4792		return true;
4793
4794	case BDADDR_LE_RANDOM:
4795		/* Two most significant bits shall be set */
4796		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4797			return false;
4798		return true;
4799	}
4800
4801	return false;
4802}
4803
4804static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4805			       void *cp_data, u16 len)
4806{
4807	struct mgmt_cp_load_long_term_keys *cp = cp_data;
4808	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4809				   sizeof(struct mgmt_ltk_info));
4810	u16 key_count, expected_len;
4811	int i, err;
4812
4813	BT_DBG("request for %s", hdev->name);
4814
4815	if (!lmp_le_capable(hdev))
4816		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4817				  MGMT_STATUS_NOT_SUPPORTED);
4818
4819	key_count = __le16_to_cpu(cp->key_count);
4820	if (key_count > max_key_count) {
4821		BT_ERR("load_ltks: too big key_count value %u", key_count);
4822		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4823				  MGMT_STATUS_INVALID_PARAMS);
4824	}
4825
4826	expected_len = sizeof(*cp) + key_count *
4827					sizeof(struct mgmt_ltk_info);
4828	if (expected_len != len) {
4829		BT_ERR("load_keys: expected %u bytes, got %u bytes",
4830		       expected_len, len);
4831		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4832				  MGMT_STATUS_INVALID_PARAMS);
4833	}
4834
4835	BT_DBG("%s key_count %u", hdev->name, key_count);
4836
4837	for (i = 0; i < key_count; i++) {
4838		struct mgmt_ltk_info *key = &cp->keys[i];
4839
4840		if (!ltk_is_valid(key))
4841			return cmd_status(sk, hdev->id,
4842					  MGMT_OP_LOAD_LONG_TERM_KEYS,
4843					  MGMT_STATUS_INVALID_PARAMS);
4844	}
4845
4846	hci_dev_lock(hdev);
4847
4848	hci_smp_ltks_clear(hdev);
4849
4850	for (i = 0; i < key_count; i++) {
4851		struct mgmt_ltk_info *key = &cp->keys[i];
4852		u8 type, addr_type, authenticated;
4853
4854		if (key->addr.type == BDADDR_LE_PUBLIC)
4855			addr_type = ADDR_LE_DEV_PUBLIC;
4856		else
4857			addr_type = ADDR_LE_DEV_RANDOM;
4858
4859		if (key->master)
4860			type = SMP_LTK;
4861		else
4862			type = SMP_LTK_SLAVE;
4863
4864		switch (key->type) {
4865		case MGMT_LTK_UNAUTHENTICATED:
4866			authenticated = 0x00;
4867			break;
4868		case MGMT_LTK_AUTHENTICATED:
4869			authenticated = 0x01;
4870			break;
4871		default:
4872			continue;
4873		}
4874
4875		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4876			    authenticated, key->val, key->enc_size, key->ediv,
4877			    key->rand);
4878	}
4879
4880	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4881			   NULL, 0);
4882
4883	hci_dev_unlock(hdev);
4884
4885	return err;
4886}
4887
/* Context passed through mgmt_pending_foreach() to
 * get_conn_info_complete(): identifies which connection the pending
 * Get Connection Information commands should be answered for, and
 * carries the status/validity determined by the completed HCI request.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;		/* connection the reply targets */
	bool valid_tx_power;		/* conn->tx_power/max_tx_power usable */
	u8 mgmt_status;			/* mgmt status code for the reply */
};
4893
/* mgmt_pending_foreach() callback: answer one pending Get Connection
 * Information command once the RSSI/TX power cache refresh finished.
 * @data is a struct cmd_conn_lookup selecting the target connection.
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	/* Only reply to commands queued for this particular connection */
	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	/* Echo the address from the original request back in the reply */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			/* TX power read failed; report invalid markers */
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	/* Drop the reference taken in get_conn_info() when queueing */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
4929
/* HCI request completion callback for the RSSI/TX power refresh built
 * in get_conn_info(). Recovers the connection handle from the last
 * sent command and replies to the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	/* Neither command was found in sent_cmd: nothing we can match */
	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
4987
/* Handle the Get Connection Information mgmt command. Replies from the
 * cached RSSI/TX power values when they are fresh enough; otherwise
 * issues an HCI request to refresh them and defers the reply to
 * conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Every reply (including errors) echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* Look up the connection on the link type implied by the
	 * requested address type.
	 */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		/* Read RSSI is always first in the request; the completion
		 * handler relies on this ordering (see
		 * conn_info_refresh_complete()).
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		/* Defer the reply; it is sent by get_conn_info_complete() */
		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the reply is sent;
		 * dropped in get_conn_info_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5099
/* HCI request completion callback for get_clock_info(). Matches the
 * pending mgmt command via the connection the Read Clock command was
 * issued for (NULL for a local-clock-only request) and sends the reply.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a specific connection
	 * was requested; recover the connection from the handle.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* The pending command was registered with conn as user_data */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	/* On failure only the echoed address is reported */
	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	/* Release the reference taken in get_clock_info() */
	if (conn)
		hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
5152
/* Handle the Get Clock Information mgmt command. Issues HCI Read Clock
 * for the local clock and, when a peer address is given, for that
 * connection's piconet clock; the reply is sent from
 * get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* Error replies echo the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information only exists for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address selects an existing connection whose piconet
	 * clock should be read as well.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_GET_CLOCK_INFO,
					   MGMT_STATUS_NOT_CONNECTED,
					   &rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* First Read Clock: which = 0 reads the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Hold the connection until the reply is sent; dropped
		 * in get_clock_info_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		/* Second Read Clock for the connection's piconet clock */
		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5225
5226/* Helper for Add/Remove Device commands */
5227static void update_page_scan(struct hci_dev *hdev, u8 scan)
5228{
5229	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5230		return;
5231
5232	if (!hdev_is_powered(hdev))
5233		return;
5234
5235	/* If HCI_CONNECTABLE is set then Add/Remove Device should not
5236	 * make any changes to page scanning.
5237	 */
5238	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
5239		return;
5240
5241	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5242		scan |= SCAN_INQUIRY;
5243
5244	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5245}
5246
5247static void device_added(struct sock *sk, struct hci_dev *hdev,
5248			 bdaddr_t *bdaddr, u8 type, u8 action)
5249{
5250	struct mgmt_ev_device_added ev;
5251
5252	bacpy(&ev.addr.bdaddr, bdaddr);
5253	ev.addr.type = type;
5254	ev.action = action;
5255
5256	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5257}
5258
/* Handle the Add Device mgmt command. For BR/EDR the device is put on
 * the connection whitelist (only the incoming-connection action is
 * supported); for LE the action is translated into an auto-connect
 * policy stored in the connection parameters.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	/* Defined actions: 0x00 background scan, 0x01 allow incoming
	 * connection, 0x02 auto-connect.
	 */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		bool update_scan;

		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Page scan only needs enabling when the whitelist goes
		 * from empty to non-empty.
		 */
		update_scan = list_empty(&hdev->whitelist);

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		if (update_scan)
			update_page_scan(hdev, SCAN_PAGE);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* Map the mgmt action to the LE auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				   MGMT_STATUS_FAILED,
				   &cp->addr, sizeof(cp->addr));
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5338
5339static void device_removed(struct sock *sk, struct hci_dev *hdev,
5340			   bdaddr_t *bdaddr, u8 type)
5341{
5342	struct mgmt_ev_device_removed ev;
5343
5344	bacpy(&ev.addr.bdaddr, bdaddr);
5345	ev.addr.type = type;
5346
5347	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5348}
5349
/* Handle the Remove Device mgmt command. A specific address removes
 * that device from the whitelist (BR/EDR) or drops its connection
 * parameters (LE); BDADDR_ANY clears the whitelist and all non-disabled
 * LE connection parameters in one go.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* A specific device was given */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				/* Device was not on the whitelist */
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_REMOVE_DEVICE,
						   MGMT_STATUS_INVALID_PARAMS,
						   &cp->addr, sizeof(cp->addr));
				goto unlock;
			}

			/* Last whitelist entry gone: page scan no longer
			 * needed for incoming connections.
			 */
			if (list_empty(&hdev->whitelist))
				update_page_scan(hdev, SCAN_DISABLED);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled entries were not added via Add Device, so they
		 * cannot be removed through this command either.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		/* Re-evaluate passive scanning now that the entry is gone */
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove all devices */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* For the wildcard address only type 0 (BR/EDR) is valid */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		update_page_scan(hdev, SCAN_DISABLED);

		/* Disabled entries are kept: they were never exposed via
		 * Add Device, so Remove Device must not touch them.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5459
5460static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5461			   u16 len)
5462{
5463	struct mgmt_cp_load_conn_param *cp = data;
5464	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5465				     sizeof(struct mgmt_conn_param));
5466	u16 param_count, expected_len;
5467	int i;
5468
5469	if (!lmp_le_capable(hdev))
5470		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5471				  MGMT_STATUS_NOT_SUPPORTED);
5472
5473	param_count = __le16_to_cpu(cp->param_count);
5474	if (param_count > max_param_count) {
5475		BT_ERR("load_conn_param: too big param_count value %u",
5476		       param_count);
5477		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5478				  MGMT_STATUS_INVALID_PARAMS);
5479	}
5480
5481	expected_len = sizeof(*cp) + param_count *
5482					sizeof(struct mgmt_conn_param);
5483	if (expected_len != len) {
5484		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5485		       expected_len, len);
5486		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5487				  MGMT_STATUS_INVALID_PARAMS);
5488	}
5489
5490	BT_DBG("%s param_count %u", hdev->name, param_count);
5491
5492	hci_dev_lock(hdev);
5493
5494	hci_conn_params_clear_disabled(hdev);
5495
5496	for (i = 0; i < param_count; i++) {
5497		struct mgmt_conn_param *param = &cp->params[i];
5498		struct hci_conn_params *hci_param;
5499		u16 min, max, latency, timeout;
5500		u8 addr_type;
5501
5502		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5503		       param->addr.type);
5504
5505		if (param->addr.type == BDADDR_LE_PUBLIC) {
5506			addr_type = ADDR_LE_DEV_PUBLIC;
5507		} else if (param->addr.type == BDADDR_LE_RANDOM) {
5508			addr_type = ADDR_LE_DEV_RANDOM;
5509		} else {
5510			BT_ERR("Ignoring invalid connection parameters");
5511			continue;
5512		}
5513
5514		min = le16_to_cpu(param->min_interval);
5515		max = le16_to_cpu(param->max_interval);
5516		latency = le16_to_cpu(param->latency);
5517		timeout = le16_to_cpu(param->timeout);
5518
5519		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5520		       min, max, latency, timeout);
5521
5522		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5523			BT_ERR("Ignoring invalid connection parameters");
5524			continue;
5525		}
5526
5527		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5528						addr_type);
5529		if (!hci_param) {
5530			BT_ERR("Failed to add connection parameters");
5531			continue;
5532		}
5533
5534		hci_param->conn_min_interval = min;
5535		hci_param->conn_max_interval = max;
5536		hci_param->conn_latency = latency;
5537		hci_param->supervision_timeout = timeout;
5538	}
5539
5540	hci_dev_unlock(hdev);
5541
5542	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
5543}
5544
/* Handle the Set External Configuration mgmt command. Toggling the
 * HCI_EXT_CONFIGURED flag may move the controller between the
 * configured and unconfigured index lists, in which case the index is
 * re-registered and, when newly configured, powered on for setup.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* Configuration changes are only allowed while powered off */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				    MGMT_STATUS_INVALID_PARAMS);

	/* Only meaningful for controllers that declare the external
	 * configuration quirk.
	 */
	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the registration state (unconfigured flag) no longer matches
	 * the actual configuration status, migrate the controller between
	 * the unconfigured and configured index lists.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			/* Became configured: power on to complete setup;
			 * the index is added back when setup finishes.
			 */
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose it as a raw,
			 * unconfigured index.
			 */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5602
5603static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5604			      void *data, u16 len)
5605{
5606	struct mgmt_cp_set_public_address *cp = data;
5607	bool changed;
5608	int err;
5609
5610	BT_DBG("%s", hdev->name);
5611
5612	if (hdev_is_powered(hdev))
5613		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5614				  MGMT_STATUS_REJECTED);
5615
5616	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5617		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5618				  MGMT_STATUS_INVALID_PARAMS);
5619
5620	if (!hdev->set_bdaddr)
5621		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5622				  MGMT_STATUS_NOT_SUPPORTED);
5623
5624	hci_dev_lock(hdev);
5625
5626	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5627	bacpy(&hdev->public_addr, &cp->bdaddr);
5628
5629	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5630	if (err < 0)
5631		goto unlock;
5632
5633	if (!changed)
5634		goto unlock;
5635
5636	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5637		err = new_options(hdev, sk);
5638
5639	if (is_configured(hdev)) {
5640		mgmt_index_removed(hdev);
5641
5642		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5643
5644		set_bit(HCI_CONFIG, &hdev->dev_flags);
5645		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5646
5647		queue_work(hdev->req_workqueue, &hdev->power_on);
5648	}
5649
5650unlock:
5651	hci_dev_unlock(hdev);
5652	return err;
5653}
5654
/* Dispatch table for mgmt commands, indexed directly by opcode (see
 * mgmt_control()). For fixed-size commands (var_len == false) the
 * received length must equal data_len exactly; for variable-length
 * commands data_len is the minimum acceptable length.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;		/* payload may exceed data_len */
	size_t data_len;	/* exact (or minimum) parameter length */
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
};
5720
/* Entry point for Bluetooth Management commands received on an HCI
 * control socket.  Copies the whole message from the iovec, validates
 * the fixed-size header and per-command parameter length, resolves the
 * addressed controller index and dispatches to mgmt_handlers[].
 * Returns the consumed message length on success or a negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must describe exactly the remaining
	 * payload, otherwise the message is malformed.
	 */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config, or bound to a user
		 * channel, are hidden from the management interface.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers accept only the small set of
		 * commands needed to complete their configuration.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global commands must not carry a controller index ... */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ... and controller-specific commands must carry one. */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Variable-length commands need at least their fixed part;
	 * fixed-length commands must match exactly.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5830
5831void mgmt_index_added(struct hci_dev *hdev)
5832{
5833	if (hdev->dev_type != HCI_BREDR)
5834		return;
5835
5836	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5837		return;
5838
5839	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5840		mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5841	else
5842		mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5843}
5844
5845void mgmt_index_removed(struct hci_dev *hdev)
5846{
5847	u8 status = MGMT_STATUS_INVALID_INDEX;
5848
5849	if (hdev->dev_type != HCI_BREDR)
5850		return;
5851
5852	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5853		return;
5854
5855	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5856
5857	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5858		mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5859	else
5860		mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5861}
5862
5863/* This function requires the caller holds hdev->lock */
5864static void restart_le_actions(struct hci_dev *hdev)
5865{
5866	struct hci_conn_params *p;
5867
5868	list_for_each_entry(p, &hdev->le_conn_params, list) {
5869		/* Needed for AUTO_OFF case where might not "really"
5870		 * have been powered off.
5871		 */
5872		list_del_init(&p->action);
5873
5874		switch (p->auto_connect) {
5875		case HCI_AUTO_CONN_DIRECT:
5876		case HCI_AUTO_CONN_ALWAYS:
5877			list_add(&p->action, &hdev->pend_le_conns);
5878			break;
5879		case HCI_AUTO_CONN_REPORT:
5880			list_add(&p->action, &hdev->pend_le_reports);
5881			break;
5882		default:
5883			break;
5884		}
5885	}
5886
5887	hci_update_background_scan(hdev);
5888}
5889
5890static void powered_complete(struct hci_dev *hdev, u8 status)
5891{
5892	struct cmd_lookup match = { NULL, hdev };
5893
5894	BT_DBG("status 0x%02x", status);
5895
5896	hci_dev_lock(hdev);
5897
5898	restart_le_actions(hdev);
5899
5900	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5901
5902	new_settings(hdev, match.sk);
5903
5904	hci_dev_unlock(hdev);
5905
5906	if (match.sk)
5907		sock_put(match.sk);
5908}
5909
/* Build and submit the HCI commands needed to bring a freshly powered
 * controller in sync with the current mgmt settings: SSP mode, LE host
 * support, advertising data, link security, scan mode, class, name and
 * EIR.  Returns the result of hci_req_run(); on success the chained
 * powered_complete() callback finishes the power-on sequence.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP on the controller if mgmt wants it but the host
	 * feature bit is not yet set.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Only write authentication enable when it differs from the
	 * controller's current HCI_AUTH state.
	 */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
5969
/* Notify mgmt about a controller power state change.  On power-on,
 * first try to synchronize the controller via powered_update_hci();
 * if that successfully queued commands (returns 0), the New Settings
 * event is sent from its completion callback instead of here.
 * Returns the result of new_settings() or 0 when mgmt is not in use.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		/* Request could not be queued; answer pending Set
		 * Powered commands directly.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	/* Powering off: answer pending Set Powered commands, fail all
	 * other pending commands and signal a cleared class of device.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
6004
6005void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6006{
6007	struct pending_cmd *cmd;
6008	u8 status;
6009
6010	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6011	if (!cmd)
6012		return;
6013
6014	if (err == -ERFKILL)
6015		status = MGMT_STATUS_RFKILLED;
6016	else
6017		status = MGMT_STATUS_FAILED;
6018
6019	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6020
6021	mgmt_pending_remove(cmd);
6022}
6023
/* Handle expiry of the discoverable timeout: clear the discoverable
 * flags, restore page-scan-only mode on BR/EDR, refresh class and
 * advertising data, and emit a New Settings event.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Drop inquiry scan, keep page scan only. */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
6054
6055void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6056		       bool persistent)
6057{
6058	struct mgmt_ev_new_link_key ev;
6059
6060	memset(&ev, 0, sizeof(ev));
6061
6062	ev.store_hint = persistent;
6063	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6064	ev.key.addr.type = BDADDR_BREDR;
6065	ev.key.type = key->type;
6066	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6067	ev.key.pin_len = key->pin_len;
6068
6069	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6070}
6071
6072static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6073{
6074	if (ltk->authenticated)
6075		return MGMT_LTK_AUTHENTICATED;
6076
6077	return MGMT_LTK_UNAUTHENTICATED;
6078}
6079
/* Send a New Long Term Key event for an LE key; store_hint is forced
 * to zero for non-identity random addresses since those keys cannot
 * be re-associated after the address changes.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
6117
/* Send a New IRK event; store_hint is set only when the device is
 * actually using a resolvable private address (non-zero rpa).
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will also be loaded on the next boot of the system.
	 * More identity resolving keys means more time during scanning
	 * is needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
6147
/* Send a New CSRK event; like LTKs, signature resolving keys for
 * non-identity random addresses are flagged as not worth storing.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
6177
6178void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6179			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6180			 u16 max_interval, u16 latency, u16 timeout)
6181{
6182	struct mgmt_ev_new_conn_param ev;
6183
6184	if (!hci_is_identity_address(bdaddr, bdaddr_type))
6185		return;
6186
6187	memset(&ev, 0, sizeof(ev));
6188	bacpy(&ev.addr.bdaddr, bdaddr);
6189	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6190	ev.store_hint = store_hint;
6191	ev.min_interval = cpu_to_le16(min_interval);
6192	ev.max_interval = cpu_to_le16(max_interval);
6193	ev.latency = cpu_to_le16(latency);
6194	ev.timeout = cpu_to_le16(timeout);
6195
6196	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6197}
6198
6199static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6200				  u8 data_len)
6201{
6202	eir[eir_len++] = sizeof(type) + data_len;
6203	eir[eir_len++] = type;
6204	memcpy(&eir[eir_len], data, data_len);
6205	eir_len += data_len;
6206
6207	return eir_len;
6208}
6209
/* Send a Device Connected event carrying the remote name and class of
 * device as EIR-formatted fields.
 *
 * NOTE(review): assumes sizeof(*ev) plus the name and CoD fields fit
 * in the 512-byte buffer — name_len presumably comes from HCI and is
 * bounded; confirm against callers.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only append the class of device when it is non-zero. */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
6236
6237static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6238{
6239	struct mgmt_cp_disconnect *cp = cmd->param;
6240	struct sock **sk = data;
6241	struct mgmt_rp_disconnect rp;
6242
6243	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6244	rp.addr.type = cp->addr.type;
6245
6246	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
6247		     sizeof(rp));
6248
6249	*sk = cmd->sk;
6250	sock_hold(*sk);
6251
6252	mgmt_pending_remove(cmd);
6253}
6254
6255static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6256{
6257	struct hci_dev *hdev = data;
6258	struct mgmt_cp_unpair_device *cp = cmd->param;
6259	struct mgmt_rp_unpair_device rp;
6260
6261	memset(&rp, 0, sizeof(rp));
6262	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6263	rp.addr.type = cp->addr.type;
6264
6265	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6266
6267	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6268
6269	mgmt_pending_remove(cmd);
6270}
6271
/* Emit a Device Disconnected event and resolve any pending Disconnect
 * or Unpair Device commands that this disconnection completes.  Also
 * re-arms the power-off work when this was the last connection held
 * open by a pending Set Powered off command.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	if (!mgmt_connected)
		return;

	/* Only ACL and LE links are exposed over mgmt. */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* A pending Disconnect command hands over its socket (with a
	 * reference) so it can be excluded from the event below —
	 * presumably the issuer already gets the command response.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6313
6314void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6315			    u8 link_type, u8 addr_type, u8 status)
6316{
6317	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6318	struct mgmt_cp_disconnect *cp;
6319	struct mgmt_rp_disconnect rp;
6320	struct pending_cmd *cmd;
6321
6322	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6323			     hdev);
6324
6325	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6326	if (!cmd)
6327		return;
6328
6329	cp = cmd->param;
6330
6331	if (bacmp(bdaddr, &cp->addr.bdaddr))
6332		return;
6333
6334	if (cp->addr.type != bdaddr_type)
6335		return;
6336
6337	bacpy(&rp.addr.bdaddr, bdaddr);
6338	rp.addr.type = bdaddr_type;
6339
6340	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6341		     mgmt_status(status), &rp, sizeof(rp));
6342
6343	mgmt_pending_remove(cmd);
6344}
6345
/* Send a Connect Failed event.  Like mgmt_device_disconnected(), this
 * re-arms the power-off work when a pending Set Powered off command
 * was waiting for the last connection to go away.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
6371
6372void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6373{
6374	struct mgmt_ev_pin_code_request ev;
6375
6376	bacpy(&ev.addr.bdaddr, bdaddr);
6377	ev.addr.type = BDADDR_BREDR;
6378	ev.secure = secure;
6379
6380	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6381}
6382
6383void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6384				  u8 status)
6385{
6386	struct pending_cmd *cmd;
6387	struct mgmt_rp_pin_code_reply rp;
6388
6389	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6390	if (!cmd)
6391		return;
6392
6393	bacpy(&rp.addr.bdaddr, bdaddr);
6394	rp.addr.type = BDADDR_BREDR;
6395
6396	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6397		     mgmt_status(status), &rp, sizeof(rp));
6398
6399	mgmt_pending_remove(cmd);
6400}
6401
6402void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6403				      u8 status)
6404{
6405	struct pending_cmd *cmd;
6406	struct mgmt_rp_pin_code_reply rp;
6407
6408	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6409	if (!cmd)
6410		return;
6411
6412	bacpy(&rp.addr.bdaddr, bdaddr);
6413	rp.addr.type = BDADDR_BREDR;
6414
6415	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6416		     mgmt_status(status), &rp, sizeof(rp));
6417
6418	mgmt_pending_remove(cmd);
6419}
6420
6421int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6422			      u8 link_type, u8 addr_type, u32 value,
6423			      u8 confirm_hint)
6424{
6425	struct mgmt_ev_user_confirm_request ev;
6426
6427	BT_DBG("%s", hdev->name);
6428
6429	bacpy(&ev.addr.bdaddr, bdaddr);
6430	ev.addr.type = link_to_bdaddr(link_type, addr_type);
6431	ev.confirm_hint = confirm_hint;
6432	ev.value = cpu_to_le32(value);
6433
6434	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6435			  NULL);
6436}
6437
6438int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6439			      u8 link_type, u8 addr_type)
6440{
6441	struct mgmt_ev_user_passkey_request ev;
6442
6443	BT_DBG("%s", hdev->name);
6444
6445	bacpy(&ev.addr.bdaddr, bdaddr);
6446	ev.addr.type = link_to_bdaddr(link_type, addr_type);
6447
6448	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6449			  NULL);
6450}
6451
6452static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6453				      u8 link_type, u8 addr_type, u8 status,
6454				      u8 opcode)
6455{
6456	struct pending_cmd *cmd;
6457	struct mgmt_rp_user_confirm_reply rp;
6458	int err;
6459
6460	cmd = mgmt_pending_find(opcode, hdev);
6461	if (!cmd)
6462		return -ENOENT;
6463
6464	bacpy(&rp.addr.bdaddr, bdaddr);
6465	rp.addr.type = link_to_bdaddr(link_type, addr_type);
6466	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6467			   &rp, sizeof(rp));
6468
6469	mgmt_pending_remove(cmd);
6470
6471	return err;
6472}
6473
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
6480
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
6488
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
6495
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
6503
6504int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6505			     u8 link_type, u8 addr_type, u32 passkey,
6506			     u8 entered)
6507{
6508	struct mgmt_ev_passkey_notify ev;
6509
6510	BT_DBG("%s", hdev->name);
6511
6512	bacpy(&ev.addr.bdaddr, bdaddr);
6513	ev.addr.type = link_to_bdaddr(link_type, addr_type);
6514	ev.passkey = __cpu_to_le32(passkey);
6515	ev.entered = entered;
6516
6517	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6518}
6519
6520void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6521		      u8 addr_type, u8 status)
6522{
6523	struct mgmt_ev_auth_failed ev;
6524
6525	bacpy(&ev.addr.bdaddr, bdaddr);
6526	ev.addr.type = link_to_bdaddr(link_type, addr_type);
6527	ev.status = mgmt_status(status);
6528
6529	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
6530}
6531
/* Handle completion of Write Authentication Enable: mirror the
 * controller's HCI_AUTH state into the HCI_LINK_SECURITY setting,
 * answer pending Set Link Security commands and emit New Settings if
 * the setting actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* changed is true only when the flag actually flipped. */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6560
6561static void clear_eir(struct hci_request *req)
6562{
6563	struct hci_dev *hdev = req->hdev;
6564	struct hci_cp_write_eir cp;
6565
6566	if (!lmp_ext_inq_capable(hdev))
6567		return;
6568
6569	memset(hdev->eir, 0, sizeof(hdev->eir));
6570
6571	memset(&cp, 0, sizeof(cp));
6572
6573	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6574}
6575
/* Handle completion of Write Simple Pairing Mode: update the
 * HCI_SSP_ENABLED (and dependent HCI_HS_ENABLED) flags, answer pending
 * Set SSP commands, emit New Settings on change and refresh or clear
 * the EIR accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable rolls back the flag; HS depends on
		 * SSP so it is cleared along with it.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SSP also disables HS; report a change if
		 * either flag flipped.
		 */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6628
/* Handle completion of a Secure Connections enable/disable: update
 * the HCI_SC_ENABLED/HCI_SC_ONLY flags, answer pending Set Secure
 * Connections commands and emit New Settings on change.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable rolls back both SC flags. */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6665
6666static void sk_lookup(struct pending_cmd *cmd, void *data)
6667{
6668	struct cmd_lookup *match = data;
6669
6670	if (match->sk == NULL) {
6671		match->sk = cmd->sk;
6672		sock_hold(match->sk);
6673	}
6674}
6675
/* Handle completion of a class-of-device update triggered by Set Dev
 * Class, Add UUID or Remove UUID; on success broadcast the new class.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands may have caused the change. */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
6692
/* Handle completion of a local name update.  When no Set Local Name
 * command is pending, the change came from within the kernel (e.g.
 * power-on initialization), in which case the name is cached and the
 * event is suppressed during a pending power-on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6719
/* Handle completion of reading local OOB data.  When Secure
 * Connections is enabled and the controller provided the P-256
 * values, reply with the extended (192+256 bit) response; otherwise
 * fall back to the legacy 192-bit-only response.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
6766
/* Send a Device Found event for an inquiry or LE scan result,
 * combining the EIR data, an optional class-of-device field and any
 * scan response data into a single EIR blob.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Only append the class when the EIR doesn't already carry one. */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
6814
6815void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6816		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6817{
6818	struct mgmt_ev_device_found *ev;
6819	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6820	u16 eir_len;
6821
6822	ev = (struct mgmt_ev_device_found *) buf;
6823
6824	memset(buf, 0, sizeof(buf));
6825
6826	bacpy(&ev->addr.bdaddr, bdaddr);
6827	ev->addr.type = link_to_bdaddr(link_type, addr_type);
6828	ev->rssi = rssi;
6829
6830	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6831				  name_len);
6832
6833	ev->eir_len = cpu_to_le16(eir_len);
6834
6835	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6836}
6837
6838void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6839{
6840	struct mgmt_ev_discovering ev;
6841	struct pending_cmd *cmd;
6842
6843	BT_DBG("%s discovering %u", hdev->name, discovering);
6844
6845	if (discovering)
6846		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6847	else
6848		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6849
6850	if (cmd != NULL) {
6851		u8 type = hdev->discovery.type;
6852
6853		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6854			     sizeof(type));
6855		mgmt_pending_remove(cmd);
6856	}
6857
6858	memset(&ev, 0, sizeof(ev));
6859	ev.type = hdev->discovery.type;
6860	ev.discovering = discovering;
6861
6862	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6863}
6864
/* HCI request completion callback for mgmt_reenable_advertising();
 * only logs the status — a failed re-enable is not acted upon here.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);
}
6869
6870void mgmt_reenable_advertising(struct hci_dev *hdev)
6871{
6872	struct hci_request req;
6873
6874	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6875		return;
6876
6877	hci_req_init(&req, hdev);
6878	enable_advertising(&req);
6879	hci_req_run(&req, adv_enable_complete);
6880}
6881