/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

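/* Number of sockets bound to the monitor channel; frames are only
 * mirrored to monitor sockets while this count is non-zero.
 */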
static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

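/* Test bit @nr in the __u32 bitmap at @addr: nr >> 5 selects the word,
 * nr & 31 the bit within it.
 */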
static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
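/* Whitelist of the packet types, events and commands that a raw socket
 * without CAP_NET_RAW may see and send.  Each field is a bitmap probed
 * with hci_test_bit(); the command masks are grouped by OGF and indexed
 * by OCF.
 */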
static struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

/* Send frame to RAW socket */
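/* The frame is copied once, with the packet type byte pushed in front
 * of the data, and then cloned for every bound raw socket whose filter
 * matches.
 */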
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		/* Apply filter */
		flt = &hci_pi(sk)->filter;

		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
			      0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
			      &flt->type_mask))
			continue;

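		/* For HCI events, additionally match the event mask and,
		 * if an opcode filter is set, the opcode carried in
		 * Command Complete / Command Status events.
		 */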
		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

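	/* Map the HCI packet type onto the monitor opcode, taking the
	 * direction of ACL and SCO data into account.
	 */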
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
					       GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

static void send_monitor_event(struct sk_buff *skb)
{
	struct sock *sk;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

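/* Build a monitor NEW_INDEX or DEL_INDEX event for @hdev.  The caller
 * owns the returned skb; NULL is returned on allocation failure or for
 * an unhandled event type.
 */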
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

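/* Replay a NEW_INDEX event for every registered device so that a
 * freshly bound monitor socket learns about controllers that already
 * exist.
 */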
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
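/* Synthesize an HCI_EV_STACK_INTERNAL event and feed it to raw sockets
 * as if it had arrived from a controller.
 */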
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event  = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_del(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

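	/* A raw socket may bind to a specific device, or to HCI_DEV_NONE
	 * to receive only stack-internal events.  The control and
	 * monitor channels take no device and are gated by CAP_NET_ADMIN
	 * and CAP_NET_RAW respectively.
	 */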
	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!hdev)
		return -EBADFD;

	lock_sock(sk);

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = 0;

	release_sock(sk);
	return 0;
}

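/* Attach direction and timestamp ancillary data, as requested through
 * the HCI_DATA_DIR and HCI_TIME_STAMP socket options.
 */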
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

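	/* Commands from sockets without CAP_NET_RAW must pass the
	 * security filter.  Vendor commands (OGF 0x3f) and commands for
	 * raw-mode devices bypass the command queue and go straight to
	 * the raw queue.
	 */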
	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

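	/* Seed the user filter with the socket's current settings so a
	 * short write only overrides the leading fields; sockets without
	 * CAP_NET_RAW are clamped to the security filter.
	 */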
	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}