1/*
2   Copyright (c) 2013-2014 Intel Corp.
3
4   This program is free software; you can redistribute it and/or modify
5   it under the terms of the GNU General Public License version 2 and
6   only version 2 as published by the Free Software Foundation.
7
8   This program is distributed in the hope that it will be useful,
9   but WITHOUT ANY WARRANTY; without even the implied warranty of
10   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11   GNU General Public License for more details.
12*/
13
14#include <linux/if_arp.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17#include <linux/module.h>
18#include <linux/debugfs.h>
19
20#include <net/ipv6.h>
21#include <net/ip6_route.h>
22#include <net/addrconf.h>
23
24#include <net/af_ieee802154.h> /* to get the address type */
25
26#include <net/bluetooth/bluetooth.h>
27#include <net/bluetooth/hci_core.h>
28#include <net/bluetooth/l2cap.h>
29
30#include <net/6lowpan.h> /* for the compression support */
31
#define VERSION "0.1"

/* debugfs files; see lowpan_psm_fops and lowpan_control_fops below */
static struct dentry *lowpan_psm_debugfs;
static struct dentry *lowpan_control_debugfs;

/* Virtual interfaces are named bt0, bt1, ... */
#define IFACE_NAME_TEMPLATE "bt%d"
/* An EUI-64 interface identifier is 8 bytes (RFC 4291 / RFC 7668) */
#define EUI64_ADDR_LEN 8
39
/* Per-packet control data stored in skb->cb. It carries routing info
 * from header_create()/setup_header() down to bt_xmit()/send_pkt().
 */
struct skb_cb {
	struct in6_addr addr;	/* IPv6 destination taken from the packet */
	struct in6_addr gw;	/* next hop cached by peer_lookup_dst() */
	struct l2cap_chan *chan; /* unicast target channel, NULL for mcast */
	int status;		/* tx status set by suspend/resume callbacks */
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
47
/* The devices list contains those devices that we are acting
 * as a proxy. The BT 6LoWPAN device is a virtual device that
 * connects to the Bluetooth LE device. The real connection to
 * BT device is done via l2cap layer. There exists one
 * virtual device / one BT 6LoWPAN network (=hciX device).
 * The list contains struct lowpan_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
/* Protects bt_6lowpan_devices and each device's peers list */
static DEFINE_RWLOCK(devices_lock);

/* If psm is set to 0 (default value), then 6lowpan is disabled.
 * Other values are used to indicate a Protocol Service Multiplexer
 * value for 6lowpan.
 */
static u16 psm_6lowpan;

/* We are listening incoming connections via this channel
 */
static struct l2cap_chan *listen_chan;
67
/* One connected BT LE peer, linked into struct lowpan_dev.peers */
struct lowpan_peer {
	struct list_head list;
	struct l2cap_chan *chan;	/* L2CAP channel to the peer */

	/* peer addresses in various formats */
	unsigned char eui64_addr[EUI64_ADDR_LEN];
	struct in6_addr peer_addr;	/* link-local address of the peer */
};

/* Private data of one virtual btX network interface (one per hci dev) */
struct lowpan_dev {
	struct list_head list;		/* link in bt_6lowpan_devices */

	struct hci_dev *hdev;
	struct net_device *netdev;
	struct list_head peers;		/* struct lowpan_peer entries */
	atomic_t peer_count; /* number of items in peers list */

	struct work_struct delete_netdev;	/* see delete_netdev() */
	struct delayed_work notify_peers;	/* see do_notify_peers() */
};
88
/* Map a net_device to its lowpan_dev private area. */
static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
{
	struct lowpan_dev *priv = netdev_priv(netdev);

	return priv;
}
93
/* Link @peer into @dev->peers. Caller must hold devices_lock for writing. */
static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_add(&peer->list, &dev->peers);
	atomic_inc(&dev->peer_count);
}
99
/* Unlink @peer from @dev->peers. Caller must hold devices_lock for writing.
 * Returns true if this was the last peer of the device.
 */
static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_del(&peer->list);

	/* balances the try_module_get() done in chan_ready_cb() */
	module_put(THIS_MODULE);

	if (atomic_dec_and_test(&dev->peer_count)) {
		BT_DBG("last peer");
		return true;
	}

	return false;
}
113
114static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
115						 bdaddr_t *ba, __u8 type)
116{
117	struct lowpan_peer *peer, *tmp;
118
119	BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
120	       ba, type);
121
122	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
123		BT_DBG("dst addr %pMR dst type %d",
124		       &peer->chan->dst, peer->chan->dst_type);
125
126		if (bacmp(&peer->chan->dst, ba))
127			continue;
128
129		if (type == peer->chan->dst_type)
130			return peer;
131	}
132
133	return NULL;
134}
135
136static inline struct lowpan_peer *peer_lookup_chan(struct lowpan_dev *dev,
137						   struct l2cap_chan *chan)
138{
139	struct lowpan_peer *peer, *tmp;
140
141	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
142		if (peer->chan == chan)
143			return peer;
144	}
145
146	return NULL;
147}
148
149static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
150						   struct l2cap_conn *conn)
151{
152	struct lowpan_peer *peer, *tmp;
153
154	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
155		if (peer->chan->conn == conn)
156			return peer;
157	}
158
159	return NULL;
160}
161
/* Pick the peer that should receive a packet destined to @daddr.
 *
 * With exactly one peer the decision is trivial. Otherwise the next hop
 * is taken from the routing entry attached to @skb or, when no dst entry
 * is present (the bt_xmit() path), from the gateway address previously
 * cached in the skb control block.
 * Caller must hold devices_lock. Returns NULL when no peer matches.
 */
static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
						  struct in6_addr *daddr,
						  struct sk_buff *skb)
{
	struct lowpan_peer *peer, *tmp;
	struct in6_addr *nexthop;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	int count = atomic_read(&dev->peer_count);

	BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt);

	/* If we have multiple 6lowpan peers, then check where we should
	 * send the packet. If only one peer exists, then we can send the
	 * packet right away.
	 */
	if (count == 1)
		return list_first_entry(&dev->peers, struct lowpan_peer,
					list);

	if (!rt) {
		nexthop = &lowpan_cb(skb)->gw;

		/* no cached gateway either: cannot route the packet */
		if (ipv6_addr_any(nexthop))
			return NULL;
	} else {
		nexthop = rt6_nexthop(rt);

		/* We need to remember the address because it is needed
		 * by bt_xmit() when sending the packet. In bt_xmit(), the
		 * destination routing info is not set.
		 */
		memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr));
	}

	BT_DBG("gw %pI6c", nexthop);

	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
		BT_DBG("dst addr %pMR dst type %d ip %pI6c",
		       &peer->chan->dst, peer->chan->dst_type,
		       &peer->peer_addr);

		/* ipv6_addr_cmp() == 0 means an exact address match */
		if (!ipv6_addr_cmp(&peer->peer_addr, nexthop))
			return peer;
	}

	return NULL;
}
209
210static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
211{
212	struct lowpan_dev *entry, *tmp;
213	struct lowpan_peer *peer = NULL;
214	unsigned long flags;
215
216	read_lock_irqsave(&devices_lock, flags);
217
218	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
219		peer = peer_lookup_conn(entry, conn);
220		if (peer)
221			break;
222	}
223
224	read_unlock_irqrestore(&devices_lock, flags);
225
226	return peer;
227}
228
229static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
230{
231	struct lowpan_dev *entry, *tmp;
232	struct lowpan_dev *dev = NULL;
233	unsigned long flags;
234
235	read_lock_irqsave(&devices_lock, flags);
236
237	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
238		if (conn->hcon->hdev == entry->hdev) {
239			dev = entry;
240			break;
241		}
242	}
243
244	read_unlock_irqrestore(&devices_lock, flags);
245
246	return dev;
247}
248
249static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
250{
251	struct sk_buff *skb_cp;
252	int ret;
253
254	skb_cp = skb_copy(skb, GFP_ATOMIC);
255	if (!skb_cp)
256		return -ENOMEM;
257
258	ret = netif_rx(skb_cp);
259	if (ret < 0) {
260		BT_DBG("receive skb %d", ret);
261		return NET_RX_DROP;
262	}
263
264	return ret;
265}
266
/* Decompress an IPHC-encoded 6LoWPAN packet received on @chan and pass
 * it to the stack via give_skb_to_upper().
 *
 * The source EUI-64 is the sending peer's, the destination is this
 * interface's own address. Consumes @skb on the drop path.
 * Returns the lowpan_process_data() result, or -EINVAL on a bad packet.
 */
static int process_data(struct sk_buff *skb, struct net_device *netdev,
			struct l2cap_chan *chan)
{
	const u8 *saddr, *daddr;
	u8 iphc0, iphc1;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	unsigned long flags;

	dev = lowpan_dev(netdev);

	read_lock_irqsave(&devices_lock, flags);
	peer = peer_lookup_chan(dev, chan);
	read_unlock_irqrestore(&devices_lock, flags);
	if (!peer)
		goto drop;

	saddr = peer->eui64_addr;
	daddr = dev->netdev->dev_addr;

	/* at least two bytes will be used for the encoding */
	if (skb->len < 2)
		goto drop;

	if (lowpan_fetch_skb_u8(skb, &iphc0))
		goto drop;

	if (lowpan_fetch_skb_u8(skb, &iphc1))
		goto drop;

	return lowpan_process_data(skb, netdev,
				   saddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
				   daddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
				   iphc0, iphc1, give_skb_to_upper);

drop:
	kfree_skb(skb);
	return -EINVAL;
}
306
/* Dispatch a packet received from a peer: uncompressed IPv6 datagrams
 * are copied and fed to the stack directly, IPHC-compressed ones go
 * through process_data(). Frees @skb on every path except the unknown
 * dispatch case (where it is left for the caller).
 * Returns NET_RX_SUCCESS or NET_RX_DROP.
 */
static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
		    struct l2cap_chan *chan)
{
	struct sk_buff *local_skb;
	int ret;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_6LOWPAN)
		goto drop;

	/* check that it's our buffer */
	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 * NOTE(review): headroom of NET_SKB_PAD - 1 looks intended
		 * to shift alignment by one byte for the dispatch octet —
		 * confirm against the 6lowpan receive path.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		skb_reset_network_header(local_skb);
		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		/* give_skb_to_upper() copied local_skb, so both copies
		 * held here can be released now.
		 */
		kfree_skb(local_skb);
		kfree_skb(skb);
	} else {
		switch (skb->data[0] & 0xe0) {
		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
			local_skb = skb_clone(skb, GFP_ATOMIC);
			if (!local_skb)
				goto drop;

			/* process_data() consumes local_skb */
			ret = process_data(local_skb, dev, chan);
			if (ret != NET_RX_SUCCESS)
				goto drop;

			dev->stats.rx_bytes += skb->len;
			dev->stats.rx_packets++;

			kfree_skb(skb);
			break;
		default:
			break;
		}
	}

	return NET_RX_SUCCESS;

drop:
	dev->stats.rx_dropped++;
	kfree_skb(skb);
	return NET_RX_DROP;
}
373
374/* Packet from BT LE device */
375static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
376{
377	struct lowpan_dev *dev;
378	struct lowpan_peer *peer;
379	int err;
380
381	peer = lookup_peer(chan->conn);
382	if (!peer)
383		return -ENOENT;
384
385	dev = lookup_dev(chan->conn);
386	if (!dev || !dev->netdev)
387		return -ENOENT;
388
389	err = recv_pkt(skb, dev->netdev, chan);
390	if (err) {
391		BT_DBG("recv pkt %d", err);
392		err = -EAGAIN;
393	}
394
395	return err;
396}
397
398static u8 get_addr_type_from_eui64(u8 byte)
399{
400	/* Is universal(0) or local(1) bit */
401	return ((byte & 0x02) ? BDADDR_LE_RANDOM : BDADDR_LE_PUBLIC);
402}
403
404static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
405{
406	u8 *eui64 = ip6_daddr->s6_addr + 8;
407
408	addr->b[0] = eui64[7];
409	addr->b[1] = eui64[6];
410	addr->b[2] = eui64[5];
411	addr->b[3] = eui64[2];
412	addr->b[4] = eui64[1];
413	addr->b[5] = eui64[0];
414}
415
/* Extract the destination BD address and its type from an IPv6 address
 * that embeds an EUI-64 interface identifier (the inverse of set_addr()).
 */
static void convert_dest_bdaddr(struct in6_addr *ip6_daddr,
				bdaddr_t *addr, u8 *addr_type)
{
	copy_to_bdaddr(ip6_daddr, addr);

	/* We need to toggle the U/L bit that we got from IPv6 address
	 * so that we get the proper address and type of the BD address.
	 */
	addr->b[5] ^= 0x02;

	*addr_type = get_addr_type_from_eui64(addr->b[5]);
}
428
/* Resolve the peer for the packet's IPv6 destination and compress the
 * 6LoWPAN header in place.
 *
 * On success, stores the chosen channel in the skb control block and the
 * peer's BD address/type in @peer_addr/@peer_addr_type.
 * Returns: <0 on error, 0 for a multicast packet, 1 for unicast.
 */
static int setup_header(struct sk_buff *skb, struct net_device *netdev,
			bdaddr_t *peer_addr, u8 *peer_addr_type)
{
	struct in6_addr ipv6_daddr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr, *any = BDADDR_ANY;
	u8 *daddr = any->b;
	int err, status = 0;

	dev = lowpan_dev(netdev);

	memcpy(&ipv6_daddr, &lowpan_cb(skb)->addr, sizeof(ipv6_daddr));

	if (ipv6_addr_is_multicast(&ipv6_daddr)) {
		/* multicast: no single channel, bt_xmit() fans out */
		lowpan_cb(skb)->chan = NULL;
	} else {
		unsigned long flags;
		u8 addr_type;

		/* Get destination BT device from skb.
		 * If there is no such peer then discard the packet.
		 */
		convert_dest_bdaddr(&ipv6_daddr, &addr, &addr_type);

		BT_DBG("dest addr %pMR type %d IP %pI6c", &addr,
		       addr_type, &ipv6_daddr);

		read_lock_irqsave(&devices_lock, flags);
		peer = peer_lookup_ba(dev, &addr, addr_type);
		read_unlock_irqrestore(&devices_lock, flags);

		if (!peer) {
			/* The packet might be sent to 6lowpan interface
			 * because of routing (either via default route
			 * or user set route) so get peer according to
			 * the destination address.
			 */
			read_lock_irqsave(&devices_lock, flags);
			peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
			read_unlock_irqrestore(&devices_lock, flags);
			if (!peer) {
				BT_DBG("no such peer %pMR found", &addr);
				return -ENOENT;
			}
		}

		daddr = peer->eui64_addr;
		*peer_addr = addr;
		*peer_addr_type = addr_type;
		lowpan_cb(skb)->chan = peer->chan;

		status = 1;
	}

	lowpan_header_compress(skb, netdev, ETH_P_IPV6, daddr,
			       dev->netdev->dev_addr, skb->len);

	err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
	if (err < 0)
		return err;

	return status;
}
493
494static int header_create(struct sk_buff *skb, struct net_device *netdev,
495			 unsigned short type, const void *_daddr,
496			 const void *_saddr, unsigned int len)
497{
498	struct ipv6hdr *hdr;
499
500	if (type != ETH_P_IPV6)
501		return -EINVAL;
502
503	hdr = ipv6_hdr(skb);
504
505	memcpy(&lowpan_cb(skb)->addr, &hdr->daddr, sizeof(struct in6_addr));
506
507	return 0;
508}
509
/* Packet to BT LE device */
static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
		    struct net_device *netdev)
{
	struct msghdr msg;
	struct kvec iv;
	int err;

	/* Remember the skb so that we can send EAGAIN to the caller if
	 * we run out of credits.
	 */
	chan->data = skb;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = (struct iovec *) &iv;
	msg.msg_iovlen = 1;
	iv.iov_base = skb->data;
	iv.iov_len = skb->len;

	err = l2cap_chan_send(chan, &msg, skb->len);
	if (err > 0) {
		/* positive return = number of bytes actually sent */
		netdev->stats.tx_bytes += err;
		netdev->stats.tx_packets++;
		return 0;
	}

	/* 0 means the send was deferred; the suspend/resume callbacks
	 * record the final status in the skb control block.
	 */
	if (!err)
		err = lowpan_cb(skb)->status;

	if (err < 0) {
		if (err == -EAGAIN)
			netdev->stats.tx_dropped++;
		else
			netdev->stats.tx_errors++;
	}

	return err;
}
548
549static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
550{
551	struct sk_buff *local_skb;
552	struct lowpan_dev *entry, *tmp;
553	unsigned long flags;
554	int err = 0;
555
556	read_lock_irqsave(&devices_lock, flags);
557
558	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
559		struct lowpan_peer *pentry, *ptmp;
560		struct lowpan_dev *dev;
561
562		if (entry->netdev != netdev)
563			continue;
564
565		dev = lowpan_dev(entry->netdev);
566
567		list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
568			int ret;
569
570			local_skb = skb_clone(skb, GFP_ATOMIC);
571
572			BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
573			       netdev->name,
574			       &pentry->chan->dst, pentry->chan->dst_type,
575			       &pentry->peer_addr, pentry->chan);
576			ret = send_pkt(pentry->chan, local_skb, netdev);
577			if (ret < 0)
578				err = ret;
579
580			kfree_skb(local_skb);
581		}
582	}
583
584	read_unlock_irqrestore(&devices_lock, flags);
585
586	return err;
587}
588
/* ndo_start_xmit callback: compress the packet and send it either to a
 * single peer (unicast) or to all peers (multicast). Always consumes
 * @skb. Returns a NET_XMIT_* code.
 */
static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int err = 0;
	bdaddr_t addr;
	u8 addr_type;
	struct sk_buff *tmpskb;

	/* We must take a copy of the skb before we modify/replace the ipv6
	 * header as the header could be used elsewhere
	 */
	tmpskb = skb_unshare(skb, GFP_ATOMIC);
	if (!tmpskb) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}
	skb = tmpskb;

	/* Return values from setup_header()
	 *  <0 - error, packet is dropped
	 *   0 - this is a multicast packet
	 *   1 - this is unicast packet
	 */
	err = setup_header(skb, netdev, &addr, &addr_type);
	if (err < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	if (err) {
		if (lowpan_cb(skb)->chan) {
			BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
			       netdev->name, &addr, addr_type,
			       &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan);
			err = send_pkt(lowpan_cb(skb)->chan, skb, netdev);
		} else {
			err = -ENOENT;
		}
	} else {
		/* We need to send the packet to every device behind this
		 * interface.
		 */
		err = send_mcast_pkt(skb, netdev);
	}

	dev_kfree_skb(skb);

	if (err)
		BT_DBG("ERROR: xmit failed (%d)", err);

	return err < 0 ? NET_XMIT_DROP : err;
}
640
/* Only transmit is implemented for this virtual interface */
static const struct net_device_ops netdev_ops = {
	.ndo_start_xmit		= bt_xmit,
};

/* header_create() just records the IPv6 destination; see setup_header() */
static struct header_ops header_ops = {
	.create	= header_create,
};
648
/* alloc_netdev() setup callback: configure the btX virtual interface */
static void netdev_setup(struct net_device *dev)
{
	dev->addr_len		= EUI64_ADDR_LEN;
	dev->type		= ARPHRD_6LOWPAN;

	dev->hard_header_len	= 0;
	dev->needed_tailroom	= 0;
	dev->mtu		= IPV6_MIN_MTU;	/* 1280, RFC 2460 minimum */
	dev->tx_queue_len	= 0;	/* no qdisc queueing */
	dev->flags		= IFF_RUNNING | IFF_POINTOPOINT |
				  IFF_MULTICAST;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &netdev_ops;
	dev->header_ops		= &header_ops;
	dev->destructor		= free_netdev;
}
666
/* Device type exposed in sysfs for the virtual interface */
static struct device_type bt_type = {
	.name	= "bluetooth",
};
670
671static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
672{
673	/* addr is the BT address in little-endian format */
674	eui[0] = addr[5];
675	eui[1] = addr[4];
676	eui[2] = addr[3];
677	eui[3] = 0xFF;
678	eui[4] = 0xFE;
679	eui[5] = addr[2];
680	eui[6] = addr[1];
681	eui[7] = addr[0];
682
683	/* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */
684	if (addr_type == BDADDR_LE_PUBLIC)
685		eui[0] &= ~0x02;
686	else
687		eui[0] |= 0x02;
688
689	BT_DBG("type %d addr %*phC", addr_type, 8, eui);
690}
691
/* Set the netdev hardware address to the EUI-64 derived from @addr */
static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
		         u8 addr_type)
{
	netdev->addr_assign_type = NET_ADDR_PERM;
	set_addr(netdev->dev_addr, addr->b, addr_type);
}
698
699static void ifup(struct net_device *netdev)
700{
701	int err;
702
703	rtnl_lock();
704	err = dev_open(netdev);
705	if (err < 0)
706		BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
707	rtnl_unlock();
708}
709
710static void ifdown(struct net_device *netdev)
711{
712	int err;
713
714	rtnl_lock();
715	err = dev_close(netdev);
716	if (err < 0)
717		BT_INFO("iface %s cannot be closed (%d)", netdev->name, err);
718	rtnl_unlock();
719}
720
/* Delayed work scheduled from add_peer_chan(); must run without the
 * devices_lock held.
 */
static void do_notify_peers(struct work_struct *work)
{
	struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
					      notify_peers.work);

	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}
728
729static bool is_bt_6lowpan(struct hci_conn *hcon)
730{
731	if (hcon->type != LE_LINK)
732		return false;
733
734	if (!psm_6lowpan)
735		return false;
736
737	return true;
738}
739
/* Allocate and default-initialize an L2CAP channel for 6lowpan use.
 * Returns NULL on allocation failure.
 */
static struct l2cap_chan *chan_create(void)
{
	struct l2cap_chan *chan;

	chan = l2cap_chan_create();
	if (!chan)
		return NULL;

	l2cap_chan_set_defaults(chan);

	chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
	chan->mode = L2CAP_MODE_LE_FLOWCTL;
	chan->omtu = 65535;	/* maximum L2CAP MTU */
	chan->imtu = chan->omtu;

	return chan;
}
757
/* Create a channel for an incoming connection and mark it connected.
 * Returns NULL on allocation failure.
 * NOTE(review): @pchan (the listening channel) is currently unused here.
 */
static struct l2cap_chan *chan_open(struct l2cap_chan *pchan)
{
	struct l2cap_chan *chan;

	chan = chan_create();
	if (!chan)
		return NULL;

	chan->remote_mps = chan->omtu;
	chan->mps = chan->omtu;

	chan->state = BT_CONNECTED;

	return chan;
}
773
774static void set_ip_addr_bits(u8 addr_type, u8 *addr)
775{
776	if (addr_type == BDADDR_LE_PUBLIC)
777		*addr |= 0x02;
778	else
779		*addr &= ~0x02;
780}
781
/* Allocate a lowpan_peer for @chan, derive its link-local address and
 * EUI-64 from the BD address, and add it to @dev->peers.
 * Returns the peer's channel, or NULL on allocation failure.
 */
static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
					struct lowpan_dev *dev)
{
	struct lowpan_peer *peer;
	unsigned long flags;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		return NULL;

	peer->chan = chan;
	memset(&peer->peer_addr, 0, sizeof(struct in6_addr));

	/* RFC 2464 ch. 5 */
	peer->peer_addr.s6_addr[0] = 0xFE;
	peer->peer_addr.s6_addr[1] = 0x80;
	set_addr((u8 *)&peer->peer_addr.s6_addr + 8, chan->dst.b,
		 chan->dst_type);

	/* eui64_addr keeps the pre-toggle form used by 802.15.4 headers */
	memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
	       EUI64_ADDR_LEN);

	/* IPv6 address needs to have the U/L bit set properly so toggle
	 * it back here.
	 */
	set_ip_addr_bits(chan->dst_type, (u8 *)&peer->peer_addr.s6_addr + 8);

	write_lock_irqsave(&devices_lock, flags);
	INIT_LIST_HEAD(&peer->list);
	peer_add(dev, peer);
	write_unlock_irqrestore(&devices_lock, flags);

	/* Notifying peers about us needs to be done without locks held */
	INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

	return peer->chan;
}
820
/* Allocate and register a btX virtual interface for the hci device
 * behind @chan, and link its private data into bt_6lowpan_devices.
 * On success *dev points at the new lowpan_dev. Returns 0 or -errno.
 */
static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
{
	struct net_device *netdev;
	int err = 0;
	unsigned long flags;

	netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE,
			      NET_NAME_UNKNOWN, netdev_setup);
	if (!netdev)
		return -ENOMEM;

	/* local (not peer) address identifies this end of the link */
	set_dev_addr(netdev, &chan->src, chan->src_type);

	netdev->netdev_ops = &netdev_ops;
	SET_NETDEV_DEV(netdev, &chan->conn->hcon->dev);
	SET_NETDEV_DEVTYPE(netdev, &bt_type);

	err = register_netdev(netdev);
	if (err < 0) {
		BT_INFO("register_netdev failed %d", err);
		free_netdev(netdev);
		goto out;
	}

	BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
	       netdev->ifindex, &chan->dst, chan->dst_type,
	       &chan->src, chan->src_type);
	set_bit(__LINK_STATE_PRESENT, &netdev->state);

	*dev = netdev_priv(netdev);
	(*dev)->netdev = netdev;
	(*dev)->hdev = chan->conn->hcon->hdev;
	INIT_LIST_HEAD(&(*dev)->peers);

	write_lock_irqsave(&devices_lock, flags);
	INIT_LIST_HEAD(&(*dev)->list);
	list_add(&(*dev)->list, &bt_6lowpan_devices);
	write_unlock_irqrestore(&devices_lock, flags);

	return 0;

out:
	return err;
}
865
/* L2CAP ready callback: create the virtual interface on first use, add
 * the peer, and bring the interface up.
 */
static inline void chan_ready_cb(struct l2cap_chan *chan)
{
	struct lowpan_dev *dev;

	dev = lookup_dev(chan->conn);

	BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev);

	if (!dev) {
		if (setup_netdev(chan, &dev) < 0) {
			l2cap_chan_del(chan, -ENOENT);
			return;
		}
	}

	/* Reference dropped again in peer_del().
	 * NOTE(review): on failure the channel is left without a peer —
	 * confirm this is the intended module-unload guard behavior.
	 */
	if (!try_module_get(THIS_MODULE))
		return;

	add_peer_chan(chan, dev);
	ifup(dev->netdev);
}
887
888static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan)
889{
890	struct l2cap_chan *chan;
891
892	chan = chan_open(pchan);
893	chan->ops = pchan->ops;
894
895	BT_DBG("chan %p pchan %p", chan, pchan);
896
897	return chan;
898}
899
/* Work item scheduled by chan_close_cb(); unregistering a netdev can
 * sleep, so it cannot be done from the close callback itself.
 */
static void delete_netdev(struct work_struct *work)
{
	struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
						delete_netdev);

	unregister_netdev(entry->netdev);

	/* The entry pointer is deleted in device_event() */
}
909
/* L2CAP close callback: remove the peer that used @chan and, when it was
 * the last peer of its device, take the interface down and (if the hci
 * link is already gone) schedule netdev deletion.
 */
static void chan_close_cb(struct l2cap_chan *chan)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_dev *dev = NULL;
	struct lowpan_peer *peer;
	int err = -ENOENT;
	unsigned long flags;
	bool last = false, removed = true;

	BT_DBG("chan %p conn %p", chan, chan->conn);

	if (chan->conn && chan->conn->hcon) {
		if (!is_bt_6lowpan(chan->conn->hcon))
			return;

		/* If conn is set, then the netdev is also there and we should
		 * not remove it.
		 */
		removed = false;
	}

	write_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		dev = lowpan_dev(entry->netdev);
		peer = peer_lookup_chan(dev, chan);
		if (peer) {
			last = peer_del(dev, peer);
			err = 0;

			BT_DBG("dev %p removing %speer %p", dev,
			       last ? "last " : "1 ", peer);
			BT_DBG("chan %p orig refcnt %d", chan,
			       atomic_read(&chan->kref.refcount));

			/* drop the reference held by the peer entry */
			l2cap_chan_put(chan);
			kfree(peer);
			break;
		}
	}

	if (!err && last && dev && !atomic_read(&dev->peer_count)) {
		write_unlock_irqrestore(&devices_lock, flags);

		/* cancel_delayed_work_sync() and ifdown() may sleep, so
		 * the lock must be released first
		 */
		cancel_delayed_work_sync(&dev->notify_peers);

		ifdown(dev->netdev);

		if (!removed) {
			INIT_WORK(&entry->delete_netdev, delete_netdev);
			schedule_work(&entry->delete_netdev);
		}
	} else {
		write_unlock_irqrestore(&devices_lock, flags);
	}

	return;
}
968
/* L2CAP state-change callback: debug tracing only */
static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err)
{
	BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn,
	       state_to_string(state), err);
}
974
/* L2CAP skb-allocation callback used on the transmit path */
static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
					 unsigned long hdr_len,
					 unsigned long len, int nb)
{
	/* Note that we must allocate using GFP_ATOMIC here as
	 * this function is called originally from netdev hard xmit
	 * function in atomic context.
	 */
	return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
}
985
986static void chan_suspend_cb(struct l2cap_chan *chan)
987{
988	struct sk_buff *skb = chan->data;
989
990	BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);
991
992	if (!skb)
993		return;
994
995	lowpan_cb(skb)->status = -EAGAIN;
996}
997
998static void chan_resume_cb(struct l2cap_chan *chan)
999{
1000	struct sk_buff *skb = chan->data;
1001
1002	BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);
1003
1004	if (!skb)
1005		return;
1006
1007	lowpan_cb(skb)->status = 0;
1008}
1009
/* L2CAP send-timeout callback: use the default connection timeout */
static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
{
	return L2CAP_CONN_TIMEOUT;
}
1014
/* L2CAP callback table shared by the listening and per-peer channels */
static const struct l2cap_ops bt_6lowpan_chan_ops = {
	.name			= "L2CAP 6LoWPAN channel",
	.new_connection		= chan_new_conn_cb,
	.recv			= chan_recv_cb,
	.close			= chan_close_cb,
	.state_change		= chan_state_change_cb,
	.ready			= chan_ready_cb,
	.resume			= chan_resume_cb,
	.suspend		= chan_suspend_cb,
	.get_sndtimeo		= chan_get_sndtimeo_cb,
	.alloc_skb		= chan_alloc_skb_cb,
	.memcpy_fromiovec	= l2cap_chan_no_memcpy_fromiovec,

	/* remaining ops intentionally use the no-op defaults */
	.teardown		= l2cap_chan_no_teardown,
	.defer			= l2cap_chan_no_defer,
	.set_shutdown		= l2cap_chan_no_set_shutdown,
};
1032
1033static inline __u8 bdaddr_type(__u8 type)
1034{
1035	if (type == ADDR_LE_DEV_PUBLIC)
1036		return BDADDR_LE_PUBLIC;
1037	else
1038		return BDADDR_LE_RANDOM;
1039}
1040
/* Create an L2CAP channel wired to the 6lowpan callback table.
 * Returns NULL on allocation failure.
 */
static struct l2cap_chan *chan_get(void)
{
	struct l2cap_chan *pchan;

	pchan = chan_create();
	if (!pchan)
		return NULL;

	pchan->ops = &bt_6lowpan_chan_ops;

	return pchan;
}
1053
/* Initiate an outgoing 6lowpan connection to @addr using the configured
 * PSM. Returns 0 on success or a negative errno; the channel reference
 * is dropped on failure.
 */
static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
{
	struct l2cap_chan *pchan;
	int err;

	pchan = chan_get();
	if (!pchan)
		return -EINVAL;

	err = l2cap_chan_connect(pchan, cpu_to_le16(psm_6lowpan), 0,
				 addr, dst_type);

	BT_DBG("chan %p err %d", pchan, err);
	if (err < 0)
		l2cap_chan_put(pchan);

	return err;
}
1072
1073static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
1074{
1075	struct lowpan_peer *peer;
1076
1077	BT_DBG("conn %p dst type %d", conn, dst_type);
1078
1079	peer = lookup_peer(conn);
1080	if (!peer)
1081		return -ENOENT;
1082
1083	BT_DBG("peer %p chan %p", peer, peer->chan);
1084
1085	l2cap_chan_close(peer->chan, ENOENT);
1086
1087	return 0;
1088}
1089
/* Start listening for incoming 6lowpan connections on the configured
 * PSM. Returns the listening channel, or NULL when 6lowpan is disabled
 * (psm_6lowpan == 0) or setup fails.
 */
static struct l2cap_chan *bt_6lowpan_listen(void)
{
	bdaddr_t *addr = BDADDR_ANY;
	struct l2cap_chan *pchan;
	int err;

	if (psm_6lowpan == 0)
		return NULL;

	pchan = chan_get();
	if (!pchan)
		return NULL;

	pchan->state = BT_LISTEN;
	pchan->src_type = BDADDR_LE_PUBLIC;

	BT_DBG("psm 0x%04x chan %p src type %d", psm_6lowpan, pchan,
	       pchan->src_type);

	err = l2cap_add_psm(pchan, addr, cpu_to_le16(psm_6lowpan));
	if (err) {
		l2cap_chan_put(pchan);
		BT_ERR("psm cannot be added err %d", err);
		return NULL;
	}

	return pchan;
}
1118
/* Parse "xx:xx:xx:xx:xx:xx <type>" from @buf into @addr/@addr_type and
 * look up the existing LE connection to that address.
 * Returns 0 with *conn set (possibly NULL if no l2cap_data yet),
 * -EINVAL on parse failure, or -ENOENT when no route/connection exists.
 */
static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
			  struct l2cap_conn **conn)
{
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	bdaddr_t *src = BDADDR_ANY;
	int n;

	/* BD address is printed most-significant byte first */
	n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
		   &addr->b[5], &addr->b[4], &addr->b[3],
		   &addr->b[2], &addr->b[1], &addr->b[0],
		   addr_type);

	if (n < 7)
		return -EINVAL;

	hdev = hci_get_route(addr, src);
	if (!hdev)
		return -ENOENT;

	hci_dev_lock(hdev);
	hcon = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	hci_dev_unlock(hdev);

	if (!hcon)
		return -ENOENT;

	/* NOTE(review): hcon->l2cap_data may still be NULL here; callers
	 * must handle *conn == NULL.
	 */
	*conn = (struct l2cap_conn *)hcon->l2cap_data;

	BT_DBG("conn %p dst %pMR type %d", *conn, &hcon->dst, hcon->dst_type);

	return 0;
}
1152
/* Close the channels of every known peer on every device. Used when the
 * PSM changes or 6lowpan is being disabled.
 */
static void disconnect_all_peers(void)
{
	struct lowpan_dev *entry, *tmp_dev;
	struct lowpan_peer *peer, *tmp_peer, *new_peer;
	struct list_head peers;
	unsigned long flags;

	INIT_LIST_HEAD(&peers);

	/* We make a separate list of peers as the close_cb() will
	 * modify the device peers list so it is better not to mess
	 * with the same list at the same time.
	 */

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) {
		list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list) {
			new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
			if (!new_peer)
				break;

			/* only the channel pointer is needed for closing */
			new_peer->chan = peer->chan;
			INIT_LIST_HEAD(&new_peer->list);

			list_add(&new_peer->list, &peers);
		}
	}

	read_unlock_irqrestore(&devices_lock, flags);

	/* l2cap_chan_close() may sleep, so run without the lock held */
	list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
		l2cap_chan_close(peer->chan, ENOENT);
		kfree(peer);
	}
}
1189
/* debugfs write handler for the psm file: change (or disable with 0) the
 * PSM and restart the listening channel accordingly.
 */
static int lowpan_psm_set(void *data, u64 val)
{
	u16 psm;

	psm = val;
	if (psm == 0 || psm_6lowpan != psm)
		/* Disconnect existing connections if 6lowpan is
		 * disabled (psm = 0), or if psm changes.
		 */
		disconnect_all_peers();

	psm_6lowpan = psm;

	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	/* bt_6lowpan_listen() returns NULL when psm_6lowpan is 0 */
	listen_chan = bt_6lowpan_listen();

	return 0;
}
1212
/* debugfs read handler for the psm file */
static int lowpan_psm_get(void *data, u64 *val)
{
	*val = psm_6lowpan;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(lowpan_psm_fops, lowpan_psm_get,
			lowpan_psm_set, "%llu\n");
1221
/* debugfs write handler for the control file. Understands two commands:
 * "connect <bdaddr> <type>" and "disconnect <bdaddr> <type>".
 * Returns @count on success (also for unknown commands) or -errno.
 */
static ssize_t lowpan_control_write(struct file *fp,
				    const char __user *user_buffer,
				    size_t count,
				    loff_t *position)
{
	char buf[32];
	size_t buf_size = min(count, sizeof(buf) - 1);
	int ret;
	bdaddr_t addr;
	u8 addr_type;
	struct l2cap_conn *conn = NULL;

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (memcmp(buf, "connect ", 8) == 0) {
		/* -ENOENT is tolerated here: connecting does not require
		 * an existing LE connection.
		 */
		ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn);
		if (ret == -EINVAL)
			return ret;

		/* an explicit connect supersedes passive listening */
		if (listen_chan) {
			l2cap_chan_close(listen_chan, 0);
			l2cap_chan_put(listen_chan);
			listen_chan = NULL;
		}

		if (conn) {
			struct lowpan_peer *peer;

			if (!is_bt_6lowpan(conn->hcon))
				return -EINVAL;

			peer = lookup_peer(conn);
			if (peer) {
				BT_DBG("6LoWPAN connection already exists");
				return -EALREADY;
			}

			BT_DBG("conn %p dst %pMR type %d user %d", conn,
			       &conn->hcon->dst, conn->hcon->dst_type,
			       addr_type);
		}

		ret = bt_6lowpan_connect(&addr, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	if (memcmp(buf, "disconnect ", 11) == 0) {
		ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn);
		if (ret < 0)
			return ret;

		ret = bt_6lowpan_disconnect(conn, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	return count;
}
1288
1289static int lowpan_control_show(struct seq_file *f, void *ptr)
1290{
1291	struct lowpan_dev *entry, *tmp_dev;
1292	struct lowpan_peer *peer, *tmp_peer;
1293	unsigned long flags;
1294
1295	read_lock_irqsave(&devices_lock, flags);
1296
1297	list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) {
1298		list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list)
1299			seq_printf(f, "%pMR (type %u)\n",
1300				   &peer->chan->dst, peer->chan->dst_type);
1301	}
1302
1303	read_unlock_irqrestore(&devices_lock, flags);
1304
1305	return 0;
1306}
1307
/* Open hook for "6lowpan_control": bind the seq_file single-shot
 * show function.
 */
static int lowpan_control_open(struct inode *inode, struct file *file)
{
	return single_open(file, lowpan_control_show, inode->i_private);
}
1312
/* File operations for "6lowpan_control": reads list connected peers
 * (seq_file), writes accept connect/disconnect commands — see
 * lowpan_control_write().
 */
static const struct file_operations lowpan_control_fops = {
	.open		= lowpan_control_open,
	.read		= seq_read,
	.write		= lowpan_control_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1320
/* Tear down all Bluetooth 6LoWPAN network devices (called on module
 * unload). unregister_netdev() re-enters device_event() through the
 * netdev notifier, and device_event() takes devices_lock and walks
 * bt_6lowpan_devices itself — hence the two-phase approach below:
 * snapshot the netdev pointers under the lock, unregister outside it.
 */
static void disconnect_devices(void)
{
	struct lowpan_dev *entry, *tmp, *new_dev;
	struct list_head devices;
	unsigned long flags;

	INIT_LIST_HEAD(&devices);

	/* We make a separate list of devices because the unregister_netdev()
	 * will call device_event() which will also want to modify the same
	 * devices list.
	 */

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		/* GFP_ATOMIC: we hold an irq-saving read lock here. */
		new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
		if (!new_dev)
			break;

		new_dev->netdev = entry->netdev;
		INIT_LIST_HEAD(&new_dev->list);

		list_add(&new_dev->list, &devices);
	}

	read_unlock_irqrestore(&devices_lock, flags);

	/* Now unregister each snapshotted netdev without the lock held. */
	list_for_each_entry_safe(entry, tmp, &devices, list) {
		ifdown(entry->netdev);
		BT_DBG("Unregistering netdev %s %p",
		       entry->netdev->name, entry->netdev);
		unregister_netdev(entry->netdev);
		kfree(entry);
	}
}
1357
1358static int device_event(struct notifier_block *unused,
1359			unsigned long event, void *ptr)
1360{
1361	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
1362	struct lowpan_dev *entry, *tmp;
1363	unsigned long flags;
1364
1365	if (netdev->type != ARPHRD_6LOWPAN)
1366		return NOTIFY_DONE;
1367
1368	switch (event) {
1369	case NETDEV_UNREGISTER:
1370		write_lock_irqsave(&devices_lock, flags);
1371		list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices,
1372					 list) {
1373			if (entry->netdev == netdev) {
1374				BT_DBG("Unregistered netdev %s %p",
1375				       netdev->name, netdev);
1376				list_del(&entry->list);
1377				kfree(entry);
1378				break;
1379			}
1380		}
1381		write_unlock_irqrestore(&devices_lock, flags);
1382		break;
1383	}
1384
1385	return NOTIFY_DONE;
1386}
1387
/* Notifier used to clean up our per-device bookkeeping when a
 * 6LoWPAN netdev disappears (see device_event()).
 */
static struct notifier_block bt_6lowpan_dev_notifier = {
	.notifier_call = device_event,
};
1391
1392static int __init bt_6lowpan_init(void)
1393{
1394	lowpan_psm_debugfs = debugfs_create_file("6lowpan_psm", 0644,
1395						 bt_debugfs, NULL,
1396						 &lowpan_psm_fops);
1397	lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
1398						     bt_debugfs, NULL,
1399						     &lowpan_control_fops);
1400
1401	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
1402}
1403
/* Module exit: remove the debugfs entries first so no new commands
 * arrive, close the listening channel, tear down every 6LoWPAN
 * netdev, and finally drop the notifier (disconnect_devices() relies
 * on the notifier still being registered to prune the devices list).
 */
static void __exit bt_6lowpan_exit(void)
{
	debugfs_remove(lowpan_psm_debugfs);
	debugfs_remove(lowpan_control_debugfs);

	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	disconnect_devices();

	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}
1418
/* Standard module entry points and metadata. */
module_init(bt_6lowpan_init);
module_exit(bt_6lowpan_exit);

MODULE_AUTHOR("Jukka Rissanen <jukka.rissanen@linux.intel.com>");
MODULE_DESCRIPTION("Bluetooth 6LoWPAN");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
1426