datapath.c revision 62b68e99faa802352e9cb2ae91adecd8dfddf1b8
/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "datapath.h"
#include "flow.h"
#include "flow_netlink.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

int ovs_net_id __read_mostly;

static void ovs_notify(struct sk_buff *skb, struct genl_info *info,
		       struct genl_multicast_group *grp)
{
	genl_notify(skb, genl_info_net(info), info->snd_portid,
		    grp->id, info->nlhdr, GFP_KERNEL);
}

/**
 * DOC: Locking:
 *
 * All writes, whether to device state (add/remove datapath or port, set
 * operations on vports, etc.) or to other state (flow table modifications,
 * setting miscellaneous datapath parameters, etc.), are protected by
 * ovs_lock().
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */

static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
	mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
	mutex_unlock(&ovs_mutex);
}
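
/* Illustrative sketch of the locking rules documented above; the callers
 * shown here are hypothetical and not part of this file.  A writer
 * serializes against other writers with ovs_lock(); a reader only needs
 * rcu_read_lock():
 *
 *	ovs_lock();
 *	...add/remove datapaths, vports, or flow table entries...
 *	ovs_unlock();
 *
 *	rcu_read_lock();
 *	...read-only lookups, e.g. via get_dp() or ovs_lookup_vport()...
 *	rcu_read_unlock();
 */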

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
	if (debug_locks)
		return lockdep_is_held(&ovs_mutex);
	else
		return 1;
}
#endif

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
			     const struct dp_upcall_info *);
static int queue_userspace_packet(struct net *, int dp_ifindex,
				  struct sk_buff *,
				  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock or ovs_mutex. */
static struct datapath *get_dp(struct net *net, int dp_ifindex)
{
	struct datapath *dp = NULL;
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, dp_ifindex);
	if (dev) {
		struct vport *vport = ovs_internal_dev_get_vport(dev);
		if (vport)
			dp = vport->dp;
	}
	rcu_read_unlock();

	return dp;
}

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
	return vport->ops->get_name(vport);
}

static int get_dpifindex(struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	rcu_read_lock();

	local = ovs_vport_rcu(dp, OVSP_LOCAL);
	if (local)
		ifindex = netdev_vport_priv(local)->dev->ifindex;
	else
		ifindex = 0;

	rcu_read_unlock();

	return ifindex;
}

static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy(&dp->table);
	free_percpu(dp->stats_percpu);
	release_net(ovs_dp_get_net(dp));
	kfree(dp->ports);
	kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
					    u16 port_no)
{
	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}
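
/* Worked example (sketch): DP_VPORT_HASH_BUCKETS is a power of two, so the
 * mask above is a cheap modulo.  Assuming the 1024-bucket value defined in
 * datapath.h (an assumption here, not guaranteed by this file), port_no
 * 1500 maps to bucket 1500 & 1023 == 476.
 */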

struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
	struct vport *vport;
	struct hlist_head *head;

	head = vport_hash_bucket(dp, port_no);
	hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
		if (vport->port_no == port_no)
			return vport;
	}
	return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;
		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

		hlist_add_head_rcu(&vport->dp_hash_node, head);
	}
	return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_OVSL();

	/* First drop references to device. */
	hlist_del_rcu(&p->dp_hash_node);

	/* Then destroy it. */
	ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct dp_stats_percpu *stats;
	struct sw_flow_key key;
	u64 *stats_counter;
	u32 n_mask_hit;
	int error;

	stats = this_cpu_ptr(dp->stats_percpu);

	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_extract(skb, p->port_no, &key);
	if (unlikely(error)) {
		kfree_skb(skb);
		return;
	}

	/* Look up flow. */
	flow = ovs_flow_tbl_lookup(&dp->table, &key, &n_mask_hit);
	if (unlikely(!flow)) {
		struct dp_upcall_info upcall;

		upcall.cmd = OVS_PACKET_CMD_MISS;
		upcall.key = &key;
		upcall.userdata = NULL;
		upcall.portid = p->upcall_portid;
		ovs_dp_upcall(dp, skb, &upcall);
		consume_skb(skb);
		stats_counter = &stats->n_missed;
		goto out;
	}

	OVS_CB(skb)->flow = flow;
	OVS_CB(skb)->pkt_key = &key;

	stats_counter = &stats->n_hit;
	ovs_flow_used(OVS_CB(skb)->flow, skb);
	ovs_execute_actions(dp, skb);

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->sync);
	(*stats_counter)++;
	stats->n_mask_hit += n_mask_hit;
	u64_stats_update_end(&stats->sync);
}
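
/* Illustrative sketch of the receive contract (hypothetical caller): per the
 * comment above, a vport implementation hands packets to the datapath from
 * its rx hook while holding the RCU read lock, roughly:
 *
 *	rcu_read_lock();
 *	ovs_dp_process_received_packet(vport, skb);
 *	rcu_read_unlock();
 */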

static struct genl_family dp_packet_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_PACKET_FAMILY,
	.version = OVS_PACKET_VERSION,
	.maxattr = OVS_PACKET_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
};

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	int dp_ifindex;
	int err;

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex) {
		err = -ENODEV;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
	else
		err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->sync);
	stats->n_lost++;
	u64_stats_update_end(&stats->sync);

	return err;
}

static int queue_gso_packets(struct net *net, int dp_ifindex,
			     struct sk_buff *skb,
			     const struct dp_upcall_info *upcall_info)
{
	unsigned short gso_type = skb_shinfo(skb)->gso_type;
	struct dp_upcall_info later_info;
	struct sw_flow_key later_key;
	struct sk_buff *segs, *nskb;
	int err;

	segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	/* Queue all of the segments. */
	skb = segs;
	do {
		err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
		if (err)
			break;

		if (skb == segs && gso_type & SKB_GSO_UDP) {
			/* The initial flow key extracted by ovs_flow_extract()
			 * in this case is for the first fragment, so later
			 * fragments need to be marked accordingly.
			 */
			later_key = *upcall_info->key;
			later_key.ip.frag = OVS_FRAG_TYPE_LATER;

			later_info = *upcall_info;
			later_info.key = &later_key;
			upcall_info = &later_info;
		}
	} while ((skb = skb->next));

	/* Free all of the segments. */
	skb = segs;
	do {
		nskb = skb->next;
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	} while ((skb = nskb));
	return err;
}

static size_t key_attr_size(void)
{
	return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
		  + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
		  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
		  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
		  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
		  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
		  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
		  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
		+ nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_8021Q */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
		+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
}

static size_t upcall_msg_size(const struct sk_buff *skb,
			      const struct nlattr *userdata)
{
	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(skb->len) /* OVS_PACKET_ATTR_PACKET */
		+ nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */

	/* OVS_PACKET_ATTR_USERDATA */
	if (userdata)
		size += NLA_ALIGN(userdata->nla_len);

	return size;
}
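
/* Worked example (sketch): for a 64-byte packet with no userdata, the upcall
 * is sized as NLMSG_ALIGN(sizeof(struct ovs_header)) + nla_total_size(64)
 * + nla_total_size(key_attr_size()).  Since key_attr_size() sums every
 * possible key attribute, it is a worst-case bound, so genlmsg_new() below
 * may overallocate but should never underallocate.
 */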

static int queue_userspace_packet(struct net *net, int dp_ifindex,
				  struct sk_buff *skb,
				  const struct dp_upcall_info *upcall_info)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb; /* to be queued to userspace */
	struct nlattr *nla;
	int err;

	if (vlan_tx_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
		if (!nskb)
			return -ENOMEM;

		nskb->vlan_tci = 0;
		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	user_skb = genlmsg_new(upcall_msg_size(skb, upcall_info->userdata), GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	if (!upcall) {
		/* Shouldn't happen: user_skb was sized by upcall_msg_size(). */
		kfree_skb(user_skb);
		err = -EMSGSIZE;
		goto out;
	}
	upcall->dp_ifindex = dp_ifindex;

	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
	ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
	nla_nest_end(user_skb, nla);

	if (upcall_info->userdata)
		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
			  nla_len(upcall_info->userdata),
			  nla_data(upcall_info->userdata));

	nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);

	skb_copy_and_csum_dev(skb, nla_data(nla));

	genlmsg_end(user_skb, upcall);
	err = genlmsg_unicast(net, user_skb, upcall_info->portid);

out:
	kfree_skb(nskb);
	return err;
}

static void clear_stats(struct sw_flow *flow)
{
	flow->used = 0;
	flow->tcp_flags = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}

static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct nlattr **a = info->attrs;
	struct sw_flow_actions *acts;
	struct sk_buff *packet;
	struct sw_flow *flow;
	struct datapath *dp;
	struct ethhdr *eth;
	int len;
	int err;

	err = -EINVAL;
	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
	    !a[OVS_PACKET_ATTR_ACTIONS])
		goto err;

	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
	err = -ENOMEM;
	if (!packet)
		goto err;
	skb_reserve(packet, NET_IP_ALIGN);

	nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

	skb_reset_mac_header(packet);
	eth = eth_hdr(packet);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		packet->protocol = eth->h_proto;
	else
		packet->protocol = htons(ETH_P_802_2);

	/* Build an sw_flow for sending this packet. */
	flow = ovs_flow_alloc();
	err = PTR_ERR(flow);
	if (IS_ERR(flow))
		goto err_kfree_skb;

	err = ovs_flow_extract(packet, -1, &flow->key);
	if (err)
		goto err_flow_free;

	err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
	if (err)
		goto err_flow_free;
	acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
	err = PTR_ERR(acts);
	if (IS_ERR(acts))
		goto err_flow_free;

	err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
				   &flow->key, 0, &acts);
	rcu_assign_pointer(flow->sf_acts, acts);
	if (err)
		goto err_flow_free;

	OVS_CB(packet)->flow = flow;
	OVS_CB(packet)->pkt_key = &flow->key;
	packet->priority = flow->key.phy.priority;
	packet->mark = flow->key.phy.skb_mark;

	rcu_read_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	local_bh_disable();
	err = ovs_execute_actions(dp, packet);
	local_bh_enable();
	rcu_read_unlock();

	ovs_flow_free(flow, false);
	return err;

err_unlock:
	rcu_read_unlock();
err_flow_free:
	ovs_flow_free(flow, false);
err_kfree_skb:
	kfree_skb(packet);
err:
	return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static const struct genl_ops dp_packet_genl_ops[] = {
	{ .cmd = OVS_PACKET_CMD_EXECUTE,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = packet_policy,
	  .doit = ovs_packet_cmd_execute
	}
};

static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
			 struct ovs_dp_megaflow_stats *mega_stats)
{
	int i;

	memset(mega_stats, 0, sizeof(*mega_stats));

	stats->n_flows = ovs_flow_tbl_count(&dp->table);
	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

	stats->n_hit = stats->n_missed = stats->n_lost = 0;

	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
		mega_stats->n_mask_hit += local_stats.n_mask_hit;
	}
}
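
/* Note on the stats synchronization pairing (illustrative summary): each CPU
 * updates its own counters between u64_stats_update_begin() and
 * u64_stats_update_end(), as in ovs_dp_process_received_packet() and
 * ovs_dp_upcall() above, while the reader loop here retries with
 * u64_stats_fetch_retry_bh() until it observes a consistent snapshot.  No
 * lock is shared between the fast path and stats readers.
 */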

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
};

static struct genl_family dp_flow_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_FLOW_FAMILY,
	.version = OVS_FLOW_VERSION,
	.maxattr = OVS_FLOW_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
};

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
	.name = OVS_FLOW_MCGROUP
};

static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
{
	return NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
		+ nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
		+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
		+ nla_total_size(8) /* OVS_FLOW_ATTR_USED */
		+ nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
}

/* Called with ovs_mutex. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd)
{
	const int skb_orig_len = skb->len;
	struct nlattr *start;
	struct ovs_flow_stats stats;
	struct ovs_header *ovs_header;
	struct nlattr *nla;
	unsigned long used;
	u8 tcp_flags;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	/* Fill flow key. */
	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
	if (!nla)
		goto nla_put_failure;

	err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
	if (err)
		goto error;
	nla_nest_end(skb, nla);

	nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
	if (!nla)
		goto nla_put_failure;

	err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
	if (err)
		goto error;

	nla_nest_end(skb, nla);

	spin_lock_bh(&flow->lock);
	used = flow->used;
	stats.n_packets = flow->packet_count;
	stats.n_bytes = flow->byte_count;
	tcp_flags = (u8)ntohs(flow->tcp_flags);
	spin_unlock_bh(&flow->lock);

	if (used &&
	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
		goto nla_put_failure;

	if (stats.n_packets &&
	    nla_put(skb, OVS_FLOW_ATTR_STATS,
		    sizeof(struct ovs_flow_stats), &stats))
		goto nla_put_failure;

	if (tcp_flags &&
	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
		goto nla_put_failure;

	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
	 * this is the first flow to be dumped into 'skb'.  This is unusual for
	 * Netlink but individual action lists can be longer than
	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
	 * The userspace caller can always fetch the actions separately if it
	 * really wants them.  (Most userspace callers in fact don't care.)
	 *
	 * This can only fail for dump operations because the skb is always
	 * properly sized for single flows.
	 */
	start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
	if (start) {
		const struct sw_flow_actions *sf_acts;

		sf_acts = rcu_dereference_check(flow->sf_acts,
						lockdep_ovsl_is_held());

		err = ovs_nla_put_actions(sf_acts->actions,
					  sf_acts->actions_len, skb);
		if (!err)
			nla_nest_end(skb, start);
		else {
			if (skb_orig_len)
				goto error;

			nla_nest_cancel(skb, start);
		}
	} else if (skb_orig_len)
		goto nla_put_failure;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
{
	const struct sw_flow_actions *sf_acts;

	sf_acts = ovsl_dereference(flow->sf_acts);

	return genlmsg_new(ovs_flow_cmd_msg_size(sf_acts), GFP_KERNEL);
}

static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
					       struct datapath *dp,
					       u32 portid, u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = ovs_flow_cmd_alloc_info(flow);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
	BUG_ON(retval < 0);
	return skb;
}

static struct sw_flow *__ovs_flow_tbl_lookup(struct flow_table *tbl,
					      const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup(tbl, key, &n_mask_hit);
}

static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key, masked_key;
	struct sw_flow *flow = NULL;
	struct sw_flow_mask mask;
	struct sk_buff *reply;
	struct datapath *dp;
	struct sw_flow_actions *acts = NULL;
	struct sw_flow_match match;
	int error;

	/* Extract key. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY])
		goto error;

	ovs_match_init(&match, &key, &mask);
	error = ovs_nla_get_match(&match,
				  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
	if (error)
		goto error;

	/* Validate actions. */
	if (a[OVS_FLOW_ATTR_ACTIONS]) {
		acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error;

		ovs_flow_mask_key(&masked_key, &key, &mask);
		error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
					     &masked_key, 0, &acts);
		if (error) {
			OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
			goto err_kfree;
		}
	} else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
		error = -EINVAL;
		goto error;
	}

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	error = -ENODEV;
	if (!dp)
		goto err_unlock_ovs;

	/* Check if this is a duplicate flow */
	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
	if (!flow) {
		/* Bail out if we're not allowed to create a new flow. */
		error = -ENOENT;
		if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
			goto err_unlock_ovs;

		/* Allocate flow. */
		flow = ovs_flow_alloc();
		if (IS_ERR(flow)) {
			error = PTR_ERR(flow);
			goto err_unlock_ovs;
		}
		clear_stats(flow);

		flow->key = masked_key;
		flow->unmasked_key = key;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = ovs_flow_tbl_insert(&dp->table, flow, &mask);
		if (error) {
			acts = NULL;
			goto err_flow_free;
		}

		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
						info->snd_seq, OVS_FLOW_CMD_NEW);
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request.  We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed.
		 */
		error = -EEXIST;
		if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
		    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
			goto err_unlock_ovs;

		/* The unmasked key has to be the same for flow updates. */
		error = -EINVAL;
		if (!ovs_flow_cmp_unmasked_key(flow, &match)) {
			OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n");
			goto err_unlock_ovs;
		}

		/* Update actions. */
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);
		ovs_nla_free_flow_actions(old_acts);

		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
					       info->snd_seq, OVS_FLOW_CMD_NEW);

		/* Clear stats. */
		if (a[OVS_FLOW_ATTR_CLEAR]) {
			spin_lock_bh(&flow->lock);
			clear_stats(flow);
			spin_unlock_bh(&flow->lock);
		}
	}
	ovs_unlock();

	if (!IS_ERR(reply))
		ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
	else
		genl_set_err(sock_net(skb->sk), 0,
			     ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
	return 0;

err_flow_free:
	ovs_flow_free(flow, false);
err_unlock_ovs:
	ovs_unlock();
err_kfree:
	kfree(acts);
error:
	return error;
}
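
/* Summary of ovs_flow_cmd_new_or_set() outcomes (illustrative, derived from
 * the code above; not a table enforced anywhere else):
 *
 *	cmd		flow exists	result
 *	CMD_NEW		no		create flow, reply with it
 *	CMD_NEW+CREATE	yes		-EEXIST (NLM_F_EXCL also honoured)
 *	CMD_NEW		yes		replace actions, optionally clear stats
 *	CMD_SET		no		-ENOENT
 *	CMD_SET		yes		replace actions, optionally clear stats
 */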

static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct sw_flow_match match;
	int err;

	if (!a[OVS_FLOW_ATTR_KEY]) {
		OVS_NLERR("Flow get message rejected, Key attribute missing.\n");
		return -EINVAL;
	}

	ovs_match_init(&match, &key, NULL);
	err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
	if (err)
		return err;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		err = -ENODEV;
		goto unlock;
	}

	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
		err = -ENOENT;
		goto unlock;
	}

	reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
					info->snd_seq, OVS_FLOW_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		goto unlock;
	}

	ovs_unlock();
	return genlmsg_reply(reply, info);
unlock:
	ovs_unlock();
	return err;
}

static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct sw_flow_match match;
	int err;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		err = -ENODEV;
		goto unlock;
	}

	if (!a[OVS_FLOW_ATTR_KEY]) {
		err = ovs_flow_tbl_flush(&dp->table);
		goto unlock;
	}

	ovs_match_init(&match, &key, NULL);
	err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
	if (err)
		goto unlock;

	flow = __ovs_flow_tbl_lookup(&dp->table, &key);
	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
		err = -ENOENT;
		goto unlock;
	}

	reply = ovs_flow_cmd_alloc_info(flow);
	if (!reply) {
		err = -ENOMEM;
		goto unlock;
	}

	ovs_flow_tbl_remove(&dp->table, flow);

	err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
				     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
	BUG_ON(err < 0);

	ovs_flow_free(flow, true);
	ovs_unlock();

	ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
	return 0;
unlock:
	ovs_unlock();
	return err;
}

static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct table_instance *ti;
	struct datapath *dp;

	rcu_read_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		rcu_read_unlock();
		return -ENODEV;
	}

	ti = rcu_dereference(dp->table.ti);
	for (;;) {
		struct sw_flow *flow;
		u32 bucket, obj;

		bucket = cb->args[0];
		obj = cb->args[1];
		flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
		if (!flow)
			break;

		if (ovs_flow_cmd_fill_info(flow, dp, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   OVS_FLOW_CMD_NEW) < 0)
			break;

		cb->args[0] = bucket;
		cb->args[1] = obj;
	}
	rcu_read_unlock();
	return skb->len;
}

static const struct genl_ops dp_flow_genl_ops[] = {
	{ .cmd = OVS_FLOW_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_new_or_set
	},
	{ .cmd = OVS_FLOW_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_del
	},
	{ .cmd = OVS_FLOW_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_get,
	  .dumpit = ovs_flow_cmd_dump
	},
	{ .cmd = OVS_FLOW_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_new_or_set,
	},
};

static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
};

static struct genl_family dp_datapath_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_DATAPATH_FAMILY,
	.version = OVS_DATAPATH_VERSION,
	.maxattr = OVS_DP_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
};

static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP
};

static size_t ovs_dp_cmd_msg_size(void)
{
	size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));

	msgsize += nla_total_size(IFNAMSIZ);
	msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
	msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));

	return msgsize;
}

static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
				u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_dp_stats dp_stats;
	struct ovs_dp_megaflow_stats dp_megaflow_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
				   flags, cmd);
	if (!ovs_header)
		goto error;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	rcu_read_lock();
	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
			&dp_stats))
		goto nla_put_failure;

	if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
			sizeof(struct ovs_dp_megaflow_stats),
			&dp_megaflow_stats))
		goto nla_put_failure;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	genlmsg_cancel(skb, ovs_header);
error:
	return -EMSGSIZE;
}

static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
					     u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
	if (retval < 0) {
		kfree_skb(skb);
		return ERR_PTR(retval);
	}
	return skb;
}

/* Called with ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
					struct ovs_header *ovs_header,
					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
	struct datapath *dp;

	if (!a[OVS_DP_ATTR_NAME])
		dp = get_dp(net, ovs_header->dp_ifindex);
	else {
		struct vport *vport;

		rcu_read_lock();
		vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
		rcu_read_unlock();
	}
	return dp ? dp : ERR_PTR(-ENODEV);
}

static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct datapath *dp;
	struct vport *vport;
	struct ovs_net *ovs_net;
	int err, i;

	err = -EINVAL;
	if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
		goto err;

	ovs_lock();

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_unlock_ovs;

	ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));

	/* Allocate table. */
	err = ovs_flow_tbl_init(&dp->table);
	if (err)
		goto err_free_dp;

	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu) {
		err = -ENOMEM;
		goto err_destroy_table;
	}

	dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dp->ports) {
		err = -ENOMEM;
		goto err_destroy_percpu;
	}

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(&dp->ports[i]);

	/* Set up our datapath device. */
	parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
	parms.type = OVS_VPORT_TYPE_INTERNAL;
	parms.options = NULL;
	parms.dp = dp;
	parms.port_no = OVSP_LOCAL;
	parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);

	vport = new_vport(&parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		if (err == -EBUSY)
			err = -EEXIST;

		goto err_destroy_ports_array;
	}

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto err_destroy_local_port;

	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
	list_add_tail_rcu(&dp->list_node, &ovs_net->dps);

	ovs_unlock();

	ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
	return 0;

err_destroy_local_port:
	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
err_destroy_ports_array:
	kfree(dp->ports);
err_destroy_percpu:
	free_percpu(dp->stats_percpu);
err_destroy_table:
	ovs_flow_tbl_destroy(&dp->table);
err_free_dp:
	release_net(ovs_dp_get_net(dp));
	kfree(dp);
err_unlock_ovs:
	ovs_unlock();
err:
	return err;
}

/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{
	int i;

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;
		struct hlist_node *n;

		hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
			if (vport->port_no != OVSP_LOCAL)
				ovs_dp_detach_port(vport);
	}

	list_del_rcu(&dp->list_node);

	/* OVSP_LOCAL is the datapath's internal port.  We need to make sure
	 * that all ports in the datapath are destroyed before the datapath
	 * itself is freed.
	 */
	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));

	call_rcu(&dp->rcu, destroy_dp_rcu);
}
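
/* Illustrative timeline (sketch): after __dp_destroy() returns, readers that
 * entered rcu_read_lock() before the list_del_rcu()/hlist_del_rcu() calls
 * above may still be traversing the datapath; destroy_dp_rcu() frees the
 * flow table, per-CPU stats, and ports array only once a grace period has
 * elapsed.
 */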

static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto unlock;

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto unlock;

	__dp_destroy(dp);
	ovs_unlock();

	ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);

	return 0;
unlock:
	ovs_unlock();
	return err;
}

static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto unlock;

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		genl_set_err(sock_net(skb->sk), 0,
			     ovs_dp_datapath_multicast_group.id, err);
		err = 0;
		goto unlock;
	}

	ovs_unlock();
	ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);

	return 0;
unlock:
	ovs_unlock();
	return err;
}

static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp)) {
		err = PTR_ERR(dp);
		goto unlock;
	}

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		goto unlock;
	}

	ovs_unlock();
	return genlmsg_reply(reply, info);

unlock:
	ovs_unlock();
	return err;
}

static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct datapath *dp;
	int skip = cb->args[0];
	int i = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
		if (i >= skip &&
		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 OVS_DP_CMD_NEW) < 0)
			break;
		i++;
	}
	rcu_read_unlock();

	cb->args[0] = i;

	return skb->len;
}

static const struct genl_ops dp_datapath_genl_ops[] = {
	{ .cmd = OVS_DP_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_new
	},
	{ .cmd = OVS_DP_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_del
	},
	{ .cmd = OVS_DP_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_get,
	  .dumpit = ovs_dp_cmd_dump
	},
	{ .cmd = OVS_DP_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_set,
	},
};

static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

static struct genl_family dp_vport_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_VPORT_FAMILY,
	.version = OVS_VPORT_VERSION,
	.maxattr = OVS_VPORT_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
};

struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP
};

/* Called with ovs_mutex or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
				   u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_vport_stats vport_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(vport->dp);

	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
	    nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
		goto nla_put_failure;

	ovs_vport_get_stats(vport, &vport_stats);
	if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
		    &vport_stats))
		goto nla_put_failure;

	err = ovs_vport_get_options(vport, skb);
	if (err == -EMSGSIZE)
		goto error;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

/* Called with ovs_mutex or RCU read lock. */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
					 u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
	BUG_ON(retval < 0);

	return skb;
}

/* Called with ovs_mutex or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
				  struct ovs_header *ovs_header,
				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[OVS_VPORT_ATTR_NAME]) {
		vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
		if (!vport)
			return ERR_PTR(-ENODEV);
		if (ovs_header->dp_ifindex &&
		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
			return ERR_PTR(-ENODEV);
		return vport;
	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EFBIG);

		dp = get_dp(net, ovs_header->dp_ifindex);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = ovs_vport_ovsl_rcu(dp, port_no);
		if (!vport)
			return ERR_PTR(-ENODEV);
		return vport;
	} else
		return ERR_PTR(-EINVAL);
}

static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct vport *vport;
	struct datapath *dp;
	u32 port_no;
	int err;

	err = -EINVAL;
	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
	    !a[OVS_VPORT_ATTR_UPCALL_PID])
		goto exit;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock;

	if (a[OVS_VPORT_ATTR_PORT_NO]) {
		port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		err = -EFBIG;
		if (port_no >= DP_MAX_PORTS)
			goto exit_unlock;

		vport = ovs_vport_ovsl(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock;
			}
			vport = ovs_vport_ovsl(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;
	parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	err = 0;
	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
					 OVS_VPORT_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		ovs_dp_detach_port(vport);
		goto exit_unlock;
	}

	ovs_notify(reply, info, &ovs_dp_vport_multicast_group);

exit_unlock:
	ovs_unlock();
exit:
	return err;
}

static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	if (a[OVS_VPORT_ATTR_TYPE] &&
	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
		err = -EINVAL;
		goto exit_unlock;
	}

	reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!reply) {
		err = -ENOMEM;
		goto exit_unlock;
	}

	if (a[OVS_VPORT_ATTR_OPTIONS]) {
		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
		if (err)
			goto exit_free;
	}

	if (a[OVS_VPORT_ATTR_UPCALL_PID])
		vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
	BUG_ON(err < 0);

	ovs_unlock();
	ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
	return 0;

exit_free:
	kfree_skb(reply);
exit_unlock:
	ovs_unlock();
	return err;
}

static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	if (vport->port_no == OVSP_LOCAL) {
		err = -EINVAL;
		goto exit_unlock;
	}

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
					 info->snd_seq, OVS_VPORT_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	err = 0;
	ovs_dp_detach_port(vport);

	ovs_notify(reply, info, &ovs_dp_vport_multicast_group);

exit_unlock:
	ovs_unlock();
	return err;
}

static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	rcu_read_lock();
	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
					 info->snd_seq, OVS_VPORT_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	rcu_read_unlock();

	return genlmsg_reply(reply, info);

exit_unlock:
	rcu_read_unlock();
	return err;
}

static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	int bucket = cb->args[0], skip = cb->args[1];
	int i, j = 0;

	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp)
		return -ENODEV;

	rcu_read_lock();
	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;

		j = 0;
		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
			if (j >= skip &&
			    ovs_vport_cmd_fill_info(vport, skb,
						    NETLINK_CB(cb->skb).portid,
						    cb->nlh->nlmsg_seq,
						    NLM_F_MULTI,
						    OVS_VPORT_CMD_NEW) < 0)
				goto out;

			j++;
		}
		skip = 0;
	}
out:
	rcu_read_unlock();

	cb->args[0] = i;
	cb->args[1] = j;

	return skb->len;
}

static const struct genl_ops dp_vport_genl_ops[] = {
	{ .cmd = OVS_VPORT_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_new
	},
	{ .cmd = OVS_VPORT_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_del
	},
	{ .cmd = OVS_VPORT_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_get,
	  .dumpit = ovs_vport_cmd_dump
	},
	{ .cmd = OVS_VPORT_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_set,
	},
};

struct genl_family_and_ops {
	struct genl_family *family;
	const struct genl_ops *ops;
	int n_ops;
	struct genl_multicast_group *group;
};

static const struct genl_family_and_ops dp_genl_families[] = {
	{ &dp_datapath_genl_family,
	  dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
	  &ovs_dp_datapath_multicast_group },
	{ &dp_vport_genl_family,
	  dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
	  &ovs_dp_vport_multicast_group },
	{ &dp_flow_genl_family,
	  dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
	  &ovs_dp_flow_multicast_group },
	{ &dp_packet_genl_family,
	  dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
	  NULL },
};

static void dp_unregister_genl(int n_families)
{
	int i;

	for (i = 0; i < n_families; i++)
		genl_unregister_family(dp_genl_families[i].family);
}

static int dp_register_genl(void)
{
	int n_registered;
	int err;
	int i;

	n_registered = 0;
	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
		const struct genl_family_and_ops *f = &dp_genl_families[i];

		f->family->ops = f->ops;
		f->family->n_ops = f->n_ops;
		err = genl_register_family(f->family);
		if (err)
			goto error;
		n_registered++;

		if (f->group) {
			err = genl_register_mc_group(f->family, f->group);
			if (err)
				goto error;
		}
	}

	return 0;

error:
	dp_unregister_genl(n_registered);
	return err;
}

static int __net_init ovs_init_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	INIT_LIST_HEAD(&ovs_net->dps);
	INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
	return 0;
}

static void __net_exit ovs_exit_net(struct net *net)
{
	struct datapath *dp, *dp_next;
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	ovs_lock();
	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
		__dp_destroy(dp);
	ovs_unlock();

	cancel_work_sync(&ovs_net->dp_notify_work);
}

static struct pernet_operations ovs_net_ops = {
	.init = ovs_init_net,
	.exit = ovs_exit_net,
	.id   = &ovs_net_id,
	.size = sizeof(struct ovs_net),
};

static int __init dp_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));

	pr_info("Open vSwitch switching datapath\n");

	err = ovs_flow_init();
	if (err)
		goto error;

	err = ovs_vport_init();
	if (err)
		goto error_flow_exit;

	err = register_pernet_device(&ovs_net_ops);
	if (err)
		goto error_vport_exit;

	err = register_netdevice_notifier(&ovs_dp_device_notifier);
	if (err)
		goto error_netns_exit;

	err = dp_register_genl();
	if (err < 0)
		goto error_unreg_notifier;

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
	unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
	ovs_vport_exit();
error_flow_exit:
	ovs_flow_exit();
error:
	return err;
}

static void dp_cleanup(void)
{
	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
	unregister_pernet_device(&ovs_net_ops);
	rcu_barrier();
	ovs_vport_exit();
	ovs_flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");