/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

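/* Deferred actions (from recirc and sample actions) are queued on a small
 * per-CPU FIFO and executed after the current action list finishes, which
 * bounds the recursion depth of nested action execution.
 */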
#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

static struct action_fifo __percpu *action_fifos;
static DEFINE_PER_CPU(int, exec_actions_level);

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Queue 'skb' and 'attr' for deferred execution.  Returns the queued
 * entry, or NULL if the per-CPU FIFO is full.
 */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}

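/* Make sure the first 'write_len' bytes of 'skb' are linear and safe to
 * modify, un-sharing the data of a cloned skb if necessary.
 */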
static int make_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

/* remove VLAN header from packet and update csum accordingly. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
	struct vlan_hdr *vhdr;
	int err;

	err = make_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*current_tci = vhdr->h_vlan_TCI;

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;
	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_len(skb);

	return 0;
}

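/* Strip the outermost VLAN tag, whether it is carried in the hardware
 * accelerated vlan_tci or inline in the packet data, and promote any
 * remaining inner 802.1Q tag to the accelerated field.
 */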
static int pop_vlan(struct sk_buff *skb)
{
	__be16 tci;
	int err;

	if (likely(vlan_tx_tag_present(skb))) {
		skb->vlan_tci = 0;
	} else {
		if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __pop_vlan_tci(skb, &tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely(skb->protocol != htons(ETH_P_8021Q) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	err = __pop_vlan_tci(skb, &tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
	return 0;
}

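/* Push a new outermost VLAN tag into the hardware accelerated field; an
 * existing accelerated tag is first written back into the packet data.
 */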
static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
{
	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 current_tag;

		/* push down current VLAN tag */
		current_tag = vlan_tx_tag_get(skb);

		if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
			return -ENOMEM;

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

	}
	__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
	return 0;
}

static int set_eth_addr(struct sk_buff *skb,
			const struct ovs_key_ethernet *eth_key)
{
	int err;
	err = make_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
	ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);

	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	return 0;
}

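/* Rewrite one IPv4 address and update the IP header checksum as well as
 * the TCP/UDP checksum, which covers the pseudo-header.
 */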
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
				__be32 *addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 *addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}

	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

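/* Update the TCP, UDP or ICMPv6 checksum for a rewritten IPv6 address;
 * IPv6 itself carries no header checksum.
 */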
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, 1);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, 1);
	}
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
{
	nh->priority = tc >> 4;
	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
{
	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16;
	nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
	nh->flow_lbl[2] = fl & 0x000000FF;
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
{
	struct iphdr *nh;
	int err;

	err = make_writable(skb, skb_network_offset(skb) +
				 sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	if (ipv4_key->ipv4_src != nh->saddr)
		set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);

	if (ipv4_key->ipv4_dst != nh->daddr)
		set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);

	if (ipv4_key->ipv4_tos != nh->tos)
		ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);

	if (ipv4_key->ipv4_ttl != nh->ttl)
		set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);

	return 0;
}

static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
{
	struct ipv6hdr *nh;
	int err;
	__be32 *saddr;
	__be32 *daddr;

	err = make_writable(skb, skb_network_offset(skb) +
			    sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	saddr = (__be32 *)&nh->saddr;
	daddr = (__be32 *)&nh->daddr;

	if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src)))
		set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
			      ipv6_key->ipv6_src, true);

	if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;

		if (ipv6_ext_hdr(nh->nexthdr))
			recalc_csum = ipv6_find_hdr(skb, &offset,
						    NEXTHDR_ROUTING, NULL,
						    &flags) != NEXTHDR_ROUTING;

		set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
			      ipv6_key->ipv6_dst, recalc_csum);
	}

	set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
	set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
	nh->hop_limit = ipv6_key->ipv6_hlimit;

	return 0;
}

/* Must follow make_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			 __be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
	*port = new_port;
	skb_clear_hash(skb);
}

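/* Like set_tp_port(), but a zero UDP checksum means "no checksum", so the
 * checksum is only adjusted when it is in use and not being computed by
 * hardware, and a computed zero is folded to CSUM_MANGLED_0.
 */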
static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
{
	struct udphdr *uh = udp_hdr(skb);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		set_tp_port(skb, port, new_port, &uh->check);

		if (!uh->check)
			uh->check = CSUM_MANGLED_0;
	} else {
		*port = new_port;
		skb_clear_hash(skb);
	}
}

static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
{
	struct udphdr *uh;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	if (udp_port_key->udp_src != uh->source)
		set_udp_port(skb, &uh->source, udp_port_key->udp_src);

	if (udp_port_key->udp_dst != uh->dest)
		set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);

	return 0;
}

static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
{
	struct tcphdr *th;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	if (tcp_port_key->tcp_src != th->source)
		set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);

	if (tcp_port_key->tcp_dst != th->dest)
		set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);

	return 0;
}

static int set_sctp(struct sk_buff *skb,
		     const struct ovs_key_sctp *sctp_port_key)
{
	struct sctphdr *sh;
	int err;
	unsigned int sctphoff = skb_transport_offset(skb);

	err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	if (sctp_port_key->sctp_src != sh->source ||
	    sctp_port_key->sctp_dst != sh->dest) {
		__le32 old_correct_csum, new_csum, old_csum;

		old_csum = sh->checksum;
		old_correct_csum = sctp_compute_cksum(skb, sctphoff);

		sh->source = sctp_port_key->sctp_src;
		sh->dest = sctp_port_key->sctp_dst;

		new_csum = sctp_compute_cksum(skb, sctphoff);

		/* Carry any checksum errors through. */
		sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

		skb_clear_hash(skb);
	}

	return 0;
}

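/* Send 'skb' out of the given datapath port, consuming the skb. */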
static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct vport *vport;

	if (unlikely(!skb))
		return -ENOMEM;

	vport = ovs_vport_rcu(dp, out_port);
	if (unlikely(!vport)) {
		kfree_skb(skb);
		return -ENODEV;
	}

	ovs_vport_send(vport, skb);
	return 0;
}

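/* Send the packet to userspace via an OVS_PACKET_CMD_ACTION upcall, using
 * the userdata and netlink PID attributes nested in 'attr'.
 */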
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.key = key;
	upcall.userdata = NULL;
	upcall.portid = 0;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
		 a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;
		}
	}

	return ovs_dp_upcall(dp, skb, &upcall);
}

static bool last_action(const struct nlattr *a, int rem)
{
	return a->nla_len == rem;
}

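/* Execute the nested actions of a sample action with the configured
 * probability; the packet is cloned (or copied for an upcall), so the
 * caller's skb is left untouched.
 */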
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
		 a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			if (prandom_u32() >= nla_get_u32(a))
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* The actions list is empty; do nothing. */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of the sample action is having a single
	 * user-space action.  Treat this usage as a special case:
	 * output_userspace() copies the skb data for the upcall, and the
	 * original skb is still consumed by our caller.
	 */
	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   last_action(a, rem)))
		return output_userspace(dp, skb, key, a);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}

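/* Compute an L4 flow hash over 'skb' and store it in the flow key so a
 * later flow lookup (e.g. after recirculation) can match on it.
 */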
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
				 const struct nlattr *nested_attr)
{
	int err = 0;

	switch (nla_type(nested_attr)) {
	case OVS_KEY_ATTR_PRIORITY:
		skb->priority = nla_get_u32(nested_attr);
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		skb->mark = nla_get_u32(nested_attr);
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		OVS_CB(skb)->egress_tun_info = nla_data(nested_attr);
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, nla_data(nested_attr));
		break;
	}

	return err;
}

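/* Queue the packet for another pass through the flow table with a new
 * recirculation id.  The flow key is refreshed from the packet first,
 * since preceding actions may have modified it.
 */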
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;
	int err;

	err = ovs_flow_key_update(skb, key);
	if (err)
		return err;

	if (!last_action(a, rem)) {
		/* The recirc action is not the last action in the
		 * action list, so we need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, dropping recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, in which case doing a clone
	 * and then freeing the original skbuff is wasteful.  So the following
	 * code is slightly obscure just to avoid that. */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (prev_port != -1) {
			do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a);
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, nla_data(a));
			if (unlikely(err)) /* skb already freed. */
				return err;
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (last_action(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port);
	else
		consume_skb(skb);

	return 0;
}

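/* Run actions queued on the per-CPU FIFO until it drains; called only at
 * the outermost nesting level of ovs_execute_actions().
 */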
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset the FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			struct sw_flow_key *key)
{
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_actions *acts;
	int err;

	acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);

	this_cpu_inc(exec_actions_level);
	OVS_CB(skb)->egress_tun_info = NULL;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (!level)
		process_deferred_actions(dp);

	this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
}