/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/netfilter/nf_queue.h>
#include <net/netns/generic.h>
#include <net/netfilter/nfnetlink_queue.h>

#include <linux/atomic.h>

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

/* We're using struct nlattr which has 16bit nla_len. Note that nla_len
 * includes the header length. Thus, the maximum packet length that we
 * support is 65531 bytes. We send truncated packets if the specified length
 * is larger than that.  Userspace can check for presence of NFQA_CAP_LEN
 * attribute to detect truncation.
 */
#define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN)

struct nfqnl_instance {
	struct hlist_node hlist;		/* global list of queues */
	struct rcu_head rcu;

	int peer_portid;
	unsigned int queue_maxlen;
	unsigned int copy_range;
	unsigned int queue_dropped;
	unsigned int queue_user_dropped;

	u_int16_t queue_num;			/* number of this queue */
	u_int8_t copy_mode;
	u_int32_t flags;			/* Set using NFQA_CFG_FLAGS */
/*
 * Following fields are dirtied for each queued packet,
 * keep them in same cache line if possible.
 */
	spinlock_t	lock;
	unsigned int	queue_total;
	unsigned int	id_sequence;		/* 'sequence' of pkt ids */
	struct list_head queue_list;		/* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static int nfnl_queue_net_id __read_mostly;

#define INSTANCE_BUCKETS	16
struct nfnl_queue_net {
	spinlock_t instances_lock;
	struct hlist_head instance_table[INSTANCE_BUCKETS];
};

static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net)
{
	return net_generic(net, nfnl_queue_net_id);
}

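/* Hash a queue number into one of the INSTANCE_BUCKETS instance table buckets. */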
static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
	return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS;
}

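/* Look up the instance for @queue_num; runs under the RCU read lock or
 * under instances_lock.
 */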
static struct nfqnl_instance *
instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
{
	struct hlist_head *head;
	struct nfqnl_instance *inst;

	head = &q->instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry_rcu(inst, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
	return NULL;
}

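/* Allocate a new queue instance for @queue_num, bind it to the netlink
 * @portid that configured it, and insert it into the instance table.
 */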
static struct nfqnl_instance *
instance_create(struct nfnl_queue_net *q, u_int16_t queue_num,
		int portid)
{
	struct nfqnl_instance *inst;
	unsigned int h;
	int err;

	spin_lock(&q->instances_lock);
	if (instance_lookup(q, queue_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	inst->queue_num = queue_num;
	inst->peer_portid = portid;
	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
	inst->copy_range = NFQNL_MAX_COPY_RANGE;
	inst->copy_mode = NFQNL_COPY_NONE;
	spin_lock_init(&inst->lock);
	INIT_LIST_HEAD(&inst->queue_list);

	if (!try_module_get(THIS_MODULE)) {
		err = -EAGAIN;
		goto out_free;
	}

	h = instance_hashfn(queue_num);
	hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);

	spin_unlock(&q->instances_lock);

	return inst;

out_free:
	kfree(inst);
out_unlock:
	spin_unlock(&q->instances_lock);
	return ERR_PTR(err);
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
			unsigned long data);

static void
instance_destroy_rcu(struct rcu_head *head)
{
	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
						   rcu);

	nfqnl_flush(inst, NULL, 0);
	kfree(inst);
	module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
	hlist_del_rcu(&inst->hlist);
	call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
{
	spin_lock(&q->instances_lock);
	__instance_destroy(inst);
	spin_unlock(&q->instances_lock);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_add_tail(&entry->list, &queue->queue_list);
	queue->queue_total++;
}

static void
__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_del(&entry->list);
	queue->queue_total--;
}

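/* Remove and return the queued entry with packet id @id, or NULL if it is
 * no longer queued.
 */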
static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
	struct nf_queue_entry *entry = NULL, *i;

	spin_lock_bh(&queue->lock);

	list_for_each_entry(i, &queue->queue_list, list) {
		if (i->id == id) {
			entry = i;
			break;
		}
	}

	if (entry)
		__dequeue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);

	return entry;
}

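/* Reinject (with NF_DROP verdict) every queued entry for which @cmpfn
 * returns true, or all entries if @cmpfn is NULL.
 */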
static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nf_queue_entry *entry, *next;

	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue->queue_total--;
			nf_reinject(entry, NF_DROP);
		}
	}
	spin_unlock_bh(&queue->lock);
}

static int
nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
		      bool csum_verify)
{
	__u32 flags = 0;

	if (packet->ip_summed == CHECKSUM_PARTIAL)
		flags = NFQA_SKB_CSUMNOTREADY;
	else if (csum_verify)
		flags = NFQA_SKB_CSUM_NOTVERIFIED;

	if (skb_is_gso(packet))
		flags |= NFQA_SKB_GSO;

	return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0;
}

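/* Report the owning socket's credentials (fsuid/fsgid) to userspace via the
 * NFQA_UID and NFQA_GID attributes.
 */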
static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
{
	const struct cred *cred;

	if (sk->sk_state == TCP_TIME_WAIT)
		return 0;

	read_lock_bh(&sk->sk_callback_lock);
	if (sk->sk_socket && sk->sk_socket->file) {
		cred = sk->sk_socket->file->f_cred;
		if (nla_put_be32(skb, NFQA_UID,
		    htonl(from_kuid_munged(&init_user_ns, cred->fsuid))))
			goto nla_put_failure;
		if (nla_put_be32(skb, NFQA_GID,
		    htonl(from_kgid_munged(&init_user_ns, cred->fsgid))))
			goto nla_put_failure;
	}
	read_unlock_bh(&sk->sk_callback_lock);
	return 0;

nla_put_failure:
	read_unlock_bh(&sk->sk_callback_lock);
	return -1;
}

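/* Build the NFQNL_MSG_PACKET netlink message describing @entry, copying
 * packet metadata and (depending on the copy mode) payload.  *packet_id_ptr
 * is set to the packet id field inside the message so the caller can fill
 * it in once an id has been assigned.
 */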
static struct sk_buff *
nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
			   struct nf_queue_entry *entry,
			   __be32 **packet_id_ptr)
{
	size_t size;
	size_t data_len = 0, cap_len = 0;
	unsigned int hlen = 0;
	struct sk_buff *skb;
	struct nlattr *nla;
	struct nfqnl_msg_packet_hdr *pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct sk_buff *entskb = entry->skb;
	struct net_device *indev;
	struct net_device *outdev;
	struct nf_conn *ct = NULL;
	enum ip_conntrack_info uninitialized_var(ctinfo);
	bool csum_verify;

	size =    nlmsg_total_size(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
		+ nla_total_size(sizeof(u_int32_t))	/* skbinfo */
		+ nla_total_size(sizeof(u_int32_t));	/* cap_len */

	if (entskb->tstamp.tv64)
		size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

	if (entry->hook <= NF_INET_FORWARD ||
	   (entry->hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
		csum_verify = !skb_csum_unnecessary(entskb);
	else
		csum_verify = false;

	outdev = entry->outdev;

	switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		break;

	case NFQNL_COPY_PACKET:
		if (!(queue->flags & NFQA_CFG_F_GSO) &&
		    entskb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(entskb))
			return NULL;

		data_len = ACCESS_ONCE(queue->copy_range);
		if (data_len > entskb->len)
			data_len = entskb->len;

		hlen = skb_zerocopy_headlen(entskb);
		hlen = min_t(unsigned int, hlen, data_len);
		size += sizeof(struct nlattr) + hlen;
		cap_len = entskb->len;
		break;
	}

	if (queue->flags & NFQA_CFG_F_CONNTRACK)
		ct = nfqnl_ct_get(entskb, &size, &ctinfo);

	if (queue->flags & NFQA_CFG_F_UID_GID) {
		size +=  (nla_total_size(sizeof(u_int32_t))	/* uid */
			+ nla_total_size(sizeof(u_int32_t)));	/* gid */
	}

	skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
				  GFP_ATOMIC);
	if (!skb) {
		skb_tx_error(entskb);
		return NULL;
	}

	nlh = nlmsg_put(skb, 0, 0,
			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
			sizeof(struct nfgenmsg), 0);
	if (!nlh) {
		skb_tx_error(entskb);
		kfree_skb(skb);
		return NULL;
	}
	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = entry->pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(queue->queue_num);

	nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
	pmsg = nla_data(nla);
	pmsg->hw_protocol	= entskb->protocol;
	pmsg->hook		= entry->hook;
	*packet_id_ptr		= &pmsg->packet_id;

	indev = entry->indev;
	if (indev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
			goto nla_put_failure;
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: indev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by __nf_queue */
			    nla_put_be32(skb, NFQA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;
			if (entskb->nf_bridge && entskb->nf_bridge->physindev &&
			    nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
					 htonl(entskb->nf_bridge->physindev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (outdev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
			goto nla_put_failure;
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by __nf_queue */
			    nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			/* Case 2: outdev is bridge group, we need to look for
			 * physical output device (when called from ipv4) */
			if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;
			if (entskb->nf_bridge && entskb->nf_bridge->physoutdev &&
			    nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					 htonl(entskb->nf_bridge->physoutdev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (entskb->mark &&
	    nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
		goto nla_put_failure;

	if (indev && entskb->dev &&
	    entskb->mac_header != entskb->network_header) {
		struct nfqnl_msg_packet_hw phw;
		int len;

		memset(&phw, 0, sizeof(phw));
		len = dev_parse_header(entskb, phw.hw_addr);
		if (len) {
			phw.hw_addrlen = htons(len);
			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}

	if (entskb->tstamp.tv64) {
		struct nfqnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(entskb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;
	}

	if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
	    nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
		goto nla_put_failure;

	if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
		goto nla_put_failure;

	if (cap_len > data_len &&
	    nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
		goto nla_put_failure;

	if (nfqnl_put_packet_info(skb, entskb, csum_verify))
		goto nla_put_failure;

	if (data_len) {
		struct nlattr *nla;

		if (skb_tailroom(skb) < sizeof(*nla) + hlen)
			goto nla_put_failure;

		nla = (struct nlattr *)skb_put(skb, sizeof(*nla));
		nla->nla_type = NFQA_PAYLOAD;
		nla->nla_len = nla_attr_size(data_len);

		if (skb_zerocopy(skb, entskb, data_len, hlen))
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb->len;
	return skb;

nla_put_failure:
	skb_tx_error(entskb);
	kfree_skb(skb);
	net_err_ratelimited("nf_queue: error creating packet message\n");
	return NULL;
}

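/* Queue a single (non-GSO or already segmented) entry: build the netlink
 * message, assign the packet id and unicast it to the listening peer.
 * With NFQA_CFG_F_FAIL_OPEN set, a full queue accepts the packet instead
 * of dropping it.
 */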
static int
__nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
			struct nf_queue_entry *entry)
{
	struct sk_buff *nskb;
	int err = -ENOBUFS;
	__be32 *packet_id_ptr;
	int failopen = 0;

	nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
	if (nskb == NULL) {
		err = -ENOMEM;
		goto err_out;
	}
	spin_lock_bh(&queue->lock);

	if (queue->queue_total >= queue->queue_maxlen) {
		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
			failopen = 1;
			err = 0;
		} else {
			queue->queue_dropped++;
			net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
					     queue->queue_total);
		}
		goto err_out_free_nskb;
	}
	entry->id = ++queue->id_sequence;
	*packet_id_ptr = htonl(entry->id);

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
	if (err < 0) {
		queue->queue_user_dropped++;
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	return 0;

err_out_free_nskb:
	kfree_skb(nskb);
err_out_unlock:
	spin_unlock_bh(&queue->lock);
	if (failopen)
		nf_reinject(entry, NF_ACCEPT);
err_out:
	return err;
}

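/* Duplicate a queue entry so each GSO segment can be queued on its own;
 * takes the references (devices, socket) the copy needs.
 */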
static struct nf_queue_entry *
nf_queue_entry_dup(struct nf_queue_entry *e)
{
	struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
	if (entry) {
		if (nf_queue_entry_get_refs(entry))
			return entry;
		kfree(entry);
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* When called from bridge netfilter, skb->data must point to MAC header
 * before calling skb_gso_segment(). Else, original MAC header is lost
 * and segmented skbs will be sent to wrong destination.
 */
static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
{
	if (skb->nf_bridge)
		__skb_push(skb, skb->network_header - skb->mac_header);
}

static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
{
	if (skb->nf_bridge)
		__skb_pull(skb, skb->network_header - skb->mac_header);
}
#else
#define nf_bridge_adjust_skb_data(s) do {} while (0)
#define nf_bridge_adjust_segmented_data(s) do {} while (0)
#endif

static void free_entry(struct nf_queue_entry *entry)
{
	nf_queue_entry_release_refs(entry);
	kfree(entry);
}

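/* Queue one GSO segment.  The last segment reuses the original entry;
 * every other segment gets a duplicated entry of its own.
 */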
static int
__nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
			   struct sk_buff *skb, struct nf_queue_entry *entry)
{
	int ret = -ENOMEM;
	struct nf_queue_entry *entry_seg;

	nf_bridge_adjust_segmented_data(skb);

	if (skb->next == NULL) { /* last packet, no need to copy entry */
		struct sk_buff *gso_skb = entry->skb;
		entry->skb = skb;
		ret = __nfqnl_enqueue_packet(net, queue, entry);
		if (ret)
			entry->skb = gso_skb;
		return ret;
	}

	skb->next = NULL;

	entry_seg = nf_queue_entry_dup(entry);
	if (entry_seg) {
		entry_seg->skb = skb;
		ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
		if (ret)
			free_entry(entry_seg);
	}
	return ret;
}

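/* Queue handler entry point (called from nf_queue via nf_hook_slow).
 * Unless userspace asked for NFQA_CFG_F_GSO, GSO packets are segmented in
 * software and each segment is queued separately.
 */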
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
	unsigned int queued;
	struct nfqnl_instance *queue;
	struct sk_buff *skb, *segs;
	int err = -ENOBUFS;
	struct net *net = dev_net(entry->indev ?
				  entry->indev : entry->outdev);
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	/* rcu_read_lock()ed by nf_hook_slow() */
	queue = instance_lookup(q, queuenum);
	if (!queue)
		return -ESRCH;

	if (queue->copy_mode == NFQNL_COPY_NONE)
		return -EINVAL;

	skb = entry->skb;

	switch (entry->pf) {
	case NFPROTO_IPV4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case NFPROTO_IPV6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	}

	if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
		return __nfqnl_enqueue_packet(net, queue, entry);

	nf_bridge_adjust_skb_data(skb);
	segs = skb_gso_segment(skb, 0);
	/* Does not use PTR_ERR to limit the number of error codes that can be
	 * returned by nf_queue.  For instance, callers rely on -ECANCELED to
	 * mean 'ignore this hook'.
	 */
	if (IS_ERR_OR_NULL(segs))
		goto out_err;
	queued = 0;
	err = 0;
	do {
		struct sk_buff *nskb = segs->next;
		if (err == 0)
			err = __nfqnl_enqueue_packet_gso(net, queue,
							segs, entry);
		if (err == 0)
			queued++;
		else
			kfree_skb(segs);
		segs = nskb;
	} while (segs);

	if (queued) {
		if (err) /* some segments are already queued */
			free_entry(entry);
		kfree_skb(skb);
		return 0;
	}
 out_err:
	nf_bridge_adjust_segmented_data(skb);
	return err;
}

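/* Replace the queued packet's payload with @data_len bytes supplied by
 * userspace, trimming or growing the skb as needed; the checksum is
 * invalidated since the contents changed.
 */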
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
{
	struct sk_buff *nskb;

	if (diff < 0) {
		if (pskb_trim(e->skb, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
					       diff, GFP_ATOMIC);
			if (!nskb) {
				printk(KERN_WARNING "nf_queue: OOM "
				      "in mangle, dropping packet\n");
				return -ENOMEM;
			}
			kfree_skb(e->skb);
			e->skb = nskb;
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, data, data_len);
	e->skb->ip_summed = CHECKSUM_NONE;
	return 0;
}

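/* Set the queue's copy mode and copy range; the range is clamped to
 * NFQNL_MAX_COPY_RANGE for packet mode and cleared otherwise.
 */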
static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status = 0;

	spin_lock_bh(&queue->lock);
	switch (mode) {
	case NFQNL_COPY_NONE:
	case NFQNL_COPY_META:
		queue->copy_mode = mode;
		queue->copy_range = 0;
		break;

	case NFQNL_COPY_PACKET:
		queue->copy_mode = mode;
		if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
			queue->copy_range = NFQNL_MAX_COPY_RANGE;
		else
			queue->copy_range = range;
		break;

	default:
		status = -EINVAL;

	}
	spin_unlock_bh(&queue->lock);

	return status;
}

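/* nfqnl_cmpfn helper: match entries whose input or output device (or, for
 * bridged traffic, physical in/out device) has the given ifindex.
 */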
static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
	if (entry->indev)
		if (entry->indev->ifindex == ifindex)
			return 1;
	if (entry->outdev)
		if (entry->outdev->ifindex == ifindex)
			return 1;
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(struct net *net, int ifindex)
{
	int i;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	rcu_read_lock();

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct nfqnl_instance *inst;
		struct hlist_head *head = &q->instance_table[i];

		hlist_for_each_entry_rcu(inst, head, hlist)
			nfqnl_flush(inst, dev_cmp, ifindex);
	}

	rcu_read_unlock();
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev_net(dev), dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call	= nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;
	struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
		int i;

		/* destroy all instances for this portid */
		spin_lock(&q->instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &q->instance_table[i];

			hlist_for_each_entry_safe(inst, t2, head, hlist) {
				if (n->portid == inst->peer_portid)
					__instance_destroy(inst);
			}
		}
		spin_unlock(&q->instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call	= nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
	[NFQA_CT]		= { .type = NLA_UNSPEC },
	[NFQA_EXP]		= { .type = NLA_UNSPEC },
};

static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
};

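/* Like instance_lookup(), but only succeeds if the queue is owned by the
 * netlink portid issuing the verdict.
 */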
static struct nfqnl_instance *
verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, int nlportid)
{
	struct nfqnl_instance *queue;

	queue = instance_lookup(q, queue_num);
	if (!queue)
		return ERR_PTR(-ENODEV);

	if (queue->peer_portid != nlportid)
		return ERR_PTR(-EPERM);

	return queue;
}

static struct nfqnl_msg_verdict_hdr*
verdicthdr_get(const struct nlattr * const nfqa[])
{
	struct nfqnl_msg_verdict_hdr *vhdr;
	unsigned int verdict;

	if (!nfqa[NFQA_VERDICT_HDR])
		return NULL;

	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
	verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
	if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
		return NULL;
	return vhdr;
}

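/* Wrap-around safe comparison: true if packet id @id comes after @max. */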
static int nfq_id_after(unsigned int id, unsigned int max)
{
	return (int)(id - max) > 0;
}

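/* Handle NFQNL_MSG_VERDICT_BATCH: apply one verdict (and optional mark) to
 * every queued packet with an id up to and including the one given.
 */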
static int
nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nfqa[])
{
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nf_queue_entry *entry, *tmp;
	unsigned int verdict, maxid;
	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	LIST_HEAD(batch_list);
	u16 queue_num = ntohs(nfmsg->res_id);

	struct net *net = sock_net(ctnl);
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	queue = verdict_instance_lookup(q, queue_num,
					NETLINK_CB(skb).portid);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	vhdr = verdicthdr_get(nfqa);
	if (!vhdr)
		return -EINVAL;

	verdict = ntohl(vhdr->verdict);
	maxid = ntohl(vhdr->id);

	spin_lock_bh(&queue->lock);

	list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
		if (nfq_id_after(entry->id, maxid))
			break;
		__dequeue_entry(queue, entry);
		list_add_tail(&entry->list, &batch_list);
	}

	spin_unlock_bh(&queue->lock);

	if (list_empty(&batch_list))
		return -ENOENT;

	list_for_each_entry_safe(entry, tmp, &batch_list, list) {
		if (nfqa[NFQA_MARK])
			entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
		nf_reinject(entry, verdict);
	}
	return 0;
}

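/* Handle NFQNL_MSG_VERDICT: reinject a single queued packet, optionally
 * updating its conntrack info, payload and mark first.
 */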
static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nfqa[])
{
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);

	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict;
	struct nf_queue_entry *entry;
	enum ip_conntrack_info uninitialized_var(ctinfo);
	struct nf_conn *ct = NULL;

	struct net *net = sock_net(ctnl);
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	queue = instance_lookup(q, queue_num);
	if (!queue)
		queue = verdict_instance_lookup(q, queue_num,
						NETLINK_CB(skb).portid);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	vhdr = verdicthdr_get(nfqa);
	if (!vhdr)
		return -EINVAL;

	verdict = ntohl(vhdr->verdict);

	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
	if (entry == NULL)
		return -ENOENT;

	if (nfqa[NFQA_CT]) {
		ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);
		if (ct && nfqa[NFQA_EXP]) {
			nfqnl_attach_expect(ct, nfqa[NFQA_EXP],
					    NETLINK_CB(skb).portid,
					    nlmsg_report(nlh));
		}
	}

	if (nfqa[NFQA_PAYLOAD]) {
		u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
		int diff = payload_len - entry->skb->len;

		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
				 payload_len, entry, diff) < 0)
			verdict = NF_DROP;

		if (ct)
			nfqnl_ct_seq_adjust(entry->skb, ct, ctinfo, diff);
	}

	if (nfqa[NFQA_MARK])
		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

	nf_reinject(entry, verdict);
	return 0;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		  const struct nlmsghdr *nlh,
		  const struct nlattr * const nfqa[])
{
	return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
	.outfn	= &nfqnl_enqueue_packet,
};

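/* Handle NFQNL_MSG_CONFIG: bind/unbind queue instances and update their
 * copy mode, maximum queue length and flags.
 */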
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		  const struct nlmsghdr *nlh,
		  const struct nlattr * const nfqa[])
{
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_instance *queue;
	struct nfqnl_msg_config_cmd *cmd = NULL;
	struct net *net = sock_net(ctnl);
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
	int ret = 0;

	if (nfqa[NFQA_CFG_CMD]) {
		cmd = nla_data(nfqa[NFQA_CFG_CMD]);

		/* Obsolete commands without queue context */
		switch (cmd->command) {
		case NFQNL_CFG_CMD_PF_BIND: return 0;
		case NFQNL_CFG_CMD_PF_UNBIND: return 0;
		}
	}

	rcu_read_lock();
	queue = instance_lookup(q, queue_num);
	if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
		ret = -EPERM;
		goto err_out_unlock;
	}

	if (cmd != NULL) {
		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue) {
				ret = -EBUSY;
				goto err_out_unlock;
			}
			queue = instance_create(q, queue_num,
						NETLINK_CB(skb).portid);
			if (IS_ERR(queue)) {
				ret = PTR_ERR(queue);
				goto err_out_unlock;
			}
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue) {
				ret = -ENODEV;
				goto err_out_unlock;
			}
			instance_destroy(q, queue);
			break;
		case NFQNL_CFG_CMD_PF_BIND:
		case NFQNL_CFG_CMD_PF_UNBIND:
			break;
		default:
			ret = -ENOTSUPP;
			break;
		}
	}

	if (nfqa[NFQA_CFG_PARAMS]) {
		struct nfqnl_msg_config_params *params;

		if (!queue) {
			ret = -ENODEV;
			goto err_out_unlock;
		}
		params = nla_data(nfqa[NFQA_CFG_PARAMS]);
		nfqnl_set_mode(queue, params->copy_mode,
				ntohl(params->copy_range));
	}

	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
		__be32 *queue_maxlen;

		if (!queue) {
			ret = -ENODEV;
			goto err_out_unlock;
		}
		queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
		spin_lock_bh(&queue->lock);
		queue->queue_maxlen = ntohl(*queue_maxlen);
		spin_unlock_bh(&queue->lock);
	}

	if (nfqa[NFQA_CFG_FLAGS]) {
		__u32 flags, mask;

		if (!queue) {
			ret = -ENODEV;
			goto err_out_unlock;
		}

		if (!nfqa[NFQA_CFG_MASK]) {
			/* A mask is needed to specify which flags are being
			 * changed.
			 */
			ret = -EINVAL;
			goto err_out_unlock;
		}

		flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
		mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));

		if (flags >= NFQA_CFG_F_MAX) {
			ret = -EOPNOTSUPP;
			goto err_out_unlock;
		}

		spin_lock_bh(&queue->lock);
		queue->flags &= ~mask;
		queue->flags |= flags & mask;
		spin_unlock_bh(&queue->lock);
	}

err_out_unlock:
	rcu_read_unlock();
	return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= { .call_rcu = nfqnl_recv_unsupp,
				    .attr_count = NFQA_MAX, },
	[NFQNL_MSG_VERDICT]	= { .call_rcu = nfqnl_recv_verdict,
				    .attr_count = NFQA_MAX,
				    .policy = nfqa_verdict_policy },
	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
				    .attr_count = NFQA_CFG_MAX,
				    .policy = nfqa_cfg_policy },
	[NFQNL_MSG_VERDICT_BATCH]={ .call_rcu = nfqnl_recv_verdict_batch,
				    .attr_count = NFQA_MAX,
				    .policy = nfqa_verdict_batch_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

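/* Iterator helpers for the /proc/net/netfilter/nfnetlink_queue seq_file. */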
static struct hlist_node *get_first(struct seq_file *seq)
{
	struct iter_state *st = seq->private;
	struct net *net;
	struct nfnl_queue_net *q;

	if (!st)
		return NULL;

	net = seq_file_net(seq);
	q = nfnl_queue_pernet(net);
	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&q->instance_table[st->bucket]))
			return q->instance_table[st->bucket].first;
	}
	return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
	struct iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	h = h->next;
	while (!h) {
		struct nfnl_queue_net *q;

		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		q = nfnl_queue_pernet(net);
		h = q->instance_table[st->bucket].first;
	}
	return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head;
	head = get_first(seq);

	if (head)
		while (pos && (head = get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *s, loff_t *pos)
	__acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
	spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
	return get_idx(s, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
	__releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
	spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
}

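/* One line per instance: queue number, peer portid, queued packets, copy
 * mode/range, drop counters and the last assigned packet id.
 */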
static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
			  inst->queue_num,
			  inst->peer_portid, inst->queue_total,
			  inst->copy_mode, inst->copy_range,
			  inst->queue_dropped, inst->queue_user_dropped,
			  inst->id_sequence, 1);
}

static const struct seq_operations nfqnl_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &nfqnl_seq_ops,
			sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqnl_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

#endif /* PROC_FS */

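/* Per-network-namespace setup: initialise the instance table and register
 * the nfnetlink_queue proc entry.
 */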
static int __net_init nfnl_queue_net_init(struct net *net)
{
	unsigned int i;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&q->instance_table[i]);

	spin_lock_init(&q->instances_lock);

#ifdef CONFIG_PROC_FS
	if (!proc_create("nfnetlink_queue", 0440,
			 net->nf.proc_netfilter, &nfqnl_file_ops))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit nfnl_queue_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
#endif
}

static struct pernet_operations nfnl_queue_net_ops = {
	.init	= nfnl_queue_net_init,
	.exit	= nfnl_queue_net_exit,
	.id	= &nfnl_queue_net_id,
	.size	= sizeof(struct nfnl_queue_net),
};

static int __init nfnetlink_queue_init(void)
{
	int status = -ENOMEM;

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		pr_err("nf_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

	status = register_pernet_subsys(&nfnl_queue_net_ops);
	if (status < 0) {
		pr_err("nf_queue: failed to register pernet ops\n");
		goto cleanup_subsys;
	}
	register_netdevice_notifier(&nfqnl_dev_notifier);
	nf_register_queue_handler(&nfqh);
	return status;

cleanup_subsys:
	nfnetlink_subsys_unregister(&nfqnl_subsys);
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	return status;
}

static void __exit nfnetlink_queue_fini(void)
{
	nf_unregister_queue_handler();
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
	unregister_pernet_subsys(&nfnl_queue_net_ops);
	nfnetlink_subsys_unregister(&nfqnl_subsys);
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);