/* Structure dynamic extension infrastructure
 * Copyright (C) 2004 Rusty Russell IBM Corporation
 * Copyright (C) 2007 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (C) 2007 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack_extend.h>

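/* Registered extension types, indexed by extension id.  Writers are
 * serialized by nf_ct_ext_type_mutex; readers rely on RCU.
 */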
static struct nf_ct_ext_type __rcu *nf_ct_ext_types[NF_CT_EXT_NUM];
static DEFINE_MUTEX(nf_ct_ext_type_mutex);

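/* Run the destroy() callback of every extension currently attached to
 * @ct.  The extension area itself is not freed here.
 */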
void __nf_ct_ext_destroy(struct nf_conn *ct)
{
	unsigned int i;
	struct nf_ct_ext_type *t;
	struct nf_ct_ext *ext = ct->ext;

	for (i = 0; i < NF_CT_EXT_NUM; i++) {
		if (!__nf_ct_ext_exist(ext, i))
			continue;

		rcu_read_lock();
		t = rcu_dereference(nf_ct_ext_types[i]);

		/* The nf_ct_ext_type may already have been unregistered;
		 * in that case it is responsible for cleaning up its
		 * private area in all conntracks at unregistration time.
		 */
		if (t && t->destroy)
			t->destroy(ct);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(__nf_ct_ext_destroy);

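/* Allocate the initial extension area for a conntrack and reserve room
 * for extension @id plus @var_alloc_len bytes of variable-length data.
 * Returns a pointer to the reserved region, or NULL on allocation failure.
 */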
static void *
nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
		 size_t var_alloc_len, gfp_t gfp)
{
	unsigned int off, len;
	struct nf_ct_ext_type *t;
	size_t alloc_size;

	rcu_read_lock();
	t = rcu_dereference(nf_ct_ext_types[id]);
	BUG_ON(t == NULL);
	off = ALIGN(sizeof(struct nf_ct_ext), t->align);
	len = off + t->len + var_alloc_len;
	alloc_size = t->alloc_size + var_alloc_len;
	rcu_read_unlock();

	*ext = kzalloc(alloc_size, gfp);
	if (!*ext)
		return NULL;

	(*ext)->offset[id] = off;
	(*ext)->len = len;

	return (void *)(*ext) + off;
}

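/* Attach extension @id to an unconfirmed conntrack, growing the existing
 * extension area with __krealloc() if one is already present.  When the
 * area moves, every present extension's ->move() hook is invoked so it
 * can fix up internal pointers, and the old area is released via
 * kfree_rcu().  Returns a zeroed pointer to the new extension data, or
 * NULL if the extension already exists or the allocation fails.
 */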
void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
			     size_t var_alloc_len, gfp_t gfp)
{
	struct nf_ct_ext *old, *new;
	int i, newlen, newoff;
	struct nf_ct_ext_type *t;

	/* Conntrack must not be confirmed to avoid races on reallocation. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	old = ct->ext;
	if (!old)
		return nf_ct_ext_create(&ct->ext, id, var_alloc_len, gfp);

	if (__nf_ct_ext_exist(old, id))
		return NULL;

	rcu_read_lock();
	t = rcu_dereference(nf_ct_ext_types[id]);
	BUG_ON(t == NULL);

	newoff = ALIGN(old->len, t->align);
	newlen = newoff + t->len + var_alloc_len;
	rcu_read_unlock();

	new = __krealloc(old, newlen, gfp);
	if (!new)
		return NULL;

	if (new != old) {
		for (i = 0; i < NF_CT_EXT_NUM; i++) {
			if (!__nf_ct_ext_exist(old, i))
				continue;

			rcu_read_lock();
			t = rcu_dereference(nf_ct_ext_types[i]);
			if (t && t->move)
				t->move((void *)new + new->offset[i],
					(void *)old + old->offset[i]);
			rcu_read_unlock();
		}
		kfree_rcu(old, rcu);
		ct->ext = new;
	}

	new->offset[id] = newoff;
	new->len = newlen;
	memset((void *)new + newoff, 0, newlen - newoff);
	return (void *)new + newoff;
}
EXPORT_SYMBOL(__nf_ct_ext_add_length);

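/* Recompute alloc_size for the affected extension types.  Every type's
 * alloc_size also reserves room for all types flagged NF_CT_EXT_F_PREALLOC,
 * so registering or unregistering a preallocated type requires updating
 * all types, whereas a non-preallocated type only affects itself.  Must
 * be called with nf_ct_ext_type_mutex held.
 */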
static void update_alloc_size(struct nf_ct_ext_type *type)
{
	int i, j;
	struct nf_ct_ext_type *t1, *t2;
	enum nf_ct_ext_id min = 0, max = NF_CT_EXT_NUM - 1;

	/* unnecessary to update all types */
	if ((type->flags & NF_CT_EXT_F_PREALLOC) == 0) {
		min = type->id;
		max = type->id;
	}

	/* This assumes that extended areas in conntrack for the types
	   whose NF_CT_EXT_F_PREALLOC bit is set are allocated in order */
	for (i = min; i <= max; i++) {
		t1 = rcu_dereference_protected(nf_ct_ext_types[i],
				lockdep_is_held(&nf_ct_ext_type_mutex));
		if (!t1)
			continue;

		t1->alloc_size = ALIGN(sizeof(struct nf_ct_ext), t1->align) +
				 t1->len;
		for (j = 0; j < NF_CT_EXT_NUM; j++) {
			t2 = rcu_dereference_protected(nf_ct_ext_types[j],
				lockdep_is_held(&nf_ct_ext_type_mutex));
			if (t2 == NULL || t2 == t1 ||
			    (t2->flags & NF_CT_EXT_F_PREALLOC) == 0)
				continue;

			t1->alloc_size = ALIGN(t1->alloc_size, t2->align)
					 + t2->len;
		}
	}
}

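/* Usage sketch (illustrative only): a caller that wants per-conntrack
 * private data defines an nf_ct_ext_type and registers it.  The names
 * NF_CT_EXT_EXAMPLE and struct nf_ct_example below are hypothetical
 * placeholders, not part of this file.
 *
 *	static struct nf_ct_ext_type example_extend __read_mostly = {
 *		.len	= sizeof(struct nf_ct_example),
 *		.align	= __alignof__(struct nf_ct_example),
 *		.id	= NF_CT_EXT_EXAMPLE,
 *	};
 *
 *	err = nf_ct_extend_register(&example_extend);
 *	...
 *	nf_ct_extend_unregister(&example_extend);
 */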
/* Register a new extension type.  This MUST be called in process context. */
int nf_ct_extend_register(struct nf_ct_ext_type *type)
{
	int ret = 0;

	mutex_lock(&nf_ct_ext_type_mutex);
	if (nf_ct_ext_types[type->id]) {
		ret = -EBUSY;
		goto out;
	}

	/* This ensures that nf_ct_ext_create() can allocate enough area
	   before updating alloc_size */
	type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
			   + type->len;
	rcu_assign_pointer(nf_ct_ext_types[type->id], type);
	update_alloc_size(type);
out:
	mutex_unlock(&nf_ct_ext_type_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_extend_register);

/* Unregister an extension type.  This MUST be called in process context. */
void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
{
	mutex_lock(&nf_ct_ext_type_mutex);
	RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
	update_alloc_size(type);
	mutex_unlock(&nf_ct_ext_type_mutex);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);