/* Event cache for netfilter. */

/*
 * (C) 2005 Harald Welte <laforge@gnumonks.org>
 * (C) 2005 Patrick McHardy <kaber@trash.net>
 * (C) 2005-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2005 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>

static DEFINE_MUTEX(nf_ct_ecache_mutex);

#define ECACHE_RETRY_WAIT (HZ/10)

enum retry_state {
	STATE_CONGESTED,
	STATE_RESTART,
	STATE_DONE,
};

/* Deliver the pending IPCT_DESTROY events for entries on one cpu's dying
 * list.  At most ARRAY_SIZE(refs) entries are handled per call so the pcpu
 * lock is not held for too long; the caller restarts us if more remain. */
static enum retry_state ecache_work_evict_list(struct ct_pcpu *pcpu)
{
	struct nf_conn *refs[16];
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int evicted = 0;
	enum retry_state ret = STATE_DONE;

	spin_lock(&pcpu->lock);

	hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) {
		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

		if (nf_ct_is_dying(ct))
			continue;

		if (nf_conntrack_event(IPCT_DESTROY, ct)) {
			ret = STATE_CONGESTED;
			break;
		}

		/* we've got the event delivered, now it's dying */
		set_bit(IPS_DYING_BIT, &ct->status);
		refs[evicted] = ct;

		if (++evicted >= ARRAY_SIZE(refs)) {
			ret = STATE_RESTART;
			break;
		}
	}

	spin_unlock(&pcpu->lock);

	/* can't _put while holding lock */
	while (evicted)
		nf_ct_put(refs[--evicted]);

	return ret;
}

static void ecache_work(struct work_struct *work)
{
	struct netns_ct *ctnet =
		container_of(work, struct netns_ct, ecache_dwork.work);
	int cpu, delay = -1;
	struct ct_pcpu *pcpu;

	local_bh_disable();

	for_each_possible_cpu(cpu) {
		enum retry_state ret;

		pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu);

		ret = ecache_work_evict_list(pcpu);

		switch (ret) {
		case STATE_CONGESTED:
			delay = ECACHE_RETRY_WAIT;
			goto out;
		case STATE_RESTART:
			delay = 0;
			break;
		case STATE_DONE:
			break;
		}
	}

 out:
	local_bh_enable();

	/* delay > 0: a notifier was congested, retry after ECACHE_RETRY_WAIT.
	 * delay == 0: a list was only partially evicted, restart right away.
	 * delay < 0: all dying lists are empty, nothing to reschedule. */
	ctnet->ecache_dwork_pending = delay > 0;
	if (delay >= 0)
		schedule_delayed_work(&ctnet->ecache_dwork, delay);
}

/* deliver cached events and clear cache entry - must be called with locally
 * disabled softirqs */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	unsigned long events, missed;
	struct nf_ct_event_notifier *notify;
	struct nf_conntrack_ecache *e;
	struct nf_ct_event item;
	int ret;

	rcu_read_lock();
	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
	if (notify == NULL)
		goto out_unlock;

	e = nf_ct_ecache_find(ct);
	if (e == NULL)
		goto out_unlock;

	events = xchg(&e->cache, 0);

	if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
		goto out_unlock;

	/* We make a copy of the missed event cache without taking
	 * the lock, thus we may send missed events twice.  However,
	 * this does no harm and it happens very rarely. */
	missed = e->missed;

	if (!((events | missed) & e->ctmask))
		goto out_unlock;

	item.ct = ct;
	item.portid = 0;
	item.report = 0;

	ret = notify->fcn(events | missed, &item);

	if (likely(ret >= 0 && !missed))
		goto out_unlock;

	/* The notifier rejected the events (ret < 0): remember them in
	 * e->missed for a later retry.  On success, clear whatever missed
	 * bits were just re-delivered. */
	spin_lock_bh(&ct->lock);
	if (ret < 0)
		e->missed |= events;
	else
		e->missed &= ~missed;
	spin_unlock_bh(&ct->lock);

out_unlock:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
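
/*
 * Illustrative sketch (compiled out, not part of this file): the producer
 * side of the cache flushed above.  Trackers do not call notifiers
 * directly; they OR an event bit into e->cache via
 * nf_conntrack_event_cache() from <net/netfilter/nf_conntrack_ecache.h>,
 * and the bits are delivered in one batch by nf_ct_deliver_cached_events(),
 * e.g. when the conntrack entry is confirmed.  The function name below is
 * hypothetical.
 */
#if 0
static void example_note_proto_change(struct nf_conn *ct)
{
	/* Queue an IPCT_PROTOINFO event; it is coalesced with any other
	 * pending bits and delivered later in a single notifier call. */
	nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
}
#endif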

/* Only one conntrack event notifier may be registered per network
 * namespace at a time; a second registration fails with -EBUSY. */
int nf_conntrack_register_notifier(struct net *net,
				   struct nf_ct_event_notifier *new)
{
	int ret;
	struct nf_ct_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	if (notify != NULL) {
		ret = -EBUSY;
		goto out_unlock;
	}
	rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
	ret = 0;

out_unlock:
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);

void nf_conntrack_unregister_notifier(struct net *net,
				      struct nf_ct_event_notifier *new)
{
	struct nf_ct_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	BUG_ON(notify != new);
	RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
	mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);

int nf_ct_expect_register_notifier(struct net *net,
				   struct nf_exp_event_notifier *new)
{
	int ret;
	struct nf_exp_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	if (notify != NULL) {
		ret = -EBUSY;
		goto out_unlock;
	}
	rcu_assign_pointer(net->ct.nf_expect_event_cb, new);
	ret = 0;

out_unlock:
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);

void nf_ct_expect_unregister_notifier(struct net *net,
				      struct nf_exp_event_notifier *new)
{
	struct nf_exp_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	BUG_ON(notify != new);
	RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
	mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
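
/*
 * Illustrative sketch (compiled out, not part of this file): how a module
 * could hook conntrack events through the API above.  The callback
 * signature mirrors the notify->fcn() invocation in
 * nf_ct_deliver_cached_events(); the "example_" names are hypothetical,
 * and a real user (such as ctnetlink) would also handle namespaces other
 * than init_net.
 */
#if 0
static int example_ct_event(unsigned int events, struct nf_ct_event *item)
{
	if (events & (1 << IPCT_DESTROY))
		pr_info("conntrack entry %p destroyed\n", item->ct);

	/* A negative return tells the core the events were not consumed;
	 * the bits are then kept in e->missed and re-sent later. */
	return 0;
}

static struct nf_ct_event_notifier example_notifier = {
	.fcn = example_ct_event,
};

static int __init example_init(void)
{
	return nf_conntrack_register_notifier(&init_net, &example_notifier);
}

static void __exit example_exit(void)
{
	nf_conntrack_unregister_notifier(&init_net, &example_notifier);
}
#endif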

#define NF_CT_EVENTS_DEFAULT 1
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;

#ifdef CONFIG_SYSCTL
static struct ctl_table event_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_events",
		.data		= &init_net.ct.sysctl_events,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{}
};
#endif /* CONFIG_SYSCTL */

/* Per-conntrack extension holding the cached and missed event bits. */
static struct nf_ct_ext_type event_extend __read_mostly = {
	.len	= sizeof(struct nf_conntrack_ecache),
	.align	= __alignof__(struct nf_conntrack_ecache),
	.id	= NF_CT_EXT_ECACHE,
};

#ifdef CONFIG_SYSCTL
static int nf_conntrack_event_init_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
			GFP_KERNEL);
	if (!table)
		goto out;

	table[0].data = &net->ct.sysctl_events;

	/* Don't export sysctls to unprivileged users */
	if (net->user_ns != &init_user_ns)
		table[0].procname = NULL;

	net->ct.event_sysctl_header =
		register_net_sysctl(net, "net/netfilter", table);
	if (!net->ct.event_sysctl_header) {
		pr_err("nf_ct_event: can't register to sysctl.\n");
		goto out_register;
	}
	return 0;

out_register:
	kfree(table);
out:
	return -ENOMEM;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = net->ct.event_sysctl_header->ctl_table_arg;
	unregister_net_sysctl_table(net->ct.event_sysctl_header);
	kfree(table);
}
#else
static int nf_conntrack_event_init_sysctl(struct net *net)
{
	return 0;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */

int nf_conntrack_ecache_pernet_init(struct net *net)
{
	net->ct.sysctl_events = nf_ct_events;
	INIT_DELAYED_WORK(&net->ct.ecache_dwork, ecache_work);
	return nf_conntrack_event_init_sysctl(net);
}

void nf_conntrack_ecache_pernet_fini(struct net *net)
{
	cancel_delayed_work_sync(&net->ct.ecache_dwork);
	nf_conntrack_event_fini_sysctl(net);
}

int nf_conntrack_ecache_init(void)
{
	int ret = nf_ct_extend_register(&event_extend);

	if (ret < 0)
		pr_err("nf_ct_event: Unable to register event extension.\n");
	return ret;
}

void nf_conntrack_ecache_fini(void)
{
	nf_ct_extend_unregister(&event_extend);
}
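
/*
 * Illustrative sketch (compiled out, not part of this file): the extension
 * registered above is attached to each new conntrack entry by the core
 * allocation path (init_conntrack() in nf_conntrack_core.c), roughly as
 * below.  nf_ct_ecache_ext_add() comes from
 * <net/netfilter/nf_conntrack_ecache.h>; its ctmask/expmask arguments
 * select which event bits the entry reports, with 0 falling back to the
 * defaults.  The function name is hypothetical.
 */
#if 0
static void example_attach_ecache(struct nf_conn *ct)
{
	/* Allocates struct nf_conntrack_ecache storage in the entry's
	 * extension area (NF_CT_EXT_ECACHE). */
	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
}
#endif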