cls_cgroup.c revision 2f068bf8711c35b98bf9a0172555b8390a762fc0
/*
 * net/sched/cls_cgroup.c      Control Group Classifier
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:     Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/cgroup.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>

struct cgroup_cls_state {
        struct cgroup_subsys_state css;
        u32 classid;
};

static inline struct cgroup_cls_state *net_cls_state(struct cgroup *cgrp)
{
        return (struct cgroup_cls_state *)
                cgroup_subsys_state(cgrp, net_cls_subsys_id);
}

static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
                                               struct cgroup *cgrp)
{
        struct cgroup_cls_state *cs;

        cs = kzalloc(sizeof(*cs), GFP_KERNEL);
        if (!cs)
                return ERR_PTR(-ENOMEM);

        if (cgrp->parent)
                cs->classid = net_cls_state(cgrp->parent)->classid;

        return &cs->css;
}

static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
        kfree(net_cls_state(cgrp));
}

static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
{
        return net_cls_state(cgrp)->classid;
}

static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
{
        if (!cgroup_lock_live_group(cgrp))
                return -ENODEV;

        net_cls_state(cgrp)->classid = (u32) value;

        cgroup_unlock();

        return 0;
}

static struct cftype ss_files[] = {
        {
                .name = "classid",
                .read_u64 = read_classid,
                .write_u64 = write_classid,
        },
};

static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
        return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
}

struct cgroup_subsys net_cls_subsys = {
        .name = "net_cls",
        .create = cgrp_create,
        .destroy = cgrp_destroy,
        .populate = cgrp_populate,
        .subsys_id = net_cls_subsys_id,
};
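/*
 * The value stored in net_cls.classid is interpreted as a tc class
 * handle: the upper 16 bits select the major number and the lower 16
 * bits the minor number, so 0x100001 corresponds to class 10:1. An
 * illustrative shell session (the mount point and group name below are
 * examples, not mandated by this file):
 *
 *      mount -t cgroup -o net_cls net_cls /cgroup
 *      mkdir /cgroup/browsers
 *      echo 0x100001 > /cgroup/browsers/net_cls.classid
 *      echo $$ > /cgroup/browsers/tasks
 *
 * Groups created below /cgroup/browsers start out with their parent's
 * classid, see cgrp_create() above.
 */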
struct cls_cgroup_head {
        u32 handle;
        struct tcf_exts exts;
        struct tcf_ematch_tree ematches;
};

static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
                               struct tcf_result *res)
{
        struct cls_cgroup_head *head = tp->root;
        struct cgroup_cls_state *cs;
        int ret = 0;

        /*
         * Due to the nature of the classifier it is required to ignore all
         * packets originating from softirq context as accessing `current'
         * would lead to false results.
         *
         * This test assumes that all callers of dev_queue_xmit() explicitly
         * disable bh. Knowing this, it is possible to detect softirq based
         * calls by looking at the number of nested bh disable calls because
         * softirqs always disable bh.
         */
        if (softirq_count() != SOFTIRQ_OFFSET)
                return -1;

        rcu_read_lock();
        cs = (struct cgroup_cls_state *) task_subsys_state(current,
                                                           net_cls_subsys_id);
        if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) {
                res->classid = cs->classid;
                res->class = 0;
                ret = tcf_exts_exec(skb, &head->exts, res);
        } else {
                ret = -1;
        }

        rcu_read_unlock();

        return ret;
}

static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
        return 0UL;
}

static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
        return 0;
}

static const struct tcf_ext_map cgroup_ext_map = {
        .action = TCA_CGROUP_ACT,
        .police = TCA_CGROUP_POLICE,
};

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
        [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED },
};

static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
                             u32 handle, struct nlattr **tca,
                             unsigned long *arg)
{
        struct nlattr *tb[TCA_CGROUP_MAX + 1];
        struct cls_cgroup_head *head = tp->root;
        struct tcf_ematch_tree t;
        struct tcf_exts e;
        int err;

        if (head == NULL) {
                if (!handle)
                        return -EINVAL;

                head = kzalloc(sizeof(*head), GFP_KERNEL);
                if (head == NULL)
                        return -ENOBUFS;

                head->handle = handle;

                tcf_tree_lock(tp);
                tp->root = head;
                tcf_tree_unlock(tp);
        }

        if (handle != head->handle)
                return -ENOENT;

        err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
                               cgroup_policy);
        if (err < 0)
                return err;

        err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &cgroup_ext_map);
        if (err < 0)
                return err;

        err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
        if (err < 0)
                goto errout;

        tcf_exts_change(tp, &head->exts, &e);
        tcf_em_tree_change(tp, &head->ematches, &t);

        return 0;

errout:
        tcf_exts_destroy(tp, &e);
        return err;
}

static void cls_cgroup_destroy(struct tcf_proto *tp)
{
        struct cls_cgroup_head *head = tp->root;

        if (head) {
                tcf_exts_destroy(tp, &head->exts);
                tcf_em_tree_destroy(tp, &head->ematches);
                kfree(head);
        }
}

static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
        return -EOPNOTSUPP;
}

static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct cls_cgroup_head *head = tp->root;

        if (arg->count < arg->skip)
                goto skip;

        if (arg->fn(tp, (unsigned long) head, arg) < 0) {
                arg->stop = 1;
                return;
        }
skip:
        arg->count++;
}

static int cls_cgroup_dump(struct tcf_proto *tp, unsigned long fh,
                           struct sk_buff *skb, struct tcmsg *t)
{
        struct cls_cgroup_head *head = tp->root;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;

        t->tcm_handle = head->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &head->exts, &cgroup_ext_map) < 0 ||
            tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &head->exts, &cgroup_ext_map) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}
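/*
 * A hedged sketch of how this classifier might be attached with the tc
 * tool (device, rate and handles are placeholders): the filter carries
 * no match keys of its own, it simply maps the sending task's
 * net_cls.classid to a class of the qdisc it is attached to.
 *
 *      tc qdisc add dev eth0 root handle 10: htb
 *      tc class add dev eth0 parent 10: classid 10:1 htb rate 100mbit
 *      tc filter add dev eth0 parent 10: protocol ip prio 10 \
 *              handle 1: cgroup
 *
 * Packets from tasks in a group whose net_cls.classid is 0x100001 are
 * then steered to class 10:1 by cls_cgroup_classify() above.
 */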
static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
        .kind = "cgroup",
        .init = cls_cgroup_init,
        .change = cls_cgroup_change,
        .classify = cls_cgroup_classify,
        .destroy = cls_cgroup_destroy,
        .get = cls_cgroup_get,
        .put = cls_cgroup_put,
        .delete = cls_cgroup_delete,
        .walk = cls_cgroup_walk,
        .dump = cls_cgroup_dump,
        .owner = THIS_MODULE,
};

static int __init init_cgroup_cls(void)
{
        return register_tcf_proto_ops(&cls_cgroup_ops);
}

static void __exit exit_cgroup_cls(void)
{
        unregister_tcf_proto_ops(&cls_cgroup_ops);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");
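/*
 * Minimal userspace sketch of driving the net_cls side from C rather
 * than from the shell. This is an illustration only: the /cgroup mount
 * point and the "browsers" group are assumptions carried over from the
 * example above, not defined by this file.
 *
 *      #include <stdio.h>
 *      #include <sys/stat.h>
 *      #include <sys/types.h>
 *      #include <unistd.h>
 *
 *      // Write a single value into a cgroup control file.
 *      static int write_file(const char *path, const char *val)
 *      {
 *              FILE *f = fopen(path, "w");
 *              int ret;
 *
 *              if (f == NULL) {
 *                      perror(path);
 *                      return -1;
 *              }
 *              ret = (fputs(val, f) == EOF) ? -1 : 0;
 *              if (fclose(f) == EOF)
 *                      ret = -1;
 *              if (ret)
 *                      perror(path);
 *              return ret;
 *      }
 *
 *      int main(void)
 *      {
 *              char pid[16];
 *
 *              // 0x100001 is tc class 10:1, see write_classid() above.
 *              mkdir("/cgroup/browsers", 0755);
 *              if (write_file("/cgroup/browsers/net_cls.classid",
 *                             "0x100001"))
 *                      return 1;
 *
 *              // Move this task in; its traffic now maps to class 10:1.
 *              snprintf(pid, sizeof(pid), "%d\n", (int) getpid());
 *              return write_file("/cgroup/browsers/tasks", pid) ? 1 : 0;
 *      }
 */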