blk-cgroup.c revision accee7854b378a8ab5995d8f5dc5d8abc3b3d23a
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include "blk-cgroup.h"

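/*
 * All IO control policies that have hooked into the blkio controller
 * via blkio_policy_register() live on blkio_list, protected by
 * blkio_list_lock.
 */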
static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

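/*
 * The root cgroup is statically allocated; note that it gets twice the
 * default weight.
 */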
struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

bool blkiocg_css_tryget(struct blkio_cgroup *blkcg)
{
	return css_tryget(&blkcg->css);
}
EXPORT_SYMBOL_GPL(blkiocg_css_tryget);

void blkiocg_css_put(struct blkio_cgroup *blkcg)
{
	css_put(&blkcg->css);
}
EXPORT_SYMBOL_GPL(blkiocg_css_put);

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

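/*
 * Accumulate the service time and sectors transferred that the IO
 * scheduler reports for this group; the totals are exposed through the
 * blkio.time and blkio.sectors cgroup files below.
 */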
void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
			unsigned long time, unsigned long sectors)
{
	blkg->time += time;
	blkg->sectors += sectors;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_stats);

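/*
 * Link @blkg into @blkcg. @key identifies the entity (typically the IO
 * scheduler instance) that owns the group; it is the handle that
 * blkiocg_lookup_group() later matches on.
 */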
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Do we need to take a css reference here? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list; returns 1
 * if it had already been unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* Called under rcu_read_lock(). */
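/*
 * A typical caller looks roughly like this (a sketch, not taken from a
 * real policy; "key" is whatever pointer the policy registered through
 * blkiocg_add_blkio_group()):
 *
 *	rcu_read_lock();
 *	blkg = blkiocg_lookup_group(blkcg, key);
 *	if (blkg)
 *		blkiocg_update_blkio_group_stats(blkg, time, sectors);
 *	rcu_read_unlock();
 */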
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

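/*
 * SHOW_FUNCTION(weight) below expands to blkiocg_weight_read(), the
 * read_u64 handler behind the per-cgroup "blkio.weight" file.
 */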
#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				       struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

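/*
 * Writing "blkio.weight" updates the cgroup-wide weight and pushes the
 * new value to every blkio_group via each registered policy's
 * blkio_update_group_weight_fn callback. Note the lock order: the
 * irq-disabling blkcg->lock is taken outside blkio_list_lock.
 */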
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
					blkcg->weight);
		spin_unlock(&blkio_list_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}

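/*
 * The per-group statistics files print one "major:minor value" line per
 * device, e.g. (hypothetical output) "8:16 540" for a group that has
 * accumulated 540 units on /dev/sdb.
 */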
#define SHOW_FUNCTION_PER_GROUP(__VAR)					\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
			struct cftype *cftype, struct seq_file *m)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev)						\
			seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev),	\
				 MINOR(blkg->dev), blkg->__VAR);	\
	}								\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time);
SHOW_FUNCTION_PER_GROUP(sectors);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue);
#endif
#undef SHOW_FUNCTION_PER_GROUP

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_dequeue_stats);
#endif

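/*
 * Control files created in every blkio cgroup directory. The cgroup
 * core prefixes each name with "blkio.", so e.g. "weight" shows up as
 * "blkio.weight".
 */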
struct cftype blkio_files[] = {
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_seq_string = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_seq_string = blkiocg_sectors_read,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "dequeue",
		.read_seq_string = blkiocg_dequeue_read,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

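/*
 * Called when the cgroup is being removed. Pop blkio_groups off the
 * list one at a time: each iteration unhashes one group under
 * blkcg->lock, drops the lock, and only then calls into the policies
 * to unlink the group.
 */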
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked because the associated cgroup
	 * is going away. Let every registered IO control policy know about
	 * this event.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);
	goto remove_entry;
done:
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	kfree(blkcg);
}

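/*
 * Allocate a new blkio_cgroup for @cgroup, or hand back the statically
 * allocated root group when @cgroup has no parent.
 */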
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support a hierarchy deeper than two levels (0 and 1). */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

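/*
 * use_id = 1 enables css ids for this subsystem; blkiocg_del_blkio_group()
 * relies on them (via css_lookup()) to find the owning cgroup from a bare
 * blkcg_id.
 */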
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.use_id = 1,
};

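/*
 * IO schedulers hook into the blkio controller by registering a
 * blkio_policy_type. A registration might look roughly like this (a
 * sketch; "foo" and its callbacks are hypothetical, the ops fields are
 * the ones used above):
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_unlink_group_fn = foo_unlink_blkio_group,
 *			.blkio_update_group_weight_fn = foo_update_group_weight,
 *		},
 *	};
 *
 *	blkio_policy_register(&blkio_policy_foo);
 */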
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);