oom_kill.c revision 4e950f6f0189f65f8bf069cf2272649ef418f5e4
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>

int sysctl_panic_on_oom;
/* #define DEBUG */

/**
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of the task whose badness we calculate
 * @uptime: current uptime in seconds
 *
 * The formula used is relatively simple and documented inline in the
 * function. The main rationale is that we want to select a good task
 * to kill when we run out of memory.
 *
 * Good in this context means that:
 * 1) we lose the minimum amount of work done
 * 2) we recover a large amount of memory
 * 3) we don't kill anything innocent of eating tons of memory
 * 4) we want to kill the minimum number of processes (one)
 * 5) we try to kill the process the user expects us to kill; this
 *    algorithm has been meticulously tuned to meet the principle
 *    of least surprise ... (be careful when you change it)
 */

unsigned long badness(struct task_struct *p, unsigned long uptime)
{
	unsigned long points, cpu_time, run_time, s;
	struct mm_struct *mm;
	struct task_struct *child;

	task_lock(p);
	mm = p->mm;
	if (!mm) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = mm->total_vm;

	/*
	 * After this unlock we can no longer dereference local variable `mm'
	 */
	task_unlock(p);

	/*
	 * swapoff can easily use up all memory, so kill those first.
	 */
	if (p->flags & PF_SWAPOFF)
		return ULONG_MAX;

	/*
	 * Processes which fork a lot of child processes are likely
	 * a good choice. We add half the vmsize of the children if they
	 * have their own mm. This prevents forking servers from flooding
	 * the machine with an endless number of children. In case a single
	 * child is eating the vast majority of memory, adding only half
	 * to the parent will make the child our kill candidate of choice.
	 */
	list_for_each_entry(child, &p->children, sibling) {
		task_lock(child);
		if (child->mm != mm && child->mm)
			points += child->mm->total_vm/2 + 1;
		task_unlock(child);
	}

	/*
	 * CPU time is in tens of seconds and run time is in thousands
	 * of seconds. There is no particular reason for this other than
	 * that it turned out to work very well in practice.
	 */
	cpu_time = (cputime_to_jiffies(p->utime) + cputime_to_jiffies(p->stime))
		>> (SHIFT_HZ + 3);

	if (uptime >= p->start_time.tv_sec)
		run_time = (uptime - p->start_time.tv_sec) >> 10;
	else
		run_time = 0;

	s = int_sqrt(cpu_time);
	if (s)
		points /= s;
	s = int_sqrt(int_sqrt(run_time));
	if (s)
		points /= s;

	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (task_nice(p) > 0)
		points *= 2;

	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_ADMIN) ||
				p->uid == 0 || p->euid == 0)
		points /= 4;

	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_RAWIO))
		points /= 4;

	/*
	 * If p's nodes don't overlap ours, it may still help to kill p
	 * because p may have allocated or otherwise mapped memory on
	 * this node before. However it will be less likely.
	 */
	if (!cpuset_excl_nodes_overlap(p))
		points /= 8;

	/*
	 * Adjust the score by oomkilladj.
	 */
	if (p->oomkilladj) {
		if (p->oomkilladj > 0) {
			if (!points)
				points = 1;
			points <<= p->oomkilladj;
		} else
			points >>= -(p->oomkilladj);
	}

#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
		p->pid, p->comm, points);
#endif
	return points;
}
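
/*
 * Worked example of the heuristic above (illustrative numbers only,
 * assuming HZ=100, so SHIFT_HZ=7 and the cpu_time shift is 10):
 *
 *	total_vm = 100000 pages                  -> points = 100000
 *	600 s of CPU (60000 jiffies) >> 10 = 58,
 *	int_sqrt(58) = 7                         -> points = 100000 / 7 = 14285
 *	started 10800 s ago, >> 10 = 10,
 *	int_sqrt(int_sqrt(10)) = 1               -> points unchanged
 *	task_nice(p) > 0                         -> points = 28570
 *	oomkilladj = 3                           -> points <<= 3 = 228560
 *
 * Large and niced tasks therefore score high, while long-running,
 * CPU-heavy, privileged or hardware-touching tasks score low.
 */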

/*
 * Types of limitations to the nodes from which allocations may occur
 */
#define CONSTRAINT_NONE 1
#define CONSTRAINT_MEMORY_POLICY 2
#define CONSTRAINT_CPUSET 3

/*
 * Determine the type of allocation constraint.
 */
static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
{
#ifdef CONFIG_NUMA
	struct zone **z;
	nodemask_t nodes;
	int node;

	nodes_clear(nodes);
	/* node has memory ? */
	for_each_online_node(node)
		if (NODE_DATA(node)->node_present_pages)
			node_set(node, nodes);

	for (z = zonelist->zones; *z; z++)
		if (cpuset_zone_allowed_softwall(*z, gfp_mask))
			node_clear(zone_to_nid(*z), nodes);
		else
			return CONSTRAINT_CPUSET;

	if (!nodes_empty(nodes))
		return CONSTRAINT_MEMORY_POLICY;
#endif

	return CONSTRAINT_NONE;
}
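
/*
 * How to read the function above (a summary of the code, not new
 * behaviour): the walk over the zonelist returns CONSTRAINT_CPUSET as
 * soon as the current cpuset disallows one of the candidate zones.  If
 * every zone is allowed but the zonelist itself skips a node that has
 * memory (typically because of an MPOL_BIND memory policy), some bits
 * remain set in 'nodes' and we report CONSTRAINT_MEMORY_POLICY.
 * Otherwise the allocation was not constrained at all.
 */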

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller will lock the tasklist.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
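/*
 * Return value (summarising the code below): NULL when no killable
 * task was found, ERR_PTR(-1UL) when the caller should back off and
 * give an already-dying task time to exit, otherwise the chosen victim.
 */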
static struct task_struct *select_bad_process(unsigned long *ppoints)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	struct timespec uptime;
	*ppoints = 0;

	do_posix_clock_monotonic_gettime(&uptime);
	do_each_thread(g, p) {
		unsigned long points;

		/*
		 * skip kernel threads and tasks which have already released
		 * their mm.
		 */
		if (!p->mm)
			continue;
		/* skip the init task */
		if (is_init(p))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed. Don't allow any other task access to the
		 * memory reserve.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory. Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE))
			return ERR_PTR(-1UL);

		/*
		 * This is in the process of releasing memory so wait for it
		 * to finish before killing some other task by mistake.
		 *
		 * However, if p is the current task, we allow the 'kill' to
		 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
		 * which will allow it to gain access to memory reserves in
		 * the process of exiting and releasing its resources.
		 * Otherwise we could get an easy OOM deadlock.
		 */
		if (p->flags & PF_EXITING) {
			if (p != current)
				return ERR_PTR(-1UL);

			chosen = p;
			*ppoints = ULONG_MAX;
		}

		if (p->oomkilladj == OOM_DISABLE)
			continue;

		points = badness(p, uptime.tv_sec);
		if (points > *ppoints || !chosen) {
			chosen = p;
			*ppoints = points;
		}
	} while_each_thread(g, p);

	return chosen;
}

/*
 * Send SIGKILL to the selected process irrespective of CAP_SYS_RAWIO,
 * though it's unlikely that we select a process with CAP_SYS_RAWIO set.
 */
static void __oom_kill_task(struct task_struct *p, int verbose)
{
	if (is_init(p)) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill init!\n");
		return;
	}

	if (!p->mm) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill an mm-less task!\n");
		return;
	}

	if (verbose)
		printk(KERN_ERR "Killed process %d (%s)\n", p->pid, p->comm);

	/*
	 * We give our sacrificial lamb high priority and access to
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
	p->time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);

	force_sig(SIGKILL, p);
}

static int oom_kill_task(struct task_struct *p)
{
	struct mm_struct *mm;
	struct task_struct *g, *q;

	mm = p->mm;

	/* WARNING: mm may not be dereferenced since we did not obtain its
	 * value from get_task_mm(p).  This is OK since all we need to do is
	 * compare mm to q->mm below.
	 *
	 * Furthermore, even if mm contains a non-NULL value, p->mm may
	 * change to NULL at any time since we do not hold task_lock(p).
	 * However, this is of no concern to us.
	 */

	if (mm == NULL)
		return 1;

	/*
	 * Don't kill the process if any threads are set to OOM_DISABLE
	 */
	do_each_thread(g, q) {
		if (q->mm == mm && q->oomkilladj == OOM_DISABLE)
			return 1;
	} while_each_thread(g, q);

	__oom_kill_task(p, 1);

	/*
	 * kill all processes that share the ->mm (i.e. all threads),
	 * but are in a different thread group. Don't let them have access
	 * to memory reserves though, otherwise we might deplete all memory.
	 */
	do_each_thread(g, q) {
		if (q->mm == mm && q->tgid != p->tgid)
			force_sig(SIGKILL, q);
	} while_each_thread(g, q);

	return 0;
}

static int oom_kill_process(struct task_struct *p, unsigned long points,
		const char *message)
{
	struct task_struct *c;
	struct list_head *tsk;

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		__oom_kill_task(p, 0);
		return 0;
	}

	printk(KERN_ERR "%s: kill process %d (%s) score %lu or a child\n",
					message, p->pid, p->comm, points);

	/* Try to kill a child first */
	list_for_each(tsk, &p->children) {
		c = list_entry(tsk, struct task_struct, sibling);
		if (c->mm == p->mm)
			continue;
		if (!oom_kill_task(c))
			return 0;
	}
	return oom_kill_task(p);
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
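
/*
 * Minimal usage sketch for the notifier API above (my_oom_notify,
 * my_oom_nb and my_shrink_caches are hypothetical names, not part of
 * this file): a subsystem that can release memory before we resort to
 * killing registers a notifier and adds the number of pages it freed
 * to the unsigned long that out_of_memory() passes as the data
 * argument of the chain.
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += my_shrink_caches();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */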

/**
 * out_of_memory - kill the "best" process when we run out of memory
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * or trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
{
	struct task_struct *p;
	unsigned long points = 0;
	unsigned long freed = 0;
	int constraint;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	if (printk_ratelimit()) {
		printk(KERN_WARNING "%s invoked oom-killer: "
			"gfp_mask=0x%x, order=%d, oomkilladj=%d\n",
			current->comm, gfp_mask, order, current->oomkilladj);
		dump_stack();
		show_mem();
	}

	if (sysctl_panic_on_oom == 2)
		panic("out of memory. Compulsory panic_on_oom is selected.\n");

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(zonelist, gfp_mask);
	cpuset_lock();
	read_lock(&tasklist_lock);

	switch (constraint) {
	case CONSTRAINT_MEMORY_POLICY:
		oom_kill_process(current, points,
				"No available memory (MPOL_BIND)");
		break;

	case CONSTRAINT_CPUSET:
		oom_kill_process(current, points,
				"No available memory in cpuset");
		break;

	case CONSTRAINT_NONE:
		if (sysctl_panic_on_oom)
			panic("out of memory. panic_on_oom is selected\n");
retry:
		/*
		 * Rambo mode: Shoot down a process and hope it solves whatever
		 * issues we may have.
		 */
		p = select_bad_process(&points);

		if (PTR_ERR(p) == -1UL)
			goto out;

		/* Found nothing?!?! Either we hang forever, or we panic. */
		if (!p) {
			read_unlock(&tasklist_lock);
			cpuset_unlock();
			panic("Out of memory and no killable processes...\n");
		}

		if (oom_kill_process(p, points, "Out of memory"))
			goto retry;

		break;
	}

out:
	read_unlock(&tasklist_lock);
	cpuset_unlock();

	/*
	 * Give "p" a good chance of killing itself before we
	 * retry to allocate memory unless "p" is current
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}
477