oom_kill.c revision dd8e8f405ca386c7ce7cbb996ccd985d283b0e03
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/security.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks;
static DEFINE_SPINLOCK(zone_scan_lock);
/* #define DEBUG */

/*
 * Does any thread of the target process have mems_allowed that
 * intersects ours?
 */
static int has_intersects_mems_allowed(struct task_struct *tsk)
{
	struct task_struct *t;

	t = tsk;
	do {
		if (cpuset_mems_allowed_intersects(current, t))
			return 1;
		t = next_thread(t);
	} while (t != tsk);

	return 0;
}

static struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t = p;

	do {
		task_lock(t);
		if (likely(t->mm))
			return t;
		task_unlock(t);
	} while_each_thread(p, t);

	return NULL;
}
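
/*
 * Usage sketch (illustrative, not code from this file): the thread
 * returned by find_lock_task_mm(), if any, is returned with task_lock()
 * held, so the caller must drop the lock once it is done with t->mm:
 *
 *	struct task_struct *t = find_lock_task_mm(p);
 *	if (t) {
 *		unsigned long vm = t->mm->total_vm;
 *		task_unlock(t);
 *	}
 */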

/**
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of the task whose badness we should calculate
 * @uptime: current uptime in seconds
 *
 * The formula used is relatively simple and documented inline in the
 * function. The main rationale is that we want to select a good task
 * to kill when we run out of memory.
 *
 * Good in this context means that:
 * 1) we lose the minimum amount of work done
 * 2) we recover a large amount of memory
 * 3) we don't kill anything innocent of eating tons of memory
 * 4) we want to kill the minimum number of processes (one)
 * 5) we try to kill the process the user expects us to kill; this
 *    algorithm has been meticulously tuned to meet the principle
 *    of least surprise ... (be careful when you change it)
 */
unsigned long badness(struct task_struct *p, unsigned long uptime)
{
	unsigned long points, cpu_time, run_time;
	struct task_struct *child;
	struct task_struct *c, *t;
	int oom_adj = p->signal->oom_adj;
	struct task_cputime task_time;
	unsigned long utime;
	unsigned long stime;

	if (oom_adj == OOM_DISABLE)
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = p->mm->total_vm;

	/*
	 * After this unlock, p->mm may no longer be dereferenced.
	 */
	task_unlock(p);

	/*
	 * swapoff can easily use up all memory, so kill those first.
	 */
	if (p->flags & PF_OOM_ORIGIN)
		return ULONG_MAX;

	/*
	 * Processes which fork a lot of child processes are likely
	 * a good choice. We add half the vmsize of the children if they
	 * have their own mm. This prevents forking servers from flooding
	 * the machine with an endless amount of children. In case a single
	 * child is eating the vast majority of memory, adding only half
	 * to the parent makes the child our kill candidate of choice.
	 */
	t = p;
	do {
		list_for_each_entry(c, &t->children, sibling) {
			child = find_lock_task_mm(c);
			if (child) {
				if (child->mm != p->mm)
					points += child->mm->total_vm/2 + 1;
				task_unlock(child);
			}
		}
	} while_each_thread(p, t);

	/*
	 * CPU time is in tens of seconds and run time is in thousands
	 * of seconds. There is no particular reason for this other than
	 * that it turned out to work very well in practice.
	 */
	thread_group_cputime(p, &task_time);
	utime = cputime_to_jiffies(task_time.utime);
	stime = cputime_to_jiffies(task_time.stime);
	cpu_time = (utime + stime) >> (SHIFT_HZ + 3);

	if (uptime >= p->start_time.tv_sec)
		run_time = (uptime - p->start_time.tv_sec) >> 10;
	else
		run_time = 0;

	if (cpu_time)
		points /= int_sqrt(cpu_time);
	if (run_time)
		points /= int_sqrt(int_sqrt(run_time));
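
	/*
	 * Worked example (illustrative; assumes HZ = 1000, SHIFT_HZ = 10):
	 * a task with points = 100000 that has burned 80 CPU seconds
	 * (80000 jiffies, so cpu_time = 80000 >> 13 = 9) and has been
	 * running for roughly three hours (run_time = 10800 >> 10 = 10)
	 * ends up with 100000 / int_sqrt(9) / int_sqrt(int_sqrt(10)) =
	 * 100000 / 3 / 1 = 33333 points: established, CPU-hungry tasks
	 * score lower and are therefore less likely to be killed.
	 */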

	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (task_nice(p) > 0)
		points *= 2;

	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN) ||
	    has_capability_noaudit(p, CAP_SYS_RESOURCE))
		points /= 4;

	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (has_capability_noaudit(p, CAP_SYS_RAWIO))
		points /= 4;

	/*
	 * If p's nodes don't overlap ours, it may still help to kill p
	 * because p may have allocated or otherwise mapped memory on
	 * this node before. However it will be less likely.
	 */
	if (!has_intersects_mems_allowed(p))
		points /= 8;

	/*
	 * Adjust the score by oom_adj.
	 */
	if (oom_adj) {
		if (oom_adj > 0) {
			if (!points)
				points = 1;
			points <<= oom_adj;
		} else
			points >>= -(oom_adj);
	}
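
	/*
	 * oom_adj is clamped to the range OOM_ADJUST_MIN (-16) to
	 * OOM_ADJUST_MAX (15), so the shifts above scale the score by up
	 * to 2^15 in either direction; e.g. oom_adj = 2 quadruples a
	 * task's score while oom_adj = -2 quarters it.
	 */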

#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
	       p->pid, p->comm, points);
#endif
	return points;
}

/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				    gfp_t gfp_mask, nodemask_t *nodemask)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);

	/*
	 * We reach here with __GFP_THISNODE only when __GFP_NOFAIL is also
	 * used, so we should avoid killing current; a random task has to
	 * be killed in this case.  Ideally this would be reported as
	 * CONSTRAINT_THISNODE, but there is no way to handle that yet.
	 */
	if (gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * The nodemask here is the one passed to alloc_pages().  The
	 * cpuset code does not use this nodemask for its
	 * hardwall/softwall/hierarchy feature; mempolicy is the only user
	 * of the nodemask here.  Check whether the mempolicy's nodemask
	 * contains all of N_HIGH_MEMORY.
	 */
	if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask))
		return CONSTRAINT_MEMORY_POLICY;

	/*
	 * Check whether this allocation failure was caused by a cpuset's
	 * wall function.
	 */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
			high_zoneidx, nodemask)
		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
			return CONSTRAINT_CPUSET;

	return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	return CONSTRAINT_NONE;
}
#endif
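
/*
 * To summarize the verdicts (a reading aid, not code from this file):
 * CONSTRAINT_NONE means the system as a whole is out of memory,
 * CONSTRAINT_CPUSET means a cpuset's wall blocked the allocation, and
 * CONSTRAINT_MEMORY_POLICY means a mempolicy nodemask restricted it;
 * out_of_memory() below chooses its victim strategy accordingly.
 */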

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. The caller must hold tasklist_lock.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned long *ppoints,
						struct mem_cgroup *mem)
{
	struct task_struct *p;
	struct task_struct *chosen = NULL;
	struct timespec uptime;
	*ppoints = 0;

	do_posix_clock_monotonic_gettime(&uptime);
	for_each_process(p) {
		unsigned long points;

		/* skip the init task and kthreads */
		if (is_global_init(p) || (p->flags & PF_KTHREAD))
			continue;
		if (mem && !task_in_mem_cgroup(p, mem))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed. Don't allow any other task access to the
		 * memory reserve.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory. Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE))
			return ERR_PTR(-1UL);

		/*
		 * This is in the process of releasing memory so wait for it
		 * to finish before killing some other task by mistake.
		 *
		 * However, if p is the current task, we allow the 'kill' to
		 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
		 * which will allow it to gain access to memory reserves in
		 * the process of exiting and releasing its resources.
		 * Otherwise we could get an easy OOM deadlock.
		 */
		if ((p->flags & PF_EXITING) && p->mm) {
			if (p != current)
				return ERR_PTR(-1UL);

			chosen = p;
			*ppoints = ULONG_MAX;
		}

		if (p->signal->oom_adj == OOM_DISABLE)
			continue;

		points = badness(p, uptime.tv_sec);
		if (points > *ppoints || !chosen) {
			chosen = p;
			*ppoints = points;
		}
	}

	return chosen;
}
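
/*
 * Note on the return values above: NULL means no killable process was
 * found, while ERR_PTR(-1UL) means an OOM kill is already under way (a
 * task with TIF_MEMDIE set, or an exiting victim), so the caller should
 * back off and let it finish.
 */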

/**
 * dump_tasks - dump current memory state of all system tasks
 * @mem: target memory controller
 *
 * Dumps the current memory state of all system tasks, excluding kernel
 * threads.  State information includes the task's pid, uid, tgid, vm size,
 * rss, cpu, oom_adj value, and name.
 *
 * If @mem is non-NULL, only tasks that are members of that mem_cgroup are
 * shown.
 *
 * Call with tasklist_lock read-locked.
 */
static void dump_tasks(const struct mem_cgroup *mem)
{
	struct task_struct *g, *p;

	printk(KERN_INFO "[ pid ]   uid  tgid total_vm      rss cpu oom_adj "
	       "name\n");
	do_each_thread(g, p) {
		struct mm_struct *mm;

		if (mem && !task_in_mem_cgroup(p, mem))
			continue;
		if (!thread_group_leader(p))
			continue;

		task_lock(p);
		mm = p->mm;
		if (!mm) {
			/*
			 * total_vm and rss sizes do not exist for tasks with no
			 * mm so there's no need to report them; they can't be
			 * oom killed anyway.
			 */
			task_unlock(p);
			continue;
		}
		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d     %3d %s\n",
		       p->pid, __task_cred(p)->uid, p->tgid, mm->total_vm,
		       get_mm_rss(mm), (int)task_cpu(p), p->signal->oom_adj,
		       p->comm);
		task_unlock(p);
	} while_each_thread(g, p);
}
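
/*
 * Sample of the resulting log output (the values are purely
 * illustrative):
 *
 *	[ pid ]   uid  tgid total_vm      rss cpu oom_adj name
 *	[ 1234]  1000  1234    45678     1234   1       0 firefox
 */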

static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
							struct mem_cgroup *mem)
{
	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
		"oom_adj=%d\n",
		current->comm, gfp_mask, order, current->signal->oom_adj);
	task_lock(current);
	cpuset_print_task_mems_allowed(current);
	task_unlock(current);
	dump_stack();
	mem_cgroup_print_oom_info(mem, p);
	show_mem();
	if (sysctl_oom_dump_tasks)
		dump_tasks(mem);
}

#define K(x) ((x) << (PAGE_SHIFT-10))
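/*
 * K() converts a page count to kilobytes: with 4K pages PAGE_SHIFT is 12,
 * so the expression shifts left by 2, i.e. multiplies by 4.
 */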

/*
 * Send SIGKILL to the selected process irrespective of CAP_SYS_RAWIO,
 * though it's unlikely that we select a process with CAP_SYS_RAWIO set
 * (badness() above already divides such a task's score by 4).
 */
static void __oom_kill_task(struct task_struct *p, int verbose)
{
	if (is_global_init(p)) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill init!\n");
		return;
	}

	p = find_lock_task_mm(p);
	if (!p)
		return;

	if (verbose)
		printk(KERN_ERR "Killed process %d (%s) "
		       "vsz:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
		       task_pid_nr(p), p->comm,
		       K(p->mm->total_vm),
		       K(get_mm_counter(p->mm, MM_ANONPAGES)),
		       K(get_mm_counter(p->mm, MM_FILEPAGES)));
	task_unlock(p);

	/*
	 * We give our sacrificial lamb high priority and access to
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
	p->rt.time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);

	force_sig(SIGKILL, p);
}

static int oom_kill_task(struct task_struct *p)
{
	/*
	 * WARNING: p->mm may not be dereferenced here since we did not
	 * pin it with get_task_mm(p), and it may become NULL at any time
	 * since we do not hold task_lock(p).  That is of no concern to
	 * us: all we need to do is test it for NULL-ness.
	 */
	if (!p->mm || p->signal->oom_adj == OOM_DISABLE)
		return 1;

	__oom_kill_task(p, 1);

	return 0;
}

static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			    unsigned long points, struct mem_cgroup *mem,
			    const char *message)
{
	struct task_struct *c;
	struct task_struct *t = p;

	if (printk_ratelimit())
		dump_header(p, gfp_mask, order, mem);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		__oom_kill_task(p, 0);
		return 0;
	}

	printk(KERN_ERR "%s: kill process %d (%s) score %lu or a child\n",
					message, task_pid_nr(p), p->comm, points);

	/* Try to kill a child first */
	do {
		list_for_each_entry(c, &t->children, sibling) {
			if (c->mm == p->mm)
				continue;
			if (mem && !task_in_mem_cgroup(c, mem))
				continue;
			if (!oom_kill_task(c))
				return 0;
		}
	} while_each_thread(p, t);

	return oom_kill_task(p);
}
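
/*
 * Like oom_kill_task(), oom_kill_process() returns 0 once a victim has
 * been killed and nonzero when the chosen task turned out to be
 * unkillable; the callers below use a nonzero return to retry victim
 * selection.
 */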

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
	unsigned long points = 0;
	struct task_struct *p;

	if (sysctl_panic_on_oom == 2)
		panic("out of memory (memcg). panic_on_oom is selected.\n");
	read_lock(&tasklist_lock);
retry:
	p = select_bad_process(&points, mem);
	if (!p || PTR_ERR(p) == -1UL)
		goto out;

	if (oom_kill_process(p, gfp_mask, 0, points, mem,
				"Memory cgroup out of memory"))
		goto retry;
out:
	read_unlock(&tasklist_lock);
}
#endif

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
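
/*
 * Sketch of a hypothetical client of this chain (my_shrink_cache() is
 * made up; real users follow the same shape).  Callbacks add the number
 * of pages they freed to *parm; if the chain freed anything, the OOM
 * paths below return without killing:
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long dummy, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += my_shrink_cache();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */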

/*
 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	}

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		/*
		 * Lock each zone in the zonelist under zone_scan_lock so a
		 * parallel invocation of try_set_zone_oom() doesn't succeed
		 * when it shouldn't.
		 */
		zone_set_flag(zone, ZONE_OOM_LOCKED);
	}

out:
	spin_unlock(&zone_scan_lock);
	return ret;
}
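
/*
 * A typical caller pairs this with clear_zonelist_oom(), roughly as
 * __alloc_pages_may_oom() in mm/page_alloc.c does:
 *
 *	if (!try_set_zone_oom(zonelist, gfp_mask)) {
 *		schedule_timeout_uninterruptible(1);
 *		return NULL;
 *	}
 *	out_of_memory(zonelist, gfp_mask, order, nodemask);
 *	clear_zonelist_oom(zonelist, gfp_mask);
 */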

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now invoke the OOM
 * killer again, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	}
	spin_unlock(&zone_scan_lock);
}

/*
 * Must be called with tasklist_lock held for read.
 */
static void __out_of_memory(gfp_t gfp_mask, int order)
{
	struct task_struct *p;
	unsigned long points;

	if (sysctl_oom_kill_allocating_task)
		if (!oom_kill_process(current, gfp_mask, order, 0, NULL,
				"Out of memory (oom_kill_allocating_task)"))
			return;
retry:
	/*
	 * Rambo mode: Shoot down a process and hope it solves whatever
	 * issues we may have.
	 */
	p = select_bad_process(&points, NULL);

	if (PTR_ERR(p) == -1UL)
		return;

	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p) {
		read_unlock(&tasklist_lock);
		dump_header(NULL, gfp_mask, order, NULL);
		panic("Out of memory and no killable processes...\n");
	}

	if (oom_kill_process(p, gfp_mask, order, points, NULL,
			     "Out of memory"))
		goto retry;
}

/*
 * The pagefault handler calls into here because it is out of memory but
 * doesn't know exactly how or why.
 */
void pagefault_out_of_memory(void)
{
	unsigned long freed = 0;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	if (sysctl_panic_on_oom)
		panic("out of memory from page fault. panic_on_oom is selected.\n");

	read_lock(&tasklist_lock);
	__out_of_memory(0, 0); /* unknown gfp_mask and order */
	read_unlock(&tasklist_lock);

	/*
	 * Give the OOM victim a good chance of exiting before we retry
	 * the allocation.
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to the failing allocation
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
		int order, nodemask_t *nodemask)
{
	unsigned long freed = 0;
	enum oom_constraint constraint;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	if (sysctl_panic_on_oom == 2) {
		dump_header(NULL, gfp_mask, order, NULL);
		panic("out of memory. Compulsory panic_on_oom is selected.\n");
	}

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(zonelist, gfp_mask, nodemask);
	read_lock(&tasklist_lock);

	switch (constraint) {
	case CONSTRAINT_MEMORY_POLICY:
		oom_kill_process(current, gfp_mask, order, 0, NULL,
				"No available memory (MPOL_BIND)");
		break;

	case CONSTRAINT_NONE:
		if (sysctl_panic_on_oom) {
			dump_header(NULL, gfp_mask, order, NULL);
			panic("out of memory. panic_on_oom is selected\n");
		}
		/* Fall-through */
	case CONSTRAINT_CPUSET:
		__out_of_memory(gfp_mask, order);
		break;
	}

	read_unlock(&tasklist_lock);

682	 * Give "p" a good chance of killing itself before we
683	 * retry to allocate memory unless "p" is current
684	 */
685	if (!test_thread_flag(TIF_MEMDIE))
686		schedule_timeout_uninterruptible(1);
687}
688