oom_kill.c revision 81236810226f71bd9ff77321c8e8276dae7efc61
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/security.h>

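/* Tunables below are exposed via /proc/sys/vm/ (see kernel/sysctl.c) */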
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks;
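/* Protects ZONE_OOM_LOCKED updates in try_set_zone_oom() and friends */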
static DEFINE_SPINLOCK(zone_scan_lock);
/* #define DEBUG */

/**
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of the task whose badness we calculate
 * @uptime: current uptime in seconds
 *
 * The formula used is relatively simple and documented inline in the
 * function. The main rationale is that we want to select a good task
 * to kill when we run out of memory.
 *
 * Good in this context means that:
 * 1) we lose the minimum amount of work done
 * 2) we recover a large amount of memory
 * 3) we don't kill anything innocent of eating tons of memory
 * 4) we want to kill the minimum number of processes (one)
 * 5) we try to kill the process the user expects us to kill; this
 *    algorithm has been meticulously tuned to meet the principle
 *    of least surprise ... (be careful when you change it)
 */
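
/*
 * Worked example (all numbers illustrative, assuming HZ=1000): a task
 * with a total_vm of 100000 pages, no children, ~80 seconds of CPU time
 * (cpu_time == 9 below) and ~2000 seconds of run time (run_time == 1)
 * scores 100000 / int_sqrt(9) / int_sqrt(int_sqrt(1)), i.e. about 33000
 * points, before the nice, capability, cpuset and oom_adj adjustments
 * applied later in the function.
 */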

unsigned long badness(struct task_struct *p, unsigned long uptime)
{
	unsigned long points, cpu_time, run_time;
	struct mm_struct *mm;
	struct task_struct *child;
	int oom_adj;

	task_lock(p);
	mm = p->mm;
	if (!mm) {
		task_unlock(p);
		return 0;
	}
	oom_adj = mm->oom_adj;
	if (oom_adj == OOM_DISABLE) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = mm->total_vm;

	/*
	 * After this unlock we can no longer dereference local variable `mm'
	 */
	task_unlock(p);

	/*
	 * swapoff can easily use up all memory, so kill those first.
	 */
	if (p->flags & PF_SWAPOFF)
		return ULONG_MAX;

	/*
	 * Processes which fork a lot of child processes are likely
	 * a good choice. We add half the vmsize of the children if they
	 * have their own mm. This prevents forking servers from flooding
	 * the machine with an endless amount of children. In case a single
	 * child is eating the vast majority of memory, adding only half
	 * to the parent will make the child our kill candidate of choice.
	 */
	list_for_each_entry(child, &p->children, sibling) {
		task_lock(child);
		if (child->mm != mm && child->mm)
			points += child->mm->total_vm/2 + 1;
		task_unlock(child);
	}

	/*
	 * CPU time is in tens of seconds and run time is in thousands
	 * of seconds. There is no particular reason for this other than
	 * that it turned out to work very well in practice.
	 */
	cpu_time = (cputime_to_jiffies(p->utime) + cputime_to_jiffies(p->stime))
		>> (SHIFT_HZ + 3);
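	/* e.g. with HZ=1000 (SHIFT_HZ == 10), one cpu_time unit is ~8 seconds */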

	if (uptime >= p->start_time.tv_sec)
		run_time = (uptime - p->start_time.tv_sec) >> 10;
	else
		run_time = 0;

	if (cpu_time)
		points /= int_sqrt(cpu_time);
	if (run_time)
		points /= int_sqrt(int_sqrt(run_time));

	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (task_nice(p) > 0)
		points *= 2;

	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN) ||
	    has_capability_noaudit(p, CAP_SYS_RESOURCE))
		points /= 4;

	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (has_capability_noaudit(p, CAP_SYS_RAWIO))
		points /= 4;

	/*
	 * If p's nodes don't overlap ours, it may still help to kill p
	 * because p may have allocated or otherwise mapped memory on
	 * this node before. However it will be less likely.
	 */
	if (!cpuset_mems_allowed_intersects(current, p))
		points /= 8;

	/*
	 * Adjust the score by oom_adj.
	 */
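	/*
	 * e.g. oom_adj == 4 multiplies points by 16; oom_adj == -4
	 * divides them by 16.
	 */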
	if (oom_adj) {
		if (oom_adj > 0) {
			if (!points)
				points = 1;
			points <<= oom_adj;
		} else
			points >>= -(oom_adj);
	}

#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
	       p->pid, p->comm, points);
#endif
	return points;
}

/*
 * Determine the type of allocation constraint.
 */
static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
						    gfp_t gfp_mask)
{
#ifdef CONFIG_NUMA
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	nodemask_t nodes = node_states[N_HIGH_MEMORY];

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
		if (cpuset_zone_allowed_softwall(zone, gfp_mask))
			node_clear(zone_to_nid(zone), nodes);
		else
			return CONSTRAINT_CPUSET;

	if (!nodes_empty(nodes))
		return CONSTRAINT_MEMORY_POLICY;
#endif

	return CONSTRAINT_NONE;
}

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller will lock the tasklist.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned long *ppoints,
						struct mem_cgroup *mem)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	struct timespec uptime;
	*ppoints = 0;

	do_posix_clock_monotonic_gettime(&uptime);
	do_each_thread(g, p) {
		unsigned long points;

		/*
		 * skip kernel threads and tasks which have already released
		 * their mm.
		 */
		if (!p->mm)
			continue;
		/* skip the init task */
		if (is_global_init(p))
			continue;
		if (mem && !task_in_mem_cgroup(p, mem))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed. Don't allow any other task access to the
		 * memory reserve.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory. Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE))
			return ERR_PTR(-1UL);

		/*
		 * This is in the process of releasing memory so wait for it
		 * to finish before killing some other task by mistake.
		 *
		 * However, if p is the current task, we allow the 'kill' to
		 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
		 * which will allow it to gain access to memory reserves in
		 * the process of exiting and releasing its resources.
		 * Otherwise we could get an easy OOM deadlock.
		 */
		if (p->flags & PF_EXITING) {
			if (p != current)
				return ERR_PTR(-1UL);

			chosen = p;
			*ppoints = ULONG_MAX;
		}

		points = badness(p, uptime.tv_sec);
		if (points > *ppoints) {
			chosen = p;
			*ppoints = points;
		}
	} while_each_thread(g, p);

	return chosen;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @mem: target memory controller
 *
 * Dumps the current memory state of all system tasks, excluding kernel threads.
 * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
 * value, and name.
 *
 * If @mem is non-NULL, only tasks that are a member of that mem_cgroup are
 * shown.
 *
 * Call with tasklist_lock read-locked.
 */
static void dump_tasks(const struct mem_cgroup *mem)
{
	struct task_struct *g, *p;

	printk(KERN_INFO "[ pid ]   uid  tgid total_vm      rss cpu oom_adj "
	       "name\n");
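	/*
	 * Example row (values illustrative):
	 * [ 1234]  1000  1234    34567      890   0       0 bash
	 */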
	do_each_thread(g, p) {
		struct mm_struct *mm;

		if (mem && !task_in_mem_cgroup(p, mem))
			continue;
		if (!thread_group_leader(p))
			continue;

		task_lock(p);
		mm = p->mm;
		if (!mm) {
			/*
			 * total_vm and rss sizes do not exist for tasks with no
			 * mm so there's no need to report them; they can't be
			 * oom killed anyway.
			 */
			task_unlock(p);
			continue;
		}
		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d     %3d %s\n",
		       p->pid, __task_cred(p)->uid, p->tgid, mm->total_vm,
		       get_mm_rss(mm), (int)task_cpu(p), mm->oom_adj, p->comm);
		task_unlock(p);
	} while_each_thread(g, p);
}

/*
 * Send SIGKILL to the selected process irrespective of CAP_SYS_RAWIO,
 * though it's unlikely that we select a process with CAP_SYS_RAWIO
 * set.
 */
static void __oom_kill_task(struct task_struct *p, int verbose)
{
	if (is_global_init(p)) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill init!\n");
		return;
	}

	if (!p->mm)
		return;

	if (verbose)
		printk(KERN_ERR "Killed process %d (%s)\n",
		       task_pid_nr(p), p->comm);

	/*
	 * We give our sacrificial lamb high priority and access to
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
	p->rt.time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);

	force_sig(SIGKILL, p);
}

static int oom_kill_task(struct task_struct *p)
{
	struct mm_struct *mm;
	struct task_struct *g, *q;

	task_lock(p);
	mm = p->mm;
	if (!mm || mm->oom_adj == OOM_DISABLE) {
		task_unlock(p);
		return 1;
	}
	task_unlock(p);
	__oom_kill_task(p, 1);

	/*
	 * Kill all other tasks sharing p's ->mm but sitting in different
	 * thread groups (e.g. tasks created with CLONE_VM). Don't let them
	 * have access to memory reserves though, otherwise we might deplete
	 * all memory.
	 */
	do_each_thread(g, q) {
		if (q->mm == mm && !same_thread_group(q, p))
			force_sig(SIGKILL, q);
	} while_each_thread(g, q);

	return 0;
}

static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			    unsigned long points, struct mem_cgroup *mem,
			    const char *message)
{
	struct task_struct *c;

	if (printk_ratelimit()) {
		task_lock(current);
		printk(KERN_WARNING "%s invoked oom-killer: "
			"gfp_mask=0x%x, order=%d, oom_adj=%d\n",
			current->comm, gfp_mask, order,
			current->mm ? current->mm->oom_adj : OOM_DISABLE);
		cpuset_print_task_mems_allowed(current);
		task_unlock(current);
		dump_stack();
		mem_cgroup_print_oom_info(mem, current);
		show_mem();
		if (sysctl_oom_dump_tasks)
			dump_tasks(mem);
	}

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 * if its mm is still attached.
	 */
	if (p->mm && (p->flags & PF_EXITING)) {
		__oom_kill_task(p, 0);
		return 0;
	}

	printk(KERN_ERR "%s: kill process %d (%s) score %lu or a child\n",
	       message, task_pid_nr(p), p->comm, points);

	/* Try to kill a child first */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == p->mm)
			continue;
		if (!oom_kill_task(c))
			return 0;
	}
	return oom_kill_task(p);
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
	unsigned long points = 0;
	struct task_struct *p;

	read_lock(&tasklist_lock);
retry:
	p = select_bad_process(&points, mem);
	if (PTR_ERR(p) == -1UL)
		goto out;

	if (!p)
		p = current;

	if (oom_kill_process(p, gfp_mask, 0, points, mem,
				"Memory cgroup out of memory"))
		goto retry;
out:
	read_unlock(&tasklist_lock);
}
#endif

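/*
 * OOM notifier chain: callbacks run before we resort to killing and may
 * free memory, reporting the number of pages reclaimed through the
 * unsigned long pointer passed as the last call-chain argument.
 */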
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);
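
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * subsystem that can shed memory under pressure might register a
 * callback and report how many pages it freed through the pointer
 * argument; my_shrink_caches() is a hypothetical helper.
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += my_shrink_caches();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */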

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/*
 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	}

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		/*
		 * Lock each zone in the zonelist under zone_scan_lock so a
		 * parallel invocation of try_set_zone_oom() doesn't succeed
		 * when it shouldn't.
		 */
		zone_set_flag(zone, ZONE_OOM_LOCKED);
	}

out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now invoke the OOM
 * killer again, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	}
	spin_unlock(&zone_scan_lock);
}
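
/*
 * Typical caller pattern (a sketch; the real caller lives in
 * mm/page_alloc.c): take the per-zonelist OOM lock, run the killer,
 * then drop the lock so those zones may OOM again:
 *
 *	if (try_set_zone_oom(zonelist, gfp_mask)) {
 *		out_of_memory(zonelist, gfp_mask, order);
 *		clear_zonelist_oom(zonelist, gfp_mask);
 *	}
 */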

/*
 * Must be called with tasklist_lock held for read.
 */
static void __out_of_memory(gfp_t gfp_mask, int order)
{
	struct task_struct *p;
	unsigned long points;

	if (sysctl_oom_kill_allocating_task)
		if (!oom_kill_process(current, gfp_mask, order, 0, NULL,
				"Out of memory (oom_kill_allocating_task)"))
			return;
retry:
	/*
	 * Rambo mode: Shoot down a process and hope it solves whatever
	 * issues we may have.
	 */
	p = select_bad_process(&points, NULL);

	if (PTR_ERR(p) == -1UL)
		return;

	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p) {
		read_unlock(&tasklist_lock);
		panic("Out of memory and no killable processes...\n");
	}

	if (oom_kill_process(p, gfp_mask, order, points, NULL,
			     "Out of memory"))
		goto retry;
}

/*
 * The pagefault handler calls into here because it is out of memory but
 * doesn't know exactly how or why.
 */
void pagefault_out_of_memory(void)
{
	unsigned long freed = 0;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	/*
	 * If this is from memcg, the oom-killer has already been invoked,
	 * so it is not worth going system-wide OOM.
	 */
	if (mem_cgroup_oom_called(current))
		goto rest_and_return;

	if (sysctl_panic_on_oom)
		panic("out of memory from page fault. panic_on_oom is selected.\n");

	read_lock(&tasklist_lock);
	__out_of_memory(0, 0); /* unknown gfp_mask and order */
	read_unlock(&tasklist_lock);

	/*
	 * Give "p" a good chance of killing itself before we
	 * retry the memory allocation.
	 */
rest_and_return:
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
{
	unsigned long freed = 0;
	enum oom_constraint constraint;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	if (sysctl_panic_on_oom == 2)
		panic("out of memory. Compulsory panic_on_oom is selected.\n");

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(zonelist, gfp_mask);
	read_lock(&tasklist_lock);

	switch (constraint) {
	case CONSTRAINT_MEMORY_POLICY:
		oom_kill_process(current, gfp_mask, order, 0, NULL,
				"No available memory (MPOL_BIND)");
		break;

	case CONSTRAINT_NONE:
		if (sysctl_panic_on_oom)
			panic("out of memory. panic_on_oom is selected\n");
		/* Fall-through */
	case CONSTRAINT_CPUSET:
		__out_of_memory(gfp_mask, order);
		break;
	}

	read_unlock(&tasklist_lock);

	/*
	 * Give "p" a good chance of killing itself before we
	 * retry the memory allocation, unless "p" is current.
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}