oom_kill.c revision 098d7f128a4e53cb64930628915ac767785e0e60
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>

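/*
 * Tunable from userspace as /proc/sys/vm/panic_on_oom (wired up in
 * kernel/sysctl.c): 0 kills a task, 1 panics unless the allocation was
 * constrained by a cpuset or memory policy, and 2 panics unconditionally
 * (see out_of_memory() below). For example:
 *
 *	echo 2 > /proc/sys/vm/panic_on_oom
 */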
int sysctl_panic_on_oom;
static DEFINE_MUTEX(zone_scan_mutex);
/* #define DEBUG */

/**
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of the task whose badness we calculate
 * @uptime: current uptime in seconds
 *
 * The formula used is relatively simple and documented inline in the
 * function. The main rationale is that we want to select a good task
 * to kill when we run out of memory.
 *
 * Good in this context means that:
 * 1) we lose the minimum amount of work done
 * 2) we recover a large amount of memory
 * 3) we don't kill anything innocent of eating tons of memory
 * 4) we want to kill the minimum number of processes (one)
 * 5) we try to kill the process the user expects us to kill; this
 *    algorithm has been meticulously tuned to meet the principle
 *    of least surprise ... (be careful when you change it)
 */
unsigned long badness(struct task_struct *p, unsigned long uptime)
{
	unsigned long points, cpu_time, run_time, s;
	struct mm_struct *mm;
	struct task_struct *child;

	task_lock(p);
	mm = p->mm;
	if (!mm) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = mm->total_vm;

	/*
	 * After this unlock we can no longer dereference local variable `mm'
	 */
	task_unlock(p);

	/*
	 * swapoff can easily use up all memory, so kill those first.
	 */
	if (p->flags & PF_SWAPOFF)
		return ULONG_MAX;

	/*
	 * Processes which fork a lot of child processes are likely
	 * a good choice. We add half the vmsize of the children if they
	 * have their own mm. This prevents forking servers from flooding
	 * the machine with an endless number of children. In case a
	 * single child is eating the vast majority of memory, adding
	 * only half to the parent will make the child our kill candidate
	 * of choice.
	 */
	list_for_each_entry(child, &p->children, sibling) {
		task_lock(child);
		if (child->mm != mm && child->mm)
			points += child->mm->total_vm/2 + 1;
		task_unlock(child);
	}

	/*
	 * CPU time is in tens of seconds and run time is in thousands
	 * of seconds. There is no particular reason for this other than
	 * that it turned out to work very well in practice.
	 */
	cpu_time = (cputime_to_jiffies(p->utime) + cputime_to_jiffies(p->stime))
		>> (SHIFT_HZ + 3);

	if (uptime >= p->start_time.tv_sec)
		run_time = (uptime - p->start_time.tv_sec) >> 10;
	else
		run_time = 0;

	s = int_sqrt(cpu_time);
	if (s)
		points /= s;
	s = int_sqrt(int_sqrt(run_time));
	if (s)
		points /= s;

	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (task_nice(p) > 0)
		points *= 2;

	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_ADMIN) ||
				p->uid == 0 || p->euid == 0)
		points /= 4;

	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_RAWIO))
		points /= 4;

	/*
	 * If p's nodes don't overlap ours, it may still help to kill p
	 * because p may have allocated or otherwise mapped memory on
	 * this node before. However it will be less likely.
	 */
	if (!cpuset_excl_nodes_overlap(p))
		points /= 8;

	/*
	 * Adjust the score by oomkilladj (set via /proc/<pid>/oom_adj):
	 * each positive unit doubles the score, each negative unit
	 * halves it.
	 */
	if (p->oomkilladj) {
		if (p->oomkilladj > 0) {
			if (!points)
				points = 1;
			points <<= p->oomkilladj;
		} else
			points >>= -(p->oomkilladj);
	}

#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
		p->pid, p->comm, points);
#endif
	return points;
}
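
/*
 * Worked example (illustrative numbers only, not from a real system):
 * for a task with total_vm = 100000 pages, 512 s of CPU time and
 * 65536 s of run time, cpu_time = 512 / 8 = 64 (roughly, since SHIFT_HZ
 * only approximates log2(HZ)) and run_time = 65536 / 1024 = 64, so
 *
 *	points = 100000 / int_sqrt(64) / int_sqrt(int_sqrt(64))
 *	       = 100000 / 8 / 2 = 6250
 *
 * A positive nice value would then double that, root ownership would
 * divide it by 4, and oomkilladj = +2 would shift it left by two bits.
 */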

/*
 * Determine the type of allocation constraint.
 */
static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
						    gfp_t gfp_mask)
{
#ifdef CONFIG_NUMA
	struct zone **z;
	nodemask_t nodes = node_states[N_HIGH_MEMORY];

	for (z = zonelist->zones; *z; z++)
		if (cpuset_zone_allowed_softwall(*z, gfp_mask))
			node_clear(zone_to_nid(*z), nodes);
		else
			return CONSTRAINT_CPUSET;

	if (!nodes_empty(nodes))
		return CONSTRAINT_MEMORY_POLICY;
#endif

	return CONSTRAINT_NONE;
}
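
/*
 * Illustrative scenario (hypothetical two-node machine): if the current
 * cpuset allows node 0 only, a zonelist that includes node 1's zones
 * fails cpuset_zone_allowed_softwall() and we report CONSTRAINT_CPUSET.
 * Under an MPOL_BIND policy restricted to node 0, the zonelist itself
 * omits node 1, which therefore stays set in 'nodes', and we report
 * CONSTRAINT_MEMORY_POLICY.
 */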

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller to hold tasklist_lock.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned long *ppoints)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	struct timespec uptime;
	*ppoints = 0;

	do_posix_clock_monotonic_gettime(&uptime);
	do_each_thread(g, p) {
		unsigned long points;

		/*
		 * skip kernel threads and tasks which have already released
		 * their mm.
		 */
		if (!p->mm)
			continue;
		/* skip the init task */
		if (is_init(p))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed. Don't allow any other task access to the
		 * memory reserve.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory. Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE))
			return ERR_PTR(-1UL);

		/*
		 * This is in the process of releasing memory so wait for it
		 * to finish before killing some other task by mistake.
		 *
		 * However, if p is the current task, we allow the 'kill' to
		 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
		 * which will allow it to gain access to memory reserves in
		 * the process of exiting and releasing its resources.
		 * Otherwise we could get an easy OOM deadlock.
		 */
		if (p->flags & PF_EXITING) {
			if (p != current)
				return ERR_PTR(-1UL);

			chosen = p;
			*ppoints = ULONG_MAX;
		}

		if (p->oomkilladj == OOM_DISABLE)
			continue;

		points = badness(p, uptime.tv_sec);
		if (points > *ppoints || !chosen) {
			chosen = p;
			*ppoints = points;
		}
	} while_each_thread(g, p);

	return chosen;
}
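
/*
 * How a caller is expected to interpret the result (this mirrors what
 * out_of_memory() does below):
 *
 *	p = select_bad_process(&points);
 *	if (PTR_ERR(p) == -1UL)
 *		- a task is already dying, back off and wait;
 *	else if (!p)
 *		- nothing killable at all, panic;
 *	else
 *		- kill it via oom_kill_process(p, points, ...);
 */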

/*
 * Send SIGKILL to the selected process irrespective of CAP_SYS_RAWIO,
 * though it's unlikely that we select a process with CAP_SYS_RAWIO set.
 */
static void __oom_kill_task(struct task_struct *p, int verbose)
{
	if (is_init(p)) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill init!\n");
		return;
	}

	if (!p->mm) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill an mm-less task!\n");
		return;
	}

	if (verbose)
		printk(KERN_ERR "Killed process %d (%s)\n", p->pid, p->comm);

	/*
	 * We give our sacrificial lamb high priority and access to
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
	p->time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);

	force_sig(SIGKILL, p);
}
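
/*
 * Why TIF_MEMDIE helps, roughly: a simplified sketch of the check the
 * page allocator makes in mm/page_alloc.c of this era (the exact
 * condition there differs slightly):
 *
 *	if ((p->flags & PF_MEMALLOC) || test_thread_flag(TIF_MEMDIE))
 *		allow the allocation to ignore the zone watermarks;
 *
 * so the dying task can allocate whatever it needs to reach exit().
 */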

static int oom_kill_task(struct task_struct *p)
{
	struct mm_struct *mm;
	struct task_struct *g, *q;

	mm = p->mm;

	/* WARNING: mm may not be dereferenced since we did not obtain its
	 * value from get_task_mm(p).  This is OK since all we need to do is
	 * compare mm to q->mm below.
	 *
	 * Furthermore, even if mm contains a non-NULL value, p->mm may
	 * change to NULL at any time since we do not hold task_lock(p).
	 * However, this is of no concern to us.
	 */

	if (mm == NULL)
		return 1;

	/*
	 * Don't kill the process if any threads are set to OOM_DISABLE
	 */
	do_each_thread(g, q) {
		if (q->mm == mm && q->oomkilladj == OOM_DISABLE)
			return 1;
	} while_each_thread(g, q);

	__oom_kill_task(p, 1);

	/*
	 * Kill all other processes sharing p's ->mm but living in a
	 * different thread group (p's own threads die with it). Don't
	 * let them have access to memory reserves though, otherwise we
	 * might deplete all memory.
	 */
	do_each_thread(g, q) {
		if (q->mm == mm && q->tgid != p->tgid)
			force_sig(SIGKILL, q);
	} while_each_thread(g, q);

	return 0;
}
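
/*
 * Putting the pieces together (illustrative): for a forking server,
 * badness() above already charged half of each child's vmsize to the
 * parent, so the parent tends to be selected; oom_kill_process() below
 * then tries to sacrifice one of its children (the first one with its
 * own mm) rather than the server itself, losing less total work.
 */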

static int oom_kill_process(struct task_struct *p, unsigned long points,
		const char *message)
{
	struct task_struct *c;
	struct list_head *tsk;

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		__oom_kill_task(p, 0);
		return 0;
	}

	printk(KERN_ERR "%s: kill process %d (%s) score %lu or a child\n",
					message, p->pid, p->comm, points);

	/* Try to kill a child first */
	list_for_each(tsk, &p->children) {
		c = list_entry(tsk, struct task_struct, sibling);
		if (c->mm == p->mm)
			continue;
		if (!oom_kill_task(c))
			return 0;
	}
	return oom_kill_task(p);
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
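
/*
 * A minimal sketch of a client (hypothetical module code, not part of
 * this file): an OOM notifier can release cached memory and report the
 * number of pages it freed through the unsigned long that 'parm' points
 * at; out_of_memory() checks that count before killing anything.
 * example_shrink_my_cache() is an assumed helper.
 */
#if 0	/* illustrative example only */
static int example_oom_notify(struct notifier_block *nb,
			      unsigned long dummy, void *parm)
{
	unsigned long *freed = parm;

	*freed += example_shrink_my_cache();
	return NOTIFY_OK;
}

static struct notifier_block example_oom_nb = {
	.notifier_call = example_oom_notify,
};

/* in the module's init function: register_oom_notifier(&example_oom_nb); */
#endif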

/*
 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zone_oom(struct zonelist *zonelist)
{
	struct zone **z;
	int ret = 1;

	z = zonelist->zones;

	mutex_lock(&zone_scan_mutex);
	do {
		if (zone_is_oom_locked(*z)) {
			ret = 0;
			goto out;
		}
	} while (*(++z) != NULL);

	/*
	 * Lock each zone in the zonelist under zone_scan_mutex so a parallel
	 * invocation of try_set_zone_oom() doesn't succeed when it shouldn't.
	 */
	z = zonelist->zones;
	do {
		zone_set_flag(*z, ZONE_OOM_LOCKED);
	} while (*(++z) != NULL);
out:
	mutex_unlock(&zone_scan_mutex);
	return ret;
}

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that
 * failed allocation attempts with zonelists containing them may invoke
 * the OOM killer again, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist)
{
	struct zone **z;

	z = zonelist->zones;

	mutex_lock(&zone_scan_mutex);
	do {
		zone_clear_flag(*z, ZONE_OOM_LOCKED);
	} while (*(++z) != NULL);
	mutex_unlock(&zone_scan_mutex);
}
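
/*
 * A rough sketch of the caller's pattern (simplified from what
 * __alloc_pages() in mm/page_alloc.c does around this era; the details
 * there differ): serialize OOM kills per zonelist, then retry.
 *
 *	if (!try_set_zone_oom(zonelist)) {
 *		schedule_timeout_uninterruptible(1);
 *		goto restart;
 *	}
 *	out_of_memory(zonelist, gfp_mask, order);
 *	clear_zonelist_oom(zonelist);
 *	goto restart;
 */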
428
429/**
430 * out_of_memory - kill the "best" process when we run out of memory
431 *
432 * If we run out of memory, we have the choice between either
433 * killing a random task (bad), letting the system crash (worse)
434 * OR try to be smart about which process to kill. Note that we
435 * don't have to be perfect here, we just have to be good.
436 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
{
	struct task_struct *p;
	unsigned long points = 0;
	unsigned long freed = 0;
	enum oom_constraint constraint;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	if (printk_ratelimit()) {
		printk(KERN_WARNING "%s invoked oom-killer: "
			"gfp_mask=0x%x, order=%d, oomkilladj=%d\n",
			current->comm, gfp_mask, order, current->oomkilladj);
		dump_stack();
		show_mem();
	}

	if (sysctl_panic_on_oom == 2)
		panic("out of memory. Compulsory panic_on_oom is selected.\n");

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(zonelist, gfp_mask);
	cpuset_lock();
	read_lock(&tasklist_lock);

	switch (constraint) {
	case CONSTRAINT_MEMORY_POLICY:
		oom_kill_process(current, points,
				"No available memory (MPOL_BIND)");
		break;

	case CONSTRAINT_CPUSET:
		oom_kill_process(current, points,
				"No available memory in cpuset");
		break;

	case CONSTRAINT_NONE:
		if (sysctl_panic_on_oom)
			panic("out of memory. panic_on_oom is selected\n");
retry:
		/*
		 * Rambo mode: Shoot down a process and hope it solves whatever
		 * issues we may have.
		 */
		p = select_bad_process(&points);

		if (PTR_ERR(p) == -1UL)
			goto out;

		/* Found nothing?!?! Either we hang forever, or we panic. */
		if (!p) {
			read_unlock(&tasklist_lock);
			cpuset_unlock();
			panic("Out of memory and no killable processes...\n");
		}

		if (oom_kill_process(p, points, "Out of memory"))
			goto retry;

		break;
	}

out:
	read_unlock(&tasklist_lock);
	cpuset_unlock();

	/*
	 * Give "p" a good chance of killing itself before we retry the
	 * allocation, unless "p" is current.
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}