oom_kill.c revision 4a3ede107e422a0c53d28024b0aa902ca22a8768
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>

int sysctl_panic_on_oom;
/* #define DEBUG */

/**
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of which task we should calculate
 * @uptime: current uptime in seconds
 *
 * The formula used is relatively simple and documented inline in the
 * function. The main rationale is that we want to select a good task
 * to kill when we run out of memory.
 *
 * Good in this context means that:
 * 1) we lose the minimum amount of work done
 * 2) we recover a large amount of memory
 * 3) we don't kill anything innocent of eating tons of memory
 * 4) we want to kill the minimum amount of processes (one)
 * 5) we try to kill the process the user expects us to kill, this
 *    algorithm has been meticulously tuned to meet the principle
 *    of least surprise ... (be careful when you change it)
 */

unsigned long badness(struct task_struct *p, unsigned long uptime)
{
	unsigned long points, cpu_time, run_time, s;
	struct mm_struct *mm;
	struct task_struct *child;

	task_lock(p);
	mm = p->mm;
	if (!mm) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = mm->total_vm;

	/*
	 * After this unlock we can no longer dereference local variable `mm'
	 */
	task_unlock(p);

	/*
	 * Processes which fork a lot of child processes are likely
	 * a good choice. We add half the vmsize of the children if they
	 * have an own mm. This prevents forking servers to flood the
	 * machine with an endless amount of children. In case a single
	 * child is eating the vast majority of memory, adding only half
	 * to the parents will make the child our kill candidate of choice.
	 */
	list_for_each_entry(child, &p->children, sibling) {
		task_lock(child);
		if (child->mm != mm && child->mm)
			points += child->mm->total_vm/2 + 1;
		task_unlock(child);
	}

	/*
	 * CPU time is in tens of seconds and run time is in thousands
	 * of seconds. There is no particular reason for this other than
	 * that it turned out to work very well in practice.
	 */
	cpu_time = (cputime_to_jiffies(p->utime) + cputime_to_jiffies(p->stime))
		>> (SHIFT_HZ + 3);

	if (uptime >= p->start_time.tv_sec)
		run_time = (uptime - p->start_time.tv_sec) >> 10;
	else
		run_time = 0;

	s = int_sqrt(cpu_time);
	if (s)
		points /= s;
	s = int_sqrt(int_sqrt(run_time));
	if (s)
		points /= s;

	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (task_nice(p) > 0)
		points *= 2;

	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_ADMIN) ||
				p->uid == 0 || p->euid == 0)
		points /= 4;

	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_RAWIO))
		points /= 4;

	/*
	 * If p's nodes don't overlap ours, it may still help to kill p
	 * because p may have allocated or otherwise mapped memory on
	 * this node before. However it will be less likely.
	 */
	if (!cpuset_excl_nodes_overlap(p))
		points /= 8;

	/*
	 * Adjust the score by oomkilladj.
	 */
	if (p->oomkilladj) {
		if (p->oomkilladj > 0)
			points <<= p->oomkilladj;
		else
			points >>= -(p->oomkilladj);
	}

#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %d points\n",
	p->pid, p->comm, points);
#endif
	return points;
}

/*
 * Types of limitations to the nodes from which allocations may occur
 */
#define CONSTRAINT_NONE 1
#define CONSTRAINT_MEMORY_POLICY 2
#define CONSTRAINT_CPUSET 3

/*
 * Determine the type of allocation constraint.
 */
static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
{
#ifdef CONFIG_NUMA
	struct zone **z;
	nodemask_t nodes = node_online_map;

	for (z = zonelist->zones; *z; z++)
		if (cpuset_zone_allowed(*z, gfp_mask))
			node_clear((*z)->zone_pgdat->node_id,
					nodes);
		else
			return CONSTRAINT_CPUSET;

	if (!nodes_empty(nodes))
		return CONSTRAINT_MEMORY_POLICY;
#endif

	return CONSTRAINT_NONE;
}

/*
 * Simple selection loop. We chose the process with the highest
 * number of 'points'. We expect the caller will lock the tasklist.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned long *ppoints)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	struct timespec uptime;
	*ppoints = 0;

	do_posix_clock_monotonic_gettime(&uptime);
	do_each_thread(g, p) {
		unsigned long points;
		int releasing;

		/* skip the init task with pid == 1 */
		if (p->pid == 1)
			continue;

		/*
		 * This is in the process of releasing memory so wait for it
		 * to finish before killing some other task by mistake.
		 *
		 * However, if p is the current task, we allow the 'kill' to
		 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
		 * which will allow it to gain access to memory reserves in
		 * the process of exiting and releasing its resources.
		 * Otherwise we could get an OOM deadlock.
		 */
		releasing = test_tsk_thread_flag(p, TIF_MEMDIE) ||
						p->flags & PF_EXITING;
		if (releasing) {
			/* PF_DEAD tasks have already released their mm */
			if (p->flags & PF_DEAD)
				continue;
			if (p->flags & PF_EXITING && p == current) {
				chosen = p;
				*ppoints = ULONG_MAX;
				break;
			}
			return ERR_PTR(-1UL);
		}
		if (p->oomkilladj == OOM_DISABLE)
			continue;
		if (p->flags & PF_SWAPOFF)
			return p;

		points = badness(p, uptime.tv_sec);
		if (points > *ppoints || !chosen) {
			chosen = p;
			*ppoints = points;
		}
	} while_each_thread(g, p);
	return chosen;
}

/**
 * We must be careful though to never send SIGKILL a process with
 * CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that
 * we select a process with CAP_SYS_RAW_IO set).
 */
static void __oom_kill_task(struct task_struct *p, const char *message)
{
	if (p->pid == 1) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill init!\n");
		return;
	}

	task_lock(p);
	if (!p->mm || p->mm == &init_mm) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill an mm-less task!\n");
		task_unlock(p);
		return;
	}
	task_unlock(p);

	if (message) {
		printk(KERN_ERR "%s: Killed process %d (%s).\n",
				message, p->pid, p->comm);
	}

	/*
	 * We give our sacrificial lamb high priority and access to
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
	p->time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);

	force_sig(SIGKILL, p);
}

static int oom_kill_task(struct task_struct *p, const char *message)
{
	struct mm_struct *mm;
	struct task_struct *g, *q;

	mm = p->mm;

	/* WARNING: mm may not be dereferenced since we did not obtain its
	 * value from get_task_mm(p). This is OK since all we need to do is
	 * compare mm to q->mm below.
	 *
	 * Furthermore, even if mm contains a non-NULL value, p->mm may
	 * change to NULL at any time since we do not hold task_lock(p).
	 * However, this is of no concern to us.
	 */

	if (mm == NULL || mm == &init_mm)
		return 1;

	__oom_kill_task(p, message);
	/*
	 * kill all processes that share the ->mm (i.e. all threads),
	 * but are in a different thread group
	 */
	do_each_thread(g, q)
		if (q->mm == mm && q->tgid != p->tgid)
			__oom_kill_task(q, message);
	while_each_thread(g, q);

	return 0;
}

static int oom_kill_process(struct task_struct *p, unsigned long points,
		const char *message)
{
	struct task_struct *c;
	struct list_head *tsk;

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		__oom_kill_task(p, NULL);
		return 0;
	}

	printk(KERN_ERR "Out of Memory: Kill process %d (%s) score %li"
			" and children.\n", p->pid, p->comm, points);
	/* Try to kill a child first */
	list_for_each(tsk, &p->children) {
		c = list_entry(tsk, struct task_struct, sibling);
		if (c->mm == p->mm)
			continue;
		if (!oom_kill_task(c, message))
			return 0;
	}
	return oom_kill_task(p, message);
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/**
 * out_of_memory - kill the "best" process when we run out of memory
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
{
	struct task_struct *p;
	unsigned long points = 0;
	unsigned long freed = 0;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	if (printk_ratelimit()) {
		printk("oom-killer: gfp_mask=0x%x, order=%d\n",
			gfp_mask, order);
		dump_stack();
		show_mem();
	}

	cpuset_lock();
	read_lock(&tasklist_lock);

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	switch (constrained_alloc(zonelist, gfp_mask)) {
	case CONSTRAINT_MEMORY_POLICY:
		oom_kill_process(current, points,
				"No available memory (MPOL_BIND)");
		break;

	case CONSTRAINT_CPUSET:
		oom_kill_process(current, points,
				"No available memory in cpuset");
		break;

	case CONSTRAINT_NONE:
		if (sysctl_panic_on_oom)
			panic("out of memory. panic_on_oom is selected\n");
retry:
		/*
		 * Rambo mode: Shoot down a process and hope it solves whatever
		 * issues we may have.
		 */
		p = select_bad_process(&points);

		if (PTR_ERR(p) == -1UL)
			goto out;

		/* Found nothing?!?! Either we hang forever, or we panic. */
		if (!p) {
			read_unlock(&tasklist_lock);
			cpuset_unlock();
			panic("Out of memory and no killable processes...\n");
		}

		if (oom_kill_process(p, points, "Out of memory"))
			goto retry;

		break;
	}

out:
	read_unlock(&tasklist_lock);
	cpuset_unlock();

	/*
	 * Give "p" a good chance of killing itself before we
	 * retry to allocate memory unless "p" is current
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}
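
The oomkilladj value that badness() shifts the score by is set from userspace through procfs. As a minimal sketch (not part of oom_kill.c, assuming the 2.6-era /proc/<pid>/oom_adj interface), a process could lower its own badness score like this:

#include <stdio.h>

int main(void)
{
	/* Assumed interface: /proc/self/oom_adj. A positive value left-shifts
	 * the badness score, a negative value right-shifts it, and
	 * OOM_DISABLE (-17) exempts the task from selection entirely. */
	FILE *f = fopen("/proc/self/oom_adj", "w");

	if (!f) {
		perror("fopen /proc/self/oom_adj");
		return 1;
	}
	fprintf(f, "%d\n", -2);	/* points >>= 2 for this task */
	fclose(f);
	return 0;
}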