tree_plugin.h revision 79a62f957e0b37c59610a96d018cc341aebb48f4
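Before the annotated listing, a minimal sketch of the reader/updater pattern that this plugin provides the machinery for: rcu_read_lock()/rcu_read_unlock() delimit read-side critical sections (which CONFIG_TREE_PREEMPT_RCU allows to be preempted and tracked on the ->blkd_tasks lists below), while synchronize_rcu() and call_rcu(), both defined later in this file, wait for or defer work past a grace period. This sketch is not part of tree_plugin.h; struct foo, the gp pointer, and the helper names are illustrative assumptions.

/* Illustrative usage only -- not part of tree_plugin.h. */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {				/* hypothetical RCU-protected object */
	int val;
	struct rcu_head rh;		/* used by call_rcu() for deferred free */
};

static struct foo __rcu *gp;		/* global RCU-protected pointer */

static int reader(void)
{
	struct foo *p;
	int v = 0;

	rcu_read_lock();		/* read-side critical section begins */
	p = rcu_dereference(gp);
	if (p)
		v = p->val;
	rcu_read_unlock();		/* may enter rcu_read_unlock_special() if preempted */
	return v;
}

static void free_foo_rcu(struct rcu_head *rh)
{
	kfree(container_of(rh, struct foo, rh));
}

static void updater(int newval)
{
	struct foo *newp = kzalloc(sizeof(*newp), GFP_KERNEL);
	struct foo *oldp;

	if (!newp)
		return;
	newp->val = newval;
	oldp = rcu_dereference_protected(gp, 1);	/* update-side access */
	rcu_assign_pointer(gp, newp);
	if (oldp)
		call_rcu(&oldp->rh, free_foo_rcu);	/* free after a grace period */
}

The annotated listing follows.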
1/* 2 * Read-Copy Update mechanism for mutual exclusion (tree-based version) 3 * Internal non-public definitions that provide either classic 4 * or preemptible semantics. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 * 20 * Copyright Red Hat, 2009 21 * Copyright IBM Corporation, 2009 22 * 23 * Author: Ingo Molnar <mingo@elte.hu> 24 * Paul E. McKenney <paulmck@linux.vnet.ibm.com> 25 */ 26 27#include <linux/delay.h> 28#include <linux/gfp.h> 29#include <linux/oom.h> 30#include <linux/smpboot.h> 31#include "../time/tick-internal.h" 32 33#define RCU_KTHREAD_PRIO 1 34 35#ifdef CONFIG_RCU_BOOST 36#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO 37#else 38#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO 39#endif 40 41#ifdef CONFIG_RCU_NOCB_CPU 42static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */ 43static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */ 44static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. */ 45static char __initdata nocb_buf[NR_CPUS * 5]; 46#endif /* #ifdef CONFIG_RCU_NOCB_CPU */ 47 48/* 49 * Check the RCU kernel configuration parameters and print informative 50 * messages about anything out of the ordinary. If you like #ifdef, you 51 * will love this function. 
52 */ 53static void __init rcu_bootup_announce_oddness(void) 54{ 55#ifdef CONFIG_RCU_TRACE 56 pr_info("\tRCU debugfs-based tracing is enabled.\n"); 57#endif 58#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32) 59 pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n", 60 CONFIG_RCU_FANOUT); 61#endif 62#ifdef CONFIG_RCU_FANOUT_EXACT 63 pr_info("\tHierarchical RCU autobalancing is disabled.\n"); 64#endif 65#ifdef CONFIG_RCU_FAST_NO_HZ 66 pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n"); 67#endif 68#ifdef CONFIG_PROVE_RCU 69 pr_info("\tRCU lockdep checking is enabled.\n"); 70#endif 71#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE 72 pr_info("\tRCU torture testing starts during boot.\n"); 73#endif 74#if defined(CONFIG_TREE_PREEMPT_RCU) && defined(CONFIG_RCU_CPU_STALL_VERBOSE) 75 pr_info("\tDump stacks of tasks blocking RCU-preempt GP.\n"); 76#endif 77#if defined(CONFIG_RCU_CPU_STALL_INFO) 78 pr_info("\tAdditional per-CPU info printed with stalls.\n"); 79#endif 80#if NUM_RCU_LVL_4 != 0 81 pr_info("\tFour-level hierarchy is enabled.\n"); 82#endif 83 if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF) 84 pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf); 85 if (nr_cpu_ids != NR_CPUS) 86 pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids); 87#ifdef CONFIG_RCU_NOCB_CPU 88#ifndef CONFIG_RCU_NOCB_CPU_NONE 89 if (!have_rcu_nocb_mask) { 90 zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL); 91 have_rcu_nocb_mask = true; 92 } 93#ifdef CONFIG_RCU_NOCB_CPU_ZERO 94 pr_info("\tOffload RCU callbacks from CPU 0\n"); 95 cpumask_set_cpu(0, rcu_nocb_mask); 96#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */ 97#ifdef CONFIG_RCU_NOCB_CPU_ALL 98 pr_info("\tOffload RCU callbacks from all CPUs\n"); 99 cpumask_copy(rcu_nocb_mask, cpu_possible_mask); 100#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */ 101#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */ 102 if (have_rcu_nocb_mask) { 103 if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) { 104 pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n"); 105 cpumask_and(rcu_nocb_mask, cpu_possible_mask, 106 rcu_nocb_mask); 107 } 108 cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask); 109 pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf); 110 if (rcu_nocb_poll) 111 pr_info("\tPoll for callbacks from no-CBs CPUs.\n"); 112 } 113#endif /* #ifdef CONFIG_RCU_NOCB_CPU */ 114} 115 116#ifdef CONFIG_TREE_PREEMPT_RCU 117 118RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu); 119static struct rcu_state *rcu_state = &rcu_preempt_state; 120 121static int rcu_preempted_readers_exp(struct rcu_node *rnp); 122 123/* 124 * Tell them what RCU they are running. 125 */ 126static void __init rcu_bootup_announce(void) 127{ 128 pr_info("Preemptible hierarchical RCU implementation.\n"); 129 rcu_bootup_announce_oddness(); 130} 131 132/* 133 * Return the number of RCU-preempt batches processed thus far 134 * for debug and statistics. 135 */ 136long rcu_batches_completed_preempt(void) 137{ 138 return rcu_preempt_state.completed; 139} 140EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt); 141 142/* 143 * Return the number of RCU batches processed thus far for debug & stats. 144 */ 145long rcu_batches_completed(void) 146{ 147 return rcu_batches_completed_preempt(); 148} 149EXPORT_SYMBOL_GPL(rcu_batches_completed); 150 151/* 152 * Force a quiescent state for preemptible RCU.
153 */ 154void rcu_force_quiescent_state(void) 155{ 156 force_quiescent_state(&rcu_preempt_state); 157} 158EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); 159 160/* 161 * Record a preemptible-RCU quiescent state for the specified CPU. Note 162 * that this just means that the task currently running on the CPU is 163 * not in a quiescent state. There might be any number of tasks blocked 164 * while in an RCU read-side critical section. 165 * 166 * Unlike the other rcu_*_qs() functions, callers to this function 167 * must disable irqs in order to protect the assignment to 168 * ->rcu_read_unlock_special. 169 */ 170static void rcu_preempt_qs(int cpu) 171{ 172 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); 173 174 if (rdp->passed_quiesce == 0) 175 trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs")); 176 rdp->passed_quiesce = 1; 177 current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; 178} 179 180/* 181 * We have entered the scheduler, and the current task might soon be 182 * context-switched away from. If this task is in an RCU read-side 183 * critical section, we will no longer be able to rely on the CPU to 184 * record that fact, so we enqueue the task on the blkd_tasks list. 185 * The task will dequeue itself when it exits the outermost enclosing 186 * RCU read-side critical section. Therefore, the current grace period 187 * cannot be permitted to complete until the blkd_tasks list entries 188 * predating the current grace period drain, in other words, until 189 * rnp->gp_tasks becomes NULL. 190 * 191 * Caller must disable preemption. 192 */ 193static void rcu_preempt_note_context_switch(int cpu) 194{ 195 struct task_struct *t = current; 196 unsigned long flags; 197 struct rcu_data *rdp; 198 struct rcu_node *rnp; 199 200 if (t->rcu_read_lock_nesting > 0 && 201 (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { 202 203 /* Possibly blocking in an RCU read-side critical section. */ 204 rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu); 205 rnp = rdp->mynode; 206 raw_spin_lock_irqsave(&rnp->lock, flags); 207 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; 208 t->rcu_blocked_node = rnp; 209 210 /* 211 * If this CPU has already checked in, then this task 212 * will hold up the next grace period rather than the 213 * current grace period. Queue the task accordingly. 214 * If the task is queued for the current grace period 215 * (i.e., this CPU has not yet passed through a quiescent 216 * state for the current grace period), then as long 217 * as that task remains queued, the current grace period 218 * cannot end. Note that there is some uncertainty as 219 * to exactly when the current grace period started. 220 * We take a conservative approach, which can result 221 * in unnecessarily waiting on tasks that started very 222 * slightly after the current grace period began. C'est 223 * la vie!!! 224 * 225 * But first, note that the current CPU must still be 226 * on line! 
227 */ 228 WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0); 229 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); 230 if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) { 231 list_add(&t->rcu_node_entry, rnp->gp_tasks->prev); 232 rnp->gp_tasks = &t->rcu_node_entry; 233#ifdef CONFIG_RCU_BOOST 234 if (rnp->boost_tasks != NULL) 235 rnp->boost_tasks = rnp->gp_tasks; 236#endif /* #ifdef CONFIG_RCU_BOOST */ 237 } else { 238 list_add(&t->rcu_node_entry, &rnp->blkd_tasks); 239 if (rnp->qsmask & rdp->grpmask) 240 rnp->gp_tasks = &t->rcu_node_entry; 241 } 242 trace_rcu_preempt_task(rdp->rsp->name, 243 t->pid, 244 (rnp->qsmask & rdp->grpmask) 245 ? rnp->gpnum 246 : rnp->gpnum + 1); 247 raw_spin_unlock_irqrestore(&rnp->lock, flags); 248 } else if (t->rcu_read_lock_nesting < 0 && 249 t->rcu_read_unlock_special) { 250 251 /* 252 * Complete exit from RCU read-side critical section on 253 * behalf of preempted instance of __rcu_read_unlock(). 254 */ 255 rcu_read_unlock_special(t); 256 } 257 258 /* 259 * Either we were not in an RCU read-side critical section to 260 * begin with, or we have now recorded that critical section 261 * globally. Either way, we can now note a quiescent state 262 * for this CPU. Again, if we were in an RCU read-side critical 263 * section, and if that critical section was blocking the current 264 * grace period, then the fact that the task has been enqueued 265 * means that we continue to block the current grace period. 266 */ 267 local_irq_save(flags); 268 rcu_preempt_qs(cpu); 269 local_irq_restore(flags); 270} 271 272/* 273 * Check for preempted RCU readers blocking the current grace period 274 * for the specified rcu_node structure. If the caller needs a reliable 275 * answer, it must hold the rcu_node's ->lock. 276 */ 277static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) 278{ 279 return rnp->gp_tasks != NULL; 280} 281 282/* 283 * Record a quiescent state for all tasks that were previously queued 284 * on the specified rcu_node structure and that were blocking the current 285 * RCU grace period. The caller must hold the specified rnp->lock with 286 * irqs disabled, and this lock is released upon return, but irqs remain 287 * disabled. 288 */ 289static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) 290 __releases(rnp->lock) 291{ 292 unsigned long mask; 293 struct rcu_node *rnp_p; 294 295 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 296 raw_spin_unlock_irqrestore(&rnp->lock, flags); 297 return; /* Still need more quiescent states! */ 298 } 299 300 rnp_p = rnp->parent; 301 if (rnp_p == NULL) { 302 /* 303 * Either there is only one rcu_node in the tree, 304 * or tasks were kicked up to root rcu_node due to 305 * CPUs going offline. 306 */ 307 rcu_report_qs_rsp(&rcu_preempt_state, flags); 308 return; 309 } 310 311 /* Report up the rest of the hierarchy. */ 312 mask = rnp->grpmask; 313 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ 314 raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */ 315 rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags); 316} 317 318/* 319 * Advance a ->blkd_tasks-list pointer to the next entry, instead 320 * returning NULL if at the end of the list. 
321 */ 322static struct list_head *rcu_next_node_entry(struct task_struct *t, 323 struct rcu_node *rnp) 324{ 325 struct list_head *np; 326 327 np = t->rcu_node_entry.next; 328 if (np == &rnp->blkd_tasks) 329 np = NULL; 330 return np; 331} 332 333/* 334 * Handle special cases during rcu_read_unlock(), such as needing to 335 * notify RCU core processing or task having blocked during the RCU 336 * read-side critical section. 337 */ 338void rcu_read_unlock_special(struct task_struct *t) 339{ 340 int empty; 341 int empty_exp; 342 int empty_exp_now; 343 unsigned long flags; 344 struct list_head *np; 345#ifdef CONFIG_RCU_BOOST 346 struct rt_mutex *rbmp = NULL; 347#endif /* #ifdef CONFIG_RCU_BOOST */ 348 struct rcu_node *rnp; 349 int special; 350 351 /* NMI handlers cannot block and cannot safely manipulate state. */ 352 if (in_nmi()) 353 return; 354 355 local_irq_save(flags); 356 357 /* 358 * If RCU core is waiting for this CPU to exit critical section, 359 * let it know that we have done so. 360 */ 361 special = t->rcu_read_unlock_special; 362 if (special & RCU_READ_UNLOCK_NEED_QS) { 363 rcu_preempt_qs(smp_processor_id()); 364 if (!t->rcu_read_unlock_special) { 365 local_irq_restore(flags); 366 return; 367 } 368 } 369 370 /* Hardware IRQ handlers cannot block, complain if they get here. */ 371 if (WARN_ON_ONCE(in_irq() || in_serving_softirq())) { 372 local_irq_restore(flags); 373 return; 374 } 375 376 /* Clean up if blocked during RCU read-side critical section. */ 377 if (special & RCU_READ_UNLOCK_BLOCKED) { 378 t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED; 379 380 /* 381 * Remove this task from the list it blocked on. The 382 * task can migrate while we acquire the lock, but at 383 * most one time. So at most two passes through loop. 384 */ 385 for (;;) { 386 rnp = t->rcu_blocked_node; 387 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ 388 if (rnp == t->rcu_blocked_node) 389 break; 390 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ 391 } 392 empty = !rcu_preempt_blocked_readers_cgp(rnp); 393 empty_exp = !rcu_preempted_readers_exp(rnp); 394 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ 395 np = rcu_next_node_entry(t, rnp); 396 list_del_init(&t->rcu_node_entry); 397 t->rcu_blocked_node = NULL; 398 trace_rcu_unlock_preempted_task(TPS("rcu_preempt"), 399 rnp->gpnum, t->pid); 400 if (&t->rcu_node_entry == rnp->gp_tasks) 401 rnp->gp_tasks = np; 402 if (&t->rcu_node_entry == rnp->exp_tasks) 403 rnp->exp_tasks = np; 404#ifdef CONFIG_RCU_BOOST 405 if (&t->rcu_node_entry == rnp->boost_tasks) 406 rnp->boost_tasks = np; 407 /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */ 408 if (t->rcu_boost_mutex) { 409 rbmp = t->rcu_boost_mutex; 410 t->rcu_boost_mutex = NULL; 411 } 412#endif /* #ifdef CONFIG_RCU_BOOST */ 413 414 /* 415 * If this was the last task on the current list, and if 416 * we aren't waiting on any CPUs, report the quiescent state. 417 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock, 418 * so we must take a snapshot of the expedited state. 419 */ 420 empty_exp_now = !rcu_preempted_readers_exp(rnp); 421 if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) { 422 trace_rcu_quiescent_state_report(TPS("preempt_rcu"), 423 rnp->gpnum, 424 0, rnp->qsmask, 425 rnp->level, 426 rnp->grplo, 427 rnp->grphi, 428 !!rnp->gp_tasks); 429 rcu_report_unblock_qs_rnp(rnp, flags); 430 } else { 431 raw_spin_unlock_irqrestore(&rnp->lock, flags); 432 } 433 434#ifdef CONFIG_RCU_BOOST 435 /* Unboost if we were boosted. 
*/ 436 if (rbmp) 437 rt_mutex_unlock(rbmp); 438#endif /* #ifdef CONFIG_RCU_BOOST */ 439 440 /* 441 * If this was the last task on the expedited lists, 442 * then we need to report up the rcu_node hierarchy. 443 */ 444 if (!empty_exp && empty_exp_now) 445 rcu_report_exp_rnp(&rcu_preempt_state, rnp, true); 446 } else { 447 local_irq_restore(flags); 448 } 449} 450 451#ifdef CONFIG_RCU_CPU_STALL_VERBOSE 452 453/* 454 * Dump detailed information for all tasks blocking the current RCU 455 * grace period on the specified rcu_node structure. 456 */ 457static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) 458{ 459 unsigned long flags; 460 struct task_struct *t; 461 462 raw_spin_lock_irqsave(&rnp->lock, flags); 463 if (!rcu_preempt_blocked_readers_cgp(rnp)) { 464 raw_spin_unlock_irqrestore(&rnp->lock, flags); 465 return; 466 } 467 t = list_entry(rnp->gp_tasks, 468 struct task_struct, rcu_node_entry); 469 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) 470 sched_show_task(t); 471 raw_spin_unlock_irqrestore(&rnp->lock, flags); 472} 473 474/* 475 * Dump detailed information for all tasks blocking the current RCU 476 * grace period. 477 */ 478static void rcu_print_detail_task_stall(struct rcu_state *rsp) 479{ 480 struct rcu_node *rnp = rcu_get_root(rsp); 481 482 rcu_print_detail_task_stall_rnp(rnp); 483 rcu_for_each_leaf_node(rsp, rnp) 484 rcu_print_detail_task_stall_rnp(rnp); 485} 486 487#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */ 488 489static void rcu_print_detail_task_stall(struct rcu_state *rsp) 490{ 491} 492 493#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */ 494 495#ifdef CONFIG_RCU_CPU_STALL_INFO 496 497static void rcu_print_task_stall_begin(struct rcu_node *rnp) 498{ 499 pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):", 500 rnp->level, rnp->grplo, rnp->grphi); 501} 502 503static void rcu_print_task_stall_end(void) 504{ 505 pr_cont("\n"); 506} 507 508#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */ 509 510static void rcu_print_task_stall_begin(struct rcu_node *rnp) 511{ 512} 513 514static void rcu_print_task_stall_end(void) 515{ 516} 517 518#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */ 519 520/* 521 * Scan the current list of tasks blocked within RCU read-side critical 522 * sections, printing out the tid of each. 523 */ 524static int rcu_print_task_stall(struct rcu_node *rnp) 525{ 526 struct task_struct *t; 527 int ndetected = 0; 528 529 if (!rcu_preempt_blocked_readers_cgp(rnp)) 530 return 0; 531 rcu_print_task_stall_begin(rnp); 532 t = list_entry(rnp->gp_tasks, 533 struct task_struct, rcu_node_entry); 534 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { 535 pr_cont(" P%d", t->pid); 536 ndetected++; 537 } 538 rcu_print_task_stall_end(); 539 return ndetected; 540} 541 542/* 543 * Check that the list of blocked tasks for the newly completed grace 544 * period is in fact empty. It is a serious bug to complete a grace 545 * period that still has RCU readers blocked! This function must be 546 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock 547 * must be held by the caller. 548 * 549 * Also, if there are blocked tasks on the list, they automatically 550 * block the newly created grace period, so set up ->gp_tasks accordingly. 
551 */ 552static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) 553{ 554 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); 555 if (!list_empty(&rnp->blkd_tasks)) 556 rnp->gp_tasks = rnp->blkd_tasks.next; 557 WARN_ON_ONCE(rnp->qsmask); 558} 559 560#ifdef CONFIG_HOTPLUG_CPU 561 562/* 563 * Handle tasklist migration for case in which all CPUs covered by the 564 * specified rcu_node have gone offline. Move them up to the root 565 * rcu_node. The reason for not just moving them to the immediate 566 * parent is to remove the need for rcu_read_unlock_special() to 567 * make more than two attempts to acquire the target rcu_node's lock. 568 * Returns true if there were tasks blocking the current RCU grace 569 * period. 570 * 571 * Returns 1 if there was previously a task blocking the current grace 572 * period on the specified rcu_node structure. 573 * 574 * The caller must hold rnp->lock with irqs disabled. 575 */ 576static int rcu_preempt_offline_tasks(struct rcu_state *rsp, 577 struct rcu_node *rnp, 578 struct rcu_data *rdp) 579{ 580 struct list_head *lp; 581 struct list_head *lp_root; 582 int retval = 0; 583 struct rcu_node *rnp_root = rcu_get_root(rsp); 584 struct task_struct *t; 585 586 if (rnp == rnp_root) { 587 WARN_ONCE(1, "Last CPU thought to be offlined?"); 588 return 0; /* Shouldn't happen: at least one CPU online. */ 589 } 590 591 /* If we are on an internal node, complain bitterly. */ 592 WARN_ON_ONCE(rnp != rdp->mynode); 593 594 /* 595 * Move tasks up to root rcu_node. Don't try to get fancy for 596 * this corner-case operation -- just put this node's tasks 597 * at the head of the root node's list, and update the root node's 598 * ->gp_tasks and ->exp_tasks pointers to those of this node's, 599 * if non-NULL. This might result in waiting for more tasks than 600 * absolutely necessary, but this is a good performance/complexity 601 * tradeoff. 602 */ 603 if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0) 604 retval |= RCU_OFL_TASKS_NORM_GP; 605 if (rcu_preempted_readers_exp(rnp)) 606 retval |= RCU_OFL_TASKS_EXP_GP; 607 lp = &rnp->blkd_tasks; 608 lp_root = &rnp_root->blkd_tasks; 609 while (!list_empty(lp)) { 610 t = list_entry(lp->next, typeof(*t), rcu_node_entry); 611 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ 612 list_del(&t->rcu_node_entry); 613 t->rcu_blocked_node = rnp_root; 614 list_add(&t->rcu_node_entry, lp_root); 615 if (&t->rcu_node_entry == rnp->gp_tasks) 616 rnp_root->gp_tasks = rnp->gp_tasks; 617 if (&t->rcu_node_entry == rnp->exp_tasks) 618 rnp_root->exp_tasks = rnp->exp_tasks; 619#ifdef CONFIG_RCU_BOOST 620 if (&t->rcu_node_entry == rnp->boost_tasks) 621 rnp_root->boost_tasks = rnp->boost_tasks; 622#endif /* #ifdef CONFIG_RCU_BOOST */ 623 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ 624 } 625 626 rnp->gp_tasks = NULL; 627 rnp->exp_tasks = NULL; 628#ifdef CONFIG_RCU_BOOST 629 rnp->boost_tasks = NULL; 630 /* 631 * In case root is being boosted and leaf was not. Make sure 632 * that we boost the tasks blocking the current grace period 633 * in this case. 
634 */ 635 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ 636 if (rnp_root->boost_tasks != NULL && 637 rnp_root->boost_tasks != rnp_root->gp_tasks && 638 rnp_root->boost_tasks != rnp_root->exp_tasks) 639 rnp_root->boost_tasks = rnp_root->gp_tasks; 640 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ 641#endif /* #ifdef CONFIG_RCU_BOOST */ 642 643 return retval; 644} 645 646#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 647 648/* 649 * Check for a quiescent state from the current CPU. When a task blocks, 650 * the task is recorded in the corresponding CPU's rcu_node structure, 651 * which is checked elsewhere. 652 * 653 * Caller must disable hard irqs. 654 */ 655static void rcu_preempt_check_callbacks(int cpu) 656{ 657 struct task_struct *t = current; 658 659 if (t->rcu_read_lock_nesting == 0) { 660 rcu_preempt_qs(cpu); 661 return; 662 } 663 if (t->rcu_read_lock_nesting > 0 && 664 per_cpu(rcu_preempt_data, cpu).qs_pending) 665 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; 666} 667 668#ifdef CONFIG_RCU_BOOST 669 670static void rcu_preempt_do_callbacks(void) 671{ 672 rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data)); 673} 674 675#endif /* #ifdef CONFIG_RCU_BOOST */ 676 677/* 678 * Queue a preemptible-RCU callback for invocation after a grace period. 679 */ 680void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) 681{ 682 __call_rcu(head, func, &rcu_preempt_state, -1, 0); 683} 684EXPORT_SYMBOL_GPL(call_rcu); 685 686/* 687 * Queue an RCU callback for lazy invocation after a grace period. 688 * This will likely be later named something like "call_rcu_lazy()", 689 * but this change will require some way of tagging the lazy RCU 690 * callbacks in the list of pending callbacks. Until then, this 691 * function may only be called from __kfree_rcu(). 692 */ 693void kfree_call_rcu(struct rcu_head *head, 694 void (*func)(struct rcu_head *rcu)) 695{ 696 __call_rcu(head, func, &rcu_preempt_state, -1, 1); 697} 698EXPORT_SYMBOL_GPL(kfree_call_rcu); 699 700/** 701 * synchronize_rcu - wait until a grace period has elapsed. 702 * 703 * Control will return to the caller some time after a full grace 704 * period has elapsed, in other words after all currently executing RCU 705 * read-side critical sections have completed. Note, however, that 706 * upon return from synchronize_rcu(), the caller might well be executing 707 * concurrently with new RCU read-side critical sections that began while 708 * synchronize_rcu() was waiting. RCU read-side critical sections are 709 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested. 710 * 711 * See the description of synchronize_sched() for more detailed information 712 * on memory ordering guarantees. 713 */ 714void synchronize_rcu(void) 715{ 716 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) && 717 !lock_is_held(&rcu_lock_map) && 718 !lock_is_held(&rcu_sched_lock_map), 719 "Illegal synchronize_rcu() in RCU read-side critical section"); 720 if (!rcu_scheduler_active) 721 return; 722 if (rcu_expedited) 723 synchronize_rcu_expedited(); 724 else 725 wait_rcu_gp(call_rcu); 726} 727EXPORT_SYMBOL_GPL(synchronize_rcu); 728 729static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq); 730static unsigned long sync_rcu_preempt_exp_count; 731static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex); 732 733/* 734 * Return non-zero if there are any tasks in RCU read-side critical 735 * sections blocking the current preemptible-RCU expedited grace period. 
736 * If there is no preemptible-RCU expedited grace period currently in 737 * progress, returns zero unconditionally. 738 */ 739static int rcu_preempted_readers_exp(struct rcu_node *rnp) 740{ 741 return rnp->exp_tasks != NULL; 742} 743 744/* 745 * return non-zero if there is no RCU expedited grace period in progress 746 * for the specified rcu_node structure, in other words, if all CPUs and 747 * tasks covered by the specified rcu_node structure have done their bit 748 * for the current expedited grace period. Works only for preemptible 749 * RCU -- other RCU implementation use other means. 750 * 751 * Caller must hold sync_rcu_preempt_exp_mutex. 752 */ 753static int sync_rcu_preempt_exp_done(struct rcu_node *rnp) 754{ 755 return !rcu_preempted_readers_exp(rnp) && 756 ACCESS_ONCE(rnp->expmask) == 0; 757} 758 759/* 760 * Report the exit from RCU read-side critical section for the last task 761 * that queued itself during or before the current expedited preemptible-RCU 762 * grace period. This event is reported either to the rcu_node structure on 763 * which the task was queued or to one of that rcu_node structure's ancestors, 764 * recursively up the tree. (Calm down, calm down, we do the recursion 765 * iteratively!) 766 * 767 * Most callers will set the "wake" flag, but the task initiating the 768 * expedited grace period need not wake itself. 769 * 770 * Caller must hold sync_rcu_preempt_exp_mutex. 771 */ 772static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, 773 bool wake) 774{ 775 unsigned long flags; 776 unsigned long mask; 777 778 raw_spin_lock_irqsave(&rnp->lock, flags); 779 for (;;) { 780 if (!sync_rcu_preempt_exp_done(rnp)) { 781 raw_spin_unlock_irqrestore(&rnp->lock, flags); 782 break; 783 } 784 if (rnp->parent == NULL) { 785 raw_spin_unlock_irqrestore(&rnp->lock, flags); 786 if (wake) { 787 smp_mb(); /* EGP done before wake_up(). */ 788 wake_up(&sync_rcu_preempt_exp_wq); 789 } 790 break; 791 } 792 mask = rnp->grpmask; 793 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ 794 rnp = rnp->parent; 795 raw_spin_lock(&rnp->lock); /* irqs already disabled */ 796 rnp->expmask &= ~mask; 797 } 798} 799 800/* 801 * Snapshot the tasks blocking the newly started preemptible-RCU expedited 802 * grace period for the specified rcu_node structure. If there are no such 803 * tasks, report it up the rcu_node hierarchy. 804 * 805 * Caller must hold sync_rcu_preempt_exp_mutex and must exclude 806 * CPU hotplug operations. 807 */ 808static void 809sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp) 810{ 811 unsigned long flags; 812 int must_wait = 0; 813 814 raw_spin_lock_irqsave(&rnp->lock, flags); 815 if (list_empty(&rnp->blkd_tasks)) { 816 raw_spin_unlock_irqrestore(&rnp->lock, flags); 817 } else { 818 rnp->exp_tasks = rnp->blkd_tasks.next; 819 rcu_initiate_boost(rnp, flags); /* releases rnp->lock */ 820 must_wait = 1; 821 } 822 if (!must_wait) 823 rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */ 824} 825 826/** 827 * synchronize_rcu_expedited - Brute-force RCU grace period 828 * 829 * Wait for an RCU-preempt grace period, but expedite it. The basic 830 * idea is to invoke synchronize_sched_expedited() to push all the tasks to 831 * the ->blkd_tasks lists and wait for this list to drain. This consumes 832 * significant time on all CPUs and is unfriendly to real-time workloads, 833 * so is thus not recommended for any sort of common-case code. 
834 * In fact, if you are using synchronize_rcu_expedited() in a loop, 835 * please restructure your code to batch your updates, and then Use a 836 * single synchronize_rcu() instead. 837 * 838 * Note that it is illegal to call this function while holding any lock 839 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal 840 * to call this function from a CPU-hotplug notifier. Failing to observe 841 * these restriction will result in deadlock. 842 */ 843void synchronize_rcu_expedited(void) 844{ 845 unsigned long flags; 846 struct rcu_node *rnp; 847 struct rcu_state *rsp = &rcu_preempt_state; 848 unsigned long snap; 849 int trycount = 0; 850 851 smp_mb(); /* Caller's modifications seen first by other CPUs. */ 852 snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1; 853 smp_mb(); /* Above access cannot bleed into critical section. */ 854 855 /* 856 * Block CPU-hotplug operations. This means that any CPU-hotplug 857 * operation that finds an rcu_node structure with tasks in the 858 * process of being boosted will know that all tasks blocking 859 * this expedited grace period will already be in the process of 860 * being boosted. This simplifies the process of moving tasks 861 * from leaf to root rcu_node structures. 862 */ 863 get_online_cpus(); 864 865 /* 866 * Acquire lock, falling back to synchronize_rcu() if too many 867 * lock-acquisition failures. Of course, if someone does the 868 * expedited grace period for us, just leave. 869 */ 870 while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) { 871 if (ULONG_CMP_LT(snap, 872 ACCESS_ONCE(sync_rcu_preempt_exp_count))) { 873 put_online_cpus(); 874 goto mb_ret; /* Others did our work for us. */ 875 } 876 if (trycount++ < 10) { 877 udelay(trycount * num_online_cpus()); 878 } else { 879 put_online_cpus(); 880 wait_rcu_gp(call_rcu); 881 return; 882 } 883 } 884 if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) { 885 put_online_cpus(); 886 goto unlock_mb_ret; /* Others did our work for us. */ 887 } 888 889 /* force all RCU readers onto ->blkd_tasks lists. */ 890 synchronize_sched_expedited(); 891 892 /* Initialize ->expmask for all non-leaf rcu_node structures. */ 893 rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) { 894 raw_spin_lock_irqsave(&rnp->lock, flags); 895 rnp->expmask = rnp->qsmaskinit; 896 raw_spin_unlock_irqrestore(&rnp->lock, flags); 897 } 898 899 /* Snapshot current state of ->blkd_tasks lists. */ 900 rcu_for_each_leaf_node(rsp, rnp) 901 sync_rcu_preempt_exp_init(rsp, rnp); 902 if (NUM_RCU_NODES > 1) 903 sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp)); 904 905 put_online_cpus(); 906 907 /* Wait for snapshotted ->blkd_tasks lists to drain. */ 908 rnp = rcu_get_root(rsp); 909 wait_event(sync_rcu_preempt_exp_wq, 910 sync_rcu_preempt_exp_done(rnp)); 911 912 /* Clean up and exit. */ 913 smp_mb(); /* ensure expedited GP seen before counter increment. */ 914 ACCESS_ONCE(sync_rcu_preempt_exp_count)++; 915unlock_mb_ret: 916 mutex_unlock(&sync_rcu_preempt_exp_mutex); 917mb_ret: 918 smp_mb(); /* ensure subsequent action seen after grace period. */ 919} 920EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); 921 922/** 923 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. 924 * 925 * Note that this primitive does not necessarily wait for an RCU grace period 926 * to complete. For example, if there are no RCU callbacks queued anywhere 927 * in the system, then rcu_barrier() is within its rights to return 928 * immediately, without waiting for anything, much less an RCU grace period. 
929 */ 930void rcu_barrier(void) 931{ 932 _rcu_barrier(&rcu_preempt_state); 933} 934EXPORT_SYMBOL_GPL(rcu_barrier); 935 936/* 937 * Initialize preemptible RCU's state structures. 938 */ 939static void __init __rcu_init_preempt(void) 940{ 941 rcu_init_one(&rcu_preempt_state, &rcu_preempt_data); 942} 943 944/* 945 * Check for a task exiting while in a preemptible-RCU read-side 946 * critical section, clean up if so. No need to issue warnings, 947 * as debug_check_no_locks_held() already does this if lockdep 948 * is enabled. 949 */ 950void exit_rcu(void) 951{ 952 struct task_struct *t = current; 953 954 if (likely(list_empty(&current->rcu_node_entry))) 955 return; 956 t->rcu_read_lock_nesting = 1; 957 barrier(); 958 t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED; 959 __rcu_read_unlock(); 960} 961 962#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 963 964static struct rcu_state *rcu_state = &rcu_sched_state; 965 966/* 967 * Tell them what RCU they are running. 968 */ 969static void __init rcu_bootup_announce(void) 970{ 971 pr_info("Hierarchical RCU implementation.\n"); 972 rcu_bootup_announce_oddness(); 973} 974 975/* 976 * Return the number of RCU batches processed thus far for debug & stats. 977 */ 978long rcu_batches_completed(void) 979{ 980 return rcu_batches_completed_sched(); 981} 982EXPORT_SYMBOL_GPL(rcu_batches_completed); 983 984/* 985 * Force a quiescent state for RCU, which, because there is no preemptible 986 * RCU, becomes the same as rcu-sched. 987 */ 988void rcu_force_quiescent_state(void) 989{ 990 rcu_sched_force_quiescent_state(); 991} 992EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); 993 994/* 995 * Because preemptible RCU does not exist, we never have to check for 996 * CPUs being in quiescent states. 997 */ 998static void rcu_preempt_note_context_switch(int cpu) 999{ 1000} 1001 1002/* 1003 * Because preemptible RCU does not exist, there are never any preempted 1004 * RCU readers. 1005 */ 1006static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) 1007{ 1008 return 0; 1009} 1010 1011#ifdef CONFIG_HOTPLUG_CPU 1012 1013/* Because preemptible RCU does not exist, no quieting of tasks. */ 1014static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) 1015{ 1016 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1017} 1018 1019#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 1020 1021/* 1022 * Because preemptible RCU does not exist, we never have to check for 1023 * tasks blocked within RCU read-side critical sections. 1024 */ 1025static void rcu_print_detail_task_stall(struct rcu_state *rsp) 1026{ 1027} 1028 1029/* 1030 * Because preemptible RCU does not exist, we never have to check for 1031 * tasks blocked within RCU read-side critical sections. 1032 */ 1033static int rcu_print_task_stall(struct rcu_node *rnp) 1034{ 1035 return 0; 1036} 1037 1038/* 1039 * Because there is no preemptible RCU, there can be no readers blocked, 1040 * so there is no need to check for blocked tasks. So check only for 1041 * bogus qsmask values. 1042 */ 1043static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) 1044{ 1045 WARN_ON_ONCE(rnp->qsmask); 1046} 1047 1048#ifdef CONFIG_HOTPLUG_CPU 1049 1050/* 1051 * Because preemptible RCU does not exist, it never needs to migrate 1052 * tasks that were blocked within RCU read-side critical sections, and 1053 * such non-existent tasks cannot possibly have been blocking the current 1054 * grace period.
1055 */ 1056static int rcu_preempt_offline_tasks(struct rcu_state *rsp, 1057 struct rcu_node *rnp, 1058 struct rcu_data *rdp) 1059{ 1060 return 0; 1061} 1062 1063#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 1064 1065/* 1066 * Because preemptible RCU does not exist, it never has any callbacks 1067 * to check. 1068 */ 1069static void rcu_preempt_check_callbacks(int cpu) 1070{ 1071} 1072 1073/* 1074 * Queue an RCU callback for lazy invocation after a grace period. 1075 * This will likely be later named something like "call_rcu_lazy()", 1076 * but this change will require some way of tagging the lazy RCU 1077 * callbacks in the list of pending callbacks. Until then, this 1078 * function may only be called from __kfree_rcu(). 1079 * 1080 * Because there is no preemptible RCU, we use RCU-sched instead. 1081 */ 1082void kfree_call_rcu(struct rcu_head *head, 1083 void (*func)(struct rcu_head *rcu)) 1084{ 1085 __call_rcu(head, func, &rcu_sched_state, -1, 1); 1086} 1087EXPORT_SYMBOL_GPL(kfree_call_rcu); 1088 1089/* 1090 * Wait for an rcu-preempt grace period, but make it happen quickly. 1091 * But because preemptible RCU does not exist, map to rcu-sched. 1092 */ 1093void synchronize_rcu_expedited(void) 1094{ 1095 synchronize_sched_expedited(); 1096} 1097EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); 1098 1099#ifdef CONFIG_HOTPLUG_CPU 1100 1101/* 1102 * Because preemptible RCU does not exist, there is never any need to 1103 * report on tasks preempted in RCU read-side critical sections during 1104 * expedited RCU grace periods. 1105 */ 1106static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, 1107 bool wake) 1108{ 1109} 1110 1111#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 1112 1113/* 1114 * Because preemptible RCU does not exist, rcu_barrier() is just 1115 * another name for rcu_barrier_sched(). 1116 */ 1117void rcu_barrier(void) 1118{ 1119 rcu_barrier_sched(); 1120} 1121EXPORT_SYMBOL_GPL(rcu_barrier); 1122 1123/* 1124 * Because preemptible RCU does not exist, it need not be initialized. 1125 */ 1126static void __init __rcu_init_preempt(void) 1127{ 1128} 1129 1130/* 1131 * Because preemptible RCU does not exist, tasks cannot possibly exit 1132 * while in preemptible RCU read-side critical sections. 
1133 */ 1134void exit_rcu(void) 1135{ 1136} 1137 1138#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ 1139 1140#ifdef CONFIG_RCU_BOOST 1141 1142#include "../locking/rtmutex_common.h" 1143 1144#ifdef CONFIG_RCU_TRACE 1145 1146static void rcu_initiate_boost_trace(struct rcu_node *rnp) 1147{ 1148 if (list_empty(&rnp->blkd_tasks)) 1149 rnp->n_balk_blkd_tasks++; 1150 else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL) 1151 rnp->n_balk_exp_gp_tasks++; 1152 else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL) 1153 rnp->n_balk_boost_tasks++; 1154 else if (rnp->gp_tasks != NULL && rnp->qsmask != 0) 1155 rnp->n_balk_notblocked++; 1156 else if (rnp->gp_tasks != NULL && 1157 ULONG_CMP_LT(jiffies, rnp->boost_time)) 1158 rnp->n_balk_notyet++; 1159 else 1160 rnp->n_balk_nos++; 1161} 1162 1163#else /* #ifdef CONFIG_RCU_TRACE */ 1164 1165static void rcu_initiate_boost_trace(struct rcu_node *rnp) 1166{ 1167} 1168 1169#endif /* #else #ifdef CONFIG_RCU_TRACE */ 1170 1171static void rcu_wake_cond(struct task_struct *t, int status) 1172{ 1173 /* 1174 * If the thread is yielding, only wake it when this 1175 * is invoked from idle 1176 */ 1177 if (status != RCU_KTHREAD_YIELDING || is_idle_task(current)) 1178 wake_up_process(t); 1179} 1180 1181/* 1182 * Carry out RCU priority boosting on the task indicated by ->exp_tasks 1183 * or ->boost_tasks, advancing the pointer to the next task in the 1184 * ->blkd_tasks list. 1185 * 1186 * Note that irqs must be enabled: boosting the task can block. 1187 * Returns 1 if there are more tasks needing to be boosted. 1188 */ 1189static int rcu_boost(struct rcu_node *rnp) 1190{ 1191 unsigned long flags; 1192 struct rt_mutex mtx; 1193 struct task_struct *t; 1194 struct list_head *tb; 1195 1196 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) 1197 return 0; /* Nothing left to boost. */ 1198 1199 raw_spin_lock_irqsave(&rnp->lock, flags); 1200 1201 /* 1202 * Recheck under the lock: all tasks in need of boosting 1203 * might exit their RCU read-side critical sections on their own. 1204 */ 1205 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) { 1206 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1207 return 0; 1208 } 1209 1210 /* 1211 * Preferentially boost tasks blocking expedited grace periods. 1212 * This cannot starve the normal grace periods because a second 1213 * expedited grace period must boost all blocked tasks, including 1214 * those blocking the pre-existing normal grace period. 1215 */ 1216 if (rnp->exp_tasks != NULL) { 1217 tb = rnp->exp_tasks; 1218 rnp->n_exp_boosts++; 1219 } else { 1220 tb = rnp->boost_tasks; 1221 rnp->n_normal_boosts++; 1222 } 1223 rnp->n_tasks_boosted++; 1224 1225 /* 1226 * We boost task t by manufacturing an rt_mutex that appears to 1227 * be held by task t. We leave a pointer to that rt_mutex where 1228 * task t can find it, and task t will release the mutex when it 1229 * exits its outermost RCU read-side critical section. Then 1230 * simply acquiring this artificial rt_mutex will boost task 1231 * t's priority. (Thanks to tglx for suggesting this approach!) 1232 * 1233 * Note that task t must acquire rnp->lock to remove itself from 1234 * the ->blkd_tasks list, which it will do from exit() if from 1235 * nowhere else. We therefore are guaranteed that task t will 1236 * stay around at least until we drop rnp->lock. Note that 1237 * rnp->lock also resolves races between our priority boosting 1238 * and task t's exiting its outermost RCU read-side critical 1239 * section. 
1240 */ 1241 t = container_of(tb, struct task_struct, rcu_node_entry); 1242 rt_mutex_init_proxy_locked(&mtx, t); 1243 t->rcu_boost_mutex = &mtx; 1244 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1245 rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */ 1246 rt_mutex_unlock(&mtx); /* Keep lockdep happy. */ 1247 1248 return ACCESS_ONCE(rnp->exp_tasks) != NULL || 1249 ACCESS_ONCE(rnp->boost_tasks) != NULL; 1250} 1251 1252/* 1253 * Priority-boosting kthread. One per leaf rcu_node and one for the 1254 * root rcu_node. 1255 */ 1256static int rcu_boost_kthread(void *arg) 1257{ 1258 struct rcu_node *rnp = (struct rcu_node *)arg; 1259 int spincnt = 0; 1260 int more2boost; 1261 1262 trace_rcu_utilization(TPS("Start boost kthread@init")); 1263 for (;;) { 1264 rnp->boost_kthread_status = RCU_KTHREAD_WAITING; 1265 trace_rcu_utilization(TPS("End boost kthread@rcu_wait")); 1266 rcu_wait(rnp->boost_tasks || rnp->exp_tasks); 1267 trace_rcu_utilization(TPS("Start boost kthread@rcu_wait")); 1268 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING; 1269 more2boost = rcu_boost(rnp); 1270 if (more2boost) 1271 spincnt++; 1272 else 1273 spincnt = 0; 1274 if (spincnt > 10) { 1275 rnp->boost_kthread_status = RCU_KTHREAD_YIELDING; 1276 trace_rcu_utilization(TPS("End boost kthread@rcu_yield")); 1277 schedule_timeout_interruptible(2); 1278 trace_rcu_utilization(TPS("Start boost kthread@rcu_yield")); 1279 spincnt = 0; 1280 } 1281 } 1282 /* NOTREACHED */ 1283 trace_rcu_utilization(TPS("End boost kthread@notreached")); 1284 return 0; 1285} 1286 1287/* 1288 * Check to see if it is time to start boosting RCU readers that are 1289 * blocking the current grace period, and, if so, tell the per-rcu_node 1290 * kthread to start boosting them. If there is an expedited grace 1291 * period in progress, it is always time to boost. 1292 * 1293 * The caller must hold rnp->lock, which this function releases. 1294 * The ->boost_kthread_task is immortal, so we don't need to worry 1295 * about it going away. 1296 */ 1297static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) 1298{ 1299 struct task_struct *t; 1300 1301 if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) { 1302 rnp->n_balk_exp_gp_tasks++; 1303 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1304 return; 1305 } 1306 if (rnp->exp_tasks != NULL || 1307 (rnp->gp_tasks != NULL && 1308 rnp->boost_tasks == NULL && 1309 rnp->qsmask == 0 && 1310 ULONG_CMP_GE(jiffies, rnp->boost_time))) { 1311 if (rnp->exp_tasks == NULL) 1312 rnp->boost_tasks = rnp->gp_tasks; 1313 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1314 t = rnp->boost_kthread_task; 1315 if (t) 1316 rcu_wake_cond(t, rnp->boost_kthread_status); 1317 } else { 1318 rcu_initiate_boost_trace(rnp); 1319 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1320 } 1321} 1322 1323/* 1324 * Wake up the per-CPU kthread to invoke RCU callbacks. 1325 */ 1326static void invoke_rcu_callbacks_kthread(void) 1327{ 1328 unsigned long flags; 1329 1330 local_irq_save(flags); 1331 __this_cpu_write(rcu_cpu_has_work, 1); 1332 if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && 1333 current != __this_cpu_read(rcu_cpu_kthread_task)) { 1334 rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), 1335 __this_cpu_read(rcu_cpu_kthread_status)); 1336 } 1337 local_irq_restore(flags); 1338} 1339 1340/* 1341 * Is the current CPU running the RCU-callbacks kthread? 1342 * Caller must have preemption disabled. 
1343 */ 1344static bool rcu_is_callbacks_kthread(void) 1345{ 1346 return __this_cpu_read(rcu_cpu_kthread_task) == current; 1347} 1348 1349#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) 1350 1351/* 1352 * Do priority-boost accounting for the start of a new grace period. 1353 */ 1354static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) 1355{ 1356 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; 1357} 1358 1359/* 1360 * Create an RCU-boost kthread for the specified node if one does not 1361 * already exist. We only create this kthread for preemptible RCU. 1362 * Returns zero if all is well, a negated errno otherwise. 1363 */ 1364static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, 1365 struct rcu_node *rnp) 1366{ 1367 int rnp_index = rnp - &rsp->node[0]; 1368 unsigned long flags; 1369 struct sched_param sp; 1370 struct task_struct *t; 1371 1372 if (&rcu_preempt_state != rsp) 1373 return 0; 1374 1375 if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0) 1376 return 0; 1377 1378 rsp->boost = 1; 1379 if (rnp->boost_kthread_task != NULL) 1380 return 0; 1381 t = kthread_create(rcu_boost_kthread, (void *)rnp, 1382 "rcub/%d", rnp_index); 1383 if (IS_ERR(t)) 1384 return PTR_ERR(t); 1385 raw_spin_lock_irqsave(&rnp->lock, flags); 1386 rnp->boost_kthread_task = t; 1387 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1388 sp.sched_priority = RCU_BOOST_PRIO; 1389 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 1390 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ 1391 return 0; 1392} 1393 1394static void rcu_kthread_do_work(void) 1395{ 1396 rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data)); 1397 rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data)); 1398 rcu_preempt_do_callbacks(); 1399} 1400 1401static void rcu_cpu_kthread_setup(unsigned int cpu) 1402{ 1403 struct sched_param sp; 1404 1405 sp.sched_priority = RCU_KTHREAD_PRIO; 1406 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); 1407} 1408 1409static void rcu_cpu_kthread_park(unsigned int cpu) 1410{ 1411 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; 1412} 1413 1414static int rcu_cpu_kthread_should_run(unsigned int cpu) 1415{ 1416 return __this_cpu_read(rcu_cpu_has_work); 1417} 1418 1419/* 1420 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the 1421 * RCU softirq used in flavors and configurations of RCU that do not 1422 * support RCU priority boosting. 
1423 */ 1424static void rcu_cpu_kthread(unsigned int cpu) 1425{ 1426 unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); 1427 char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); 1428 int spincnt; 1429 1430 for (spincnt = 0; spincnt < 10; spincnt++) { 1431 trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); 1432 local_bh_disable(); 1433 *statusp = RCU_KTHREAD_RUNNING; 1434 this_cpu_inc(rcu_cpu_kthread_loops); 1435 local_irq_disable(); 1436 work = *workp; 1437 *workp = 0; 1438 local_irq_enable(); 1439 if (work) 1440 rcu_kthread_do_work(); 1441 local_bh_enable(); 1442 if (*workp == 0) { 1443 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); 1444 *statusp = RCU_KTHREAD_WAITING; 1445 return; 1446 } 1447 } 1448 *statusp = RCU_KTHREAD_YIELDING; 1449 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); 1450 schedule_timeout_interruptible(2); 1451 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); 1452 *statusp = RCU_KTHREAD_WAITING; 1453} 1454 1455/* 1456 * Set the per-rcu_node kthread's affinity to cover all CPUs that are 1457 * served by the rcu_node in question. The CPU hotplug lock is still 1458 * held, so the value of rnp->qsmaskinit will be stable. 1459 * 1460 * We don't include outgoingcpu in the affinity set, use -1 if there is 1461 * no outgoing CPU. If there are no CPUs left in the affinity set, 1462 * this function allows the kthread to execute on any CPU. 1463 */ 1464static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) 1465{ 1466 struct task_struct *t = rnp->boost_kthread_task; 1467 unsigned long mask = rnp->qsmaskinit; 1468 cpumask_var_t cm; 1469 int cpu; 1470 1471 if (!t) 1472 return; 1473 if (!zalloc_cpumask_var(&cm, GFP_KERNEL)) 1474 return; 1475 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) 1476 if ((mask & 0x1) && cpu != outgoingcpu) 1477 cpumask_set_cpu(cpu, cm); 1478 if (cpumask_weight(cm) == 0) { 1479 cpumask_setall(cm); 1480 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) 1481 cpumask_clear_cpu(cpu, cm); 1482 WARN_ON_ONCE(cpumask_weight(cm) == 0); 1483 } 1484 set_cpus_allowed_ptr(t, cm); 1485 free_cpumask_var(cm); 1486} 1487 1488static struct smp_hotplug_thread rcu_cpu_thread_spec = { 1489 .store = &rcu_cpu_kthread_task, 1490 .thread_should_run = rcu_cpu_kthread_should_run, 1491 .thread_fn = rcu_cpu_kthread, 1492 .thread_comm = "rcuc/%u", 1493 .setup = rcu_cpu_kthread_setup, 1494 .park = rcu_cpu_kthread_park, 1495}; 1496 1497/* 1498 * Spawn all kthreads -- called as soon as the scheduler is running. 1499 */ 1500static int __init rcu_spawn_kthreads(void) 1501{ 1502 struct rcu_node *rnp; 1503 int cpu; 1504 1505 rcu_scheduler_fully_active = 1; 1506 for_each_possible_cpu(cpu) 1507 per_cpu(rcu_cpu_has_work, cpu) = 0; 1508 BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); 1509 rnp = rcu_get_root(rcu_state); 1510 (void)rcu_spawn_one_boost_kthread(rcu_state, rnp); 1511 if (NUM_RCU_NODES > 1) { 1512 rcu_for_each_leaf_node(rcu_state, rnp) 1513 (void)rcu_spawn_one_boost_kthread(rcu_state, rnp); 1514 } 1515 return 0; 1516} 1517early_initcall(rcu_spawn_kthreads); 1518 1519static void rcu_prepare_kthreads(int cpu) 1520{ 1521 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); 1522 struct rcu_node *rnp = rdp->mynode; 1523 1524 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. 
*/ 1525 if (rcu_scheduler_fully_active) 1526 (void)rcu_spawn_one_boost_kthread(rcu_state, rnp); 1527} 1528 1529#else /* #ifdef CONFIG_RCU_BOOST */ 1530 1531static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) 1532{ 1533 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1534} 1535 1536static void invoke_rcu_callbacks_kthread(void) 1537{ 1538 WARN_ON_ONCE(1); 1539} 1540 1541static bool rcu_is_callbacks_kthread(void) 1542{ 1543 return false; 1544} 1545 1546static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) 1547{ 1548} 1549 1550static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) 1551{ 1552} 1553 1554static int __init rcu_scheduler_really_started(void) 1555{ 1556 rcu_scheduler_fully_active = 1; 1557 return 0; 1558} 1559early_initcall(rcu_scheduler_really_started); 1560 1561static void rcu_prepare_kthreads(int cpu) 1562{ 1563} 1564 1565#endif /* #else #ifdef CONFIG_RCU_BOOST */ 1566 1567#if !defined(CONFIG_RCU_FAST_NO_HZ) 1568 1569/* 1570 * Check to see if any future RCU-related work will need to be done 1571 * by the current CPU, even if none need be done immediately, returning 1572 * 1 if so. This function is part of the RCU implementation; it is -not- 1573 * an exported member of the RCU API. 1574 * 1575 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs 1576 * any flavor of RCU. 1577 */ 1578int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) 1579{ 1580 *delta_jiffies = ULONG_MAX; 1581 return rcu_cpu_has_callbacks(cpu, NULL); 1582} 1583 1584/* 1585 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up 1586 * after it. 1587 */ 1588static void rcu_cleanup_after_idle(int cpu) 1589{ 1590} 1591 1592/* 1593 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n, 1594 * is nothing. 1595 */ 1596static void rcu_prepare_for_idle(int cpu) 1597{ 1598} 1599 1600/* 1601 * Don't bother keeping a running count of the number of RCU callbacks 1602 * posted because CONFIG_RCU_FAST_NO_HZ=n. 1603 */ 1604static void rcu_idle_count_callbacks_posted(void) 1605{ 1606} 1607 1608#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */ 1609 1610/* 1611 * This code is invoked when a CPU goes idle, at which point we want 1612 * to have the CPU do everything required for RCU so that it can enter 1613 * the energy-efficient dyntick-idle mode. This is handled by a 1614 * state machine implemented by rcu_prepare_for_idle() below. 1615 * 1616 * The following two preprocessor symbols control this state machine: 1617 * 1618 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted 1619 * to sleep in dyntick-idle mode with RCU callbacks pending. This 1620 * is sized to be roughly one RCU grace period. Those energy-efficiency 1621 * benchmarkers who might otherwise be tempted to set this to a large 1622 * number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your 1623 * system. And if you are -that- concerned about energy efficiency, 1624 * just power the system down and be done with it! 1625 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is 1626 * permitted to sleep in dyntick-idle mode with only lazy RCU 1627 * callbacks pending. Setting this too high can OOM your system. 1628 * 1629 * The values below work well in practice. If future workloads require 1630 * adjustment, they can be converted into kernel config parameters, though 1631 * making the state machine smarter might be a better option. 1632 */ 1633#define RCU_IDLE_GP_DELAY 4 /* Roughly one grace period.
*/ 1634#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */ 1635 1636static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY; 1637module_param(rcu_idle_gp_delay, int, 0644); 1638static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY; 1639module_param(rcu_idle_lazy_gp_delay, int, 0644); 1640 1641extern int tick_nohz_enabled; 1642 1643/* 1644 * Try to advance callbacks for all flavors of RCU on the current CPU, but 1645 * only if it has been awhile since the last time we did so. Afterwards, 1646 * if there are any callbacks ready for immediate invocation, return true. 1647 */ 1648static bool rcu_try_advance_all_cbs(void) 1649{ 1650 bool cbs_ready = false; 1651 struct rcu_data *rdp; 1652 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); 1653 struct rcu_node *rnp; 1654 struct rcu_state *rsp; 1655 1656 /* Exit early if we advanced recently. */ 1657 if (jiffies == rdtp->last_advance_all) 1658 return 0; 1659 rdtp->last_advance_all = jiffies; 1660 1661 for_each_rcu_flavor(rsp) { 1662 rdp = this_cpu_ptr(rsp->rda); 1663 rnp = rdp->mynode; 1664 1665 /* 1666 * Don't bother checking unless a grace period has 1667 * completed since we last checked and there are 1668 * callbacks not yet ready to invoke. 1669 */ 1670 if (rdp->completed != rnp->completed && 1671 rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL]) 1672 note_gp_changes(rsp, rdp); 1673 1674 if (cpu_has_callbacks_ready_to_invoke(rdp)) 1675 cbs_ready = true; 1676 } 1677 return cbs_ready; 1678} 1679 1680/* 1681 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready 1682 * to invoke. If the CPU has callbacks, try to advance them. Tell the 1683 * caller to set the timeout based on whether or not there are non-lazy 1684 * callbacks. 1685 * 1686 * The caller must have disabled interrupts. 1687 */ 1688int rcu_needs_cpu(int cpu, unsigned long *dj) 1689{ 1690 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); 1691 1692 /* Snapshot to detect later posting of non-lazy callback. */ 1693 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted; 1694 1695 /* If no callbacks, RCU doesn't need the CPU. */ 1696 if (!rcu_cpu_has_callbacks(cpu, &rdtp->all_lazy)) { 1697 *dj = ULONG_MAX; 1698 return 0; 1699 } 1700 1701 /* Attempt to advance callbacks. */ 1702 if (rcu_try_advance_all_cbs()) { 1703 /* Some ready to invoke, so initiate later invocation. */ 1704 invoke_rcu_core(); 1705 return 1; 1706 } 1707 rdtp->last_accelerate = jiffies; 1708 1709 /* Request timer delay depending on laziness, and round. */ 1710 if (!rdtp->all_lazy) { 1711 *dj = round_up(rcu_idle_gp_delay + jiffies, 1712 rcu_idle_gp_delay) - jiffies; 1713 } else { 1714 *dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies; 1715 } 1716 return 0; 1717} 1718 1719/* 1720 * Prepare a CPU for idle from an RCU perspective. The first major task 1721 * is to sense whether nohz mode has been enabled or disabled via sysfs. 1722 * The second major task is to check to see if a non-lazy callback has 1723 * arrived at a CPU that previously had only lazy callbacks. The third 1724 * major task is to accelerate (that is, assign grace-period numbers to) 1725 * any recently arrived callbacks. 1726 * 1727 * The caller must have disabled interrupts. 1728 */ 1729static void rcu_prepare_for_idle(int cpu) 1730{ 1731 struct rcu_data *rdp; 1732 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); 1733 struct rcu_node *rnp; 1734 struct rcu_state *rsp; 1735 int tne; 1736 1737 /* Handle nohz enablement switches conservatively. 
*/ 1738 tne = ACCESS_ONCE(tick_nohz_enabled); 1739 if (tne != rdtp->tick_nohz_enabled_snap) { 1740 if (rcu_cpu_has_callbacks(cpu, NULL)) 1741 invoke_rcu_core(); /* force nohz to see update. */ 1742 rdtp->tick_nohz_enabled_snap = tne; 1743 return; 1744 } 1745 if (!tne) 1746 return; 1747 1748 /* If this is a no-CBs CPU, no callbacks, just return. */ 1749 if (rcu_is_nocb_cpu(cpu)) 1750 return; 1751 1752 /* 1753 * If a non-lazy callback arrived at a CPU having only lazy 1754 * callbacks, invoke RCU core for the side-effect of recalculating 1755 * idle duration on re-entry to idle. 1756 */ 1757 if (rdtp->all_lazy && 1758 rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) { 1759 rdtp->all_lazy = false; 1760 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted; 1761 invoke_rcu_core(); 1762 return; 1763 } 1764 1765 /* 1766 * If we have not yet accelerated this jiffy, accelerate all 1767 * callbacks on this CPU. 1768 */ 1769 if (rdtp->last_accelerate == jiffies) 1770 return; 1771 rdtp->last_accelerate = jiffies; 1772 for_each_rcu_flavor(rsp) { 1773 rdp = per_cpu_ptr(rsp->rda, cpu); 1774 if (!*rdp->nxttail[RCU_DONE_TAIL]) 1775 continue; 1776 rnp = rdp->mynode; 1777 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ 1778 rcu_accelerate_cbs(rsp, rnp, rdp); 1779 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ 1780 } 1781} 1782 1783/* 1784 * Clean up for exit from idle. Attempt to advance callbacks based on 1785 * any grace periods that elapsed while the CPU was idle, and if any 1786 * callbacks are now ready to invoke, initiate invocation. 1787 */ 1788static void rcu_cleanup_after_idle(int cpu) 1789{ 1790 1791 if (rcu_is_nocb_cpu(cpu)) 1792 return; 1793 if (rcu_try_advance_all_cbs()) 1794 invoke_rcu_core(); 1795} 1796 1797/* 1798 * Keep a running count of the number of non-lazy callbacks posted 1799 * on this CPU. This running counter (which is never decremented) allows 1800 * rcu_prepare_for_idle() to detect when something out of the idle loop 1801 * posts a callback, even if an equal number of callbacks are invoked. 1802 * Of course, callbacks should only be posted from within a trace event 1803 * designed to be called from idle or from within RCU_NONIDLE(). 1804 */ 1805static void rcu_idle_count_callbacks_posted(void) 1806{ 1807 __this_cpu_add(rcu_dynticks.nonlazy_posted, 1); 1808} 1809 1810/* 1811 * Data for flushing lazy RCU callbacks at OOM time. 1812 */ 1813static atomic_t oom_callback_count; 1814static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq); 1815 1816/* 1817 * RCU OOM callback -- decrement the outstanding count and deliver the 1818 * wake-up if we are the last one. 1819 */ 1820static void rcu_oom_callback(struct rcu_head *rhp) 1821{ 1822 if (atomic_dec_and_test(&oom_callback_count)) 1823 wake_up(&oom_callback_wq); 1824} 1825 1826/* 1827 * Post an rcu_oom_notify callback on the current CPU if it has at 1828 * least one lazy callback. This will unnecessarily post callbacks 1829 * to CPUs that already have a non-lazy callback at the end of their 1830 * callback list, but this is an infrequent operation, so accept some 1831 * extra overhead to keep things simple. 1832 */ 1833static void rcu_oom_notify_cpu(void *unused) 1834{ 1835 struct rcu_state *rsp; 1836 struct rcu_data *rdp; 1837 1838 for_each_rcu_flavor(rsp) { 1839 rdp = __this_cpu_ptr(rsp->rda); 1840 if (rdp->qlen_lazy != 0) { 1841 atomic_inc(&oom_callback_count); 1842 rsp->call(&rdp->oom_head, rcu_oom_callback); 1843 } 1844 } 1845} 1846 1847/* 1848 * If low on memory, ensure that each CPU has a non-lazy callback. 
1849 * This will wake up CPUs that have only lazy callbacks, in turn 1850 * ensuring that they free up the corresponding memory in a timely manner. 1851 * Because an uncertain amount of memory will be freed in some uncertain 1852 * timeframe, we do not claim to have freed anything. 1853 */ 1854static int rcu_oom_notify(struct notifier_block *self, 1855 unsigned long notused, void *nfreed) 1856{ 1857 int cpu; 1858 1859 /* Wait for callbacks from earlier instance to complete. */ 1860 wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0); 1861 smp_mb(); /* Ensure callback reuse happens after callback invocation. */ 1862 1863 /* 1864 * Prevent premature wakeup: ensure that all increments happen 1865 * before there is a chance of the counter reaching zero. 1866 */ 1867 atomic_set(&oom_callback_count, 1); 1868 1869 get_online_cpus(); 1870 for_each_online_cpu(cpu) { 1871 smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1); 1872 cond_resched(); 1873 } 1874 put_online_cpus(); 1875 1876 /* Unconditionally decrement: no need to wake ourselves up. */ 1877 atomic_dec(&oom_callback_count); 1878 1879 return NOTIFY_OK; 1880} 1881 1882static struct notifier_block rcu_oom_nb = { 1883 .notifier_call = rcu_oom_notify 1884}; 1885 1886static int __init rcu_register_oom_notifier(void) 1887{ 1888 register_oom_notifier(&rcu_oom_nb); 1889 return 0; 1890} 1891early_initcall(rcu_register_oom_notifier); 1892 1893#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ 1894 1895#ifdef CONFIG_RCU_CPU_STALL_INFO 1896 1897#ifdef CONFIG_RCU_FAST_NO_HZ 1898 1899static void print_cpu_stall_fast_no_hz(char *cp, int cpu) 1900{ 1901 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); 1902 unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap; 1903 1904 sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c", 1905 rdtp->last_accelerate & 0xffff, jiffies & 0xffff, 1906 ulong2long(nlpd), 1907 rdtp->all_lazy ? 'L' : '.', 1908 rdtp->tick_nohz_enabled_snap ? '.' : 'D'); 1909} 1910 1911#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */ 1912 1913static void print_cpu_stall_fast_no_hz(char *cp, int cpu) 1914{ 1915 *cp = '\0'; 1916} 1917 1918#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */ 1919 1920/* Initiate the stall-info list. */ 1921static void print_cpu_stall_info_begin(void) 1922{ 1923 pr_cont("\n"); 1924} 1925 1926/* 1927 * Print out diagnostic information for the specified stalled CPU. 1928 * 1929 * If the specified CPU is aware of the current RCU grace period 1930 * (flavor specified by rsp), then print the number of scheduling 1931 * clock interrupts the CPU has taken during the time that it has 1932 * been aware. Otherwise, print the number of RCU grace periods 1933 * that this CPU is ignorant of, for example, "1" if the CPU was 1934 * aware of the previous grace period. 1935 * 1936 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info. 
1937 */ 1938static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) 1939{ 1940 char fast_no_hz[72]; 1941 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 1942 struct rcu_dynticks *rdtp = rdp->dynticks; 1943 char *ticks_title; 1944 unsigned long ticks_value; 1945 1946 if (rsp->gpnum == rdp->gpnum) { 1947 ticks_title = "ticks this GP"; 1948 ticks_value = rdp->ticks_this_gp; 1949 } else { 1950 ticks_title = "GPs behind"; 1951 ticks_value = rsp->gpnum - rdp->gpnum; 1952 } 1953 print_cpu_stall_fast_no_hz(fast_no_hz, cpu); 1954 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n", 1955 cpu, ticks_value, ticks_title, 1956 atomic_read(&rdtp->dynticks) & 0xfff, 1957 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting, 1958 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), 1959 fast_no_hz); 1960} 1961 1962/* Terminate the stall-info list. */ 1963static void print_cpu_stall_info_end(void) 1964{ 1965 pr_err("\t"); 1966} 1967 1968/* Zero ->ticks_this_gp for all flavors of RCU. */ 1969static void zero_cpu_stall_ticks(struct rcu_data *rdp) 1970{ 1971 rdp->ticks_this_gp = 0; 1972 rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id()); 1973} 1974 1975/* Increment ->ticks_this_gp for all flavors of RCU. */ 1976static void increment_cpu_stall_ticks(void) 1977{ 1978 struct rcu_state *rsp; 1979 1980 for_each_rcu_flavor(rsp) 1981 __this_cpu_ptr(rsp->rda)->ticks_this_gp++; 1982} 1983 1984#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */ 1985 1986static void print_cpu_stall_info_begin(void) 1987{ 1988 pr_cont(" {"); 1989} 1990 1991static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) 1992{ 1993 pr_cont(" %d", cpu); 1994} 1995 1996static void print_cpu_stall_info_end(void) 1997{ 1998 pr_cont("} "); 1999} 2000 2001static void zero_cpu_stall_ticks(struct rcu_data *rdp) 2002{ 2003} 2004 2005static void increment_cpu_stall_ticks(void) 2006{ 2007} 2008 2009#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */ 2010 2011#ifdef CONFIG_RCU_NOCB_CPU 2012 2013/* 2014 * Offload callback processing from the boot-time-specified set of CPUs 2015 * specified by rcu_nocb_mask. For each CPU in the set, there is a 2016 * kthread created that pulls the callbacks from the corresponding CPU, 2017 * waits for a grace period to elapse, and invokes the callbacks. 2018 * The no-CBs CPUs do a wake_up() on their kthread when they insert 2019 * a callback into any empty list, unless the rcu_nocb_poll boot parameter 2020 * has been specified, in which case each kthread actively polls its 2021 * CPU. (Which isn't so great for energy efficiency, but which does 2022 * reduce RCU's overhead on that CPU.) 2023 * 2024 * This is intended to be used in conjunction with Frederic Weisbecker's 2025 * adaptive-idle work, which would seriously reduce OS jitter on CPUs 2026 * running CPU-bound user-mode computations. 2027 * 2028 * Offloading of callback processing could also in theory be used as 2029 * an energy-efficiency measure because CPUs with no RCU callbacks 2030 * queued are more aggressive about entering dyntick-idle mode. 2031 */ 2032 2033 2034/* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. 
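For example, booting with "rcu_nocbs=1-3,8" requests callback offloading for CPUs 1, 2, 3, and 8, and adding "rcu_nocb_poll" makes the rcuo kthreads poll rather than wait to be awakened (illustrative command line).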
*/ 2035static int __init rcu_nocb_setup(char *str) 2036{ 2037 alloc_bootmem_cpumask_var(&rcu_nocb_mask); 2038 have_rcu_nocb_mask = true; 2039 cpulist_parse(str, rcu_nocb_mask); 2040 return 1; 2041} 2042__setup("rcu_nocbs=", rcu_nocb_setup); 2043 2044static int __init parse_rcu_nocb_poll(char *arg) 2045{ 2046 rcu_nocb_poll = 1; 2047 return 0; 2048} 2049early_param("rcu_nocb_poll", parse_rcu_nocb_poll); 2050 2051/* 2052 * Do any no-CBs CPUs need another grace period? 2053 * 2054 * Interrupts must be disabled. If the caller does not hold the root 2055 * rcu_node structure's ->lock, the results are advisory only. 2056 */ 2057static int rcu_nocb_needs_gp(struct rcu_state *rsp) 2058{ 2059 struct rcu_node *rnp = rcu_get_root(rsp); 2060 2061 return rnp->need_future_gp[(ACCESS_ONCE(rnp->completed) + 1) & 0x1]; 2062} 2063 2064/* 2065 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended 2066 * grace period. 2067 */ 2068static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) 2069{ 2070 wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); 2071} 2072 2073/* 2074 * Set the root rcu_node structure's ->need_future_gp field 2075 * based on the sum of those of all rcu_node structures. This does 2076 * double-count the root rcu_node structure's requests, but this 2077 * is necessary to handle the possibility of a rcu_nocb_kthread() 2078 * having awakened during the time that the rcu_node structures 2079 * were being updated for the end of the previous grace period. 2080 */ 2081static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq) 2082{ 2083 rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq; 2084} 2085 2086static void rcu_init_one_nocb(struct rcu_node *rnp) 2087{ 2088 init_waitqueue_head(&rnp->nocb_gp_wq[0]); 2089 init_waitqueue_head(&rnp->nocb_gp_wq[1]); 2090} 2091 2092/* Is the specified CPU a no-CBs CPU? */ 2093bool rcu_is_nocb_cpu(int cpu) 2094{ 2095 if (have_rcu_nocb_mask) 2096 return cpumask_test_cpu(cpu, rcu_nocb_mask); 2097 return false; 2098} 2099 2100/* 2101 * Enqueue the specified string of rcu_head structures onto the specified 2102 * CPU's no-CBs lists. The CPU is specified by rdp, the head of the 2103 * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy 2104 * counts are supplied by rhcount and rhcount_lazy. 2105 * 2106 * If warranted, also wake up the kthread servicing this CPU's queues. 2107 */ 2108static void __call_rcu_nocb_enqueue(struct rcu_data *rdp, 2109 struct rcu_head *rhp, 2110 struct rcu_head **rhtp, 2111 int rhcount, int rhcount_lazy, 2112 unsigned long flags) 2113{ 2114 int len; 2115 struct rcu_head **old_rhpp; 2116 struct task_struct *t; 2117 2118 /* Enqueue the callback on the nocb list and update counts. */ 2119 old_rhpp = xchg(&rdp->nocb_tail, rhtp); 2120 ACCESS_ONCE(*old_rhpp) = rhp; 2121 atomic_long_add(rhcount, &rdp->nocb_q_count); 2122 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy); 2123 2124 /* If we are not being polled and there is a kthread, awaken it ... */ 2125 t = ACCESS_ONCE(rdp->nocb_kthread); 2126 if (rcu_nocb_poll || !t) { 2127 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2128 TPS("WakeNotPoll")); 2129 return; 2130 } 2131 len = atomic_long_read(&rdp->nocb_q_count); 2132 if (old_rhpp == &rdp->nocb_head) { 2133 if (!irqs_disabled_flags(flags)) { 2134 wake_up(&rdp->nocb_wq); /* ... if queue was empty ...
*/ 2135 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2136 TPS("WakeEmpty")); 2137 } else { 2138 rdp->nocb_defer_wakeup = true; 2139 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2140 TPS("WakeEmptyIsDeferred")); 2141 } 2142 rdp->qlen_last_fqs_check = 0; 2143 } else if (len > rdp->qlen_last_fqs_check + qhimark) { 2144 wake_up_process(t); /* ... or if many callbacks queued. */ 2145 rdp->qlen_last_fqs_check = LONG_MAX / 2; 2146 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeOvf")); 2147 } else { 2148 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot")); 2149 } 2150 return; 2151} 2152 2153/* 2154 * This is a helper for __call_rcu(), which invokes this when the normal 2155 * callback queue is inoperable. If this is not a no-CBs CPU, this 2156 * function returns failure back to __call_rcu(), which can complain 2157 * appropriately. 2158 * 2159 * Otherwise, this function queues the callback where the corresponding 2160 * "rcuo" kthread can find it. 2161 */ 2162static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp, 2163 bool lazy, unsigned long flags) 2164{ 2165 2166 if (!rcu_is_nocb_cpu(rdp->cpu)) 2167 return 0; 2168 __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags); 2169 if (__is_kfree_rcu_offset((unsigned long)rhp->func)) 2170 trace_rcu_kfree_callback(rdp->rsp->name, rhp, 2171 (unsigned long)rhp->func, 2172 -atomic_long_read(&rdp->nocb_q_count_lazy), 2173 -atomic_long_read(&rdp->nocb_q_count)); 2174 else 2175 trace_rcu_callback(rdp->rsp->name, rhp, 2176 -atomic_long_read(&rdp->nocb_q_count_lazy), 2177 -atomic_long_read(&rdp->nocb_q_count)); 2178 return 1; 2179} 2180 2181/* 2182 * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is 2183 * not a no-CBs CPU. 2184 */ 2185static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp, 2186 struct rcu_data *rdp, 2187 unsigned long flags) 2188{ 2189 long ql = rsp->qlen; 2190 long qll = rsp->qlen_lazy; 2191 2192 /* If this is not a no-CBs CPU, tell the caller to do it the old way. */ 2193 if (!rcu_is_nocb_cpu(smp_processor_id())) 2194 return 0; 2195 rsp->qlen = 0; 2196 rsp->qlen_lazy = 0; 2197 2198 /* First, enqueue the donelist, if any. This preserves CB ordering. */ 2199 if (rsp->orphan_donelist != NULL) { 2200 __call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist, 2201 rsp->orphan_donetail, ql, qll, flags); 2202 ql = qll = 0; 2203 rsp->orphan_donelist = NULL; 2204 rsp->orphan_donetail = &rsp->orphan_donelist; 2205 } 2206 if (rsp->orphan_nxtlist != NULL) { 2207 __call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist, 2208 rsp->orphan_nxttail, ql, qll, flags); 2209 ql = qll = 0; 2210 rsp->orphan_nxtlist = NULL; 2211 rsp->orphan_nxttail = &rsp->orphan_nxtlist; 2212 } 2213 return 1; 2214} 2215 2216/* 2217 * If necessary, kick off a new grace period, and either way wait 2218 * for a subsequent grace period to complete. 2219 */ 2220static void rcu_nocb_wait_gp(struct rcu_data *rdp) 2221{ 2222 unsigned long c; 2223 bool d; 2224 unsigned long flags; 2225 struct rcu_node *rnp = rdp->mynode; 2226 2227 raw_spin_lock_irqsave(&rnp->lock, flags); 2228 c = rcu_start_future_gp(rnp, rdp); 2229 raw_spin_unlock_irqrestore(&rnp->lock, flags); 2230 2231 /* 2232 * Wait for the grace period. Do so interruptibly to avoid messing 2233 * up the load average. 
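(An uninterruptible sleep would be counted as runnable for load-average purposes, even though this kthread would merely be waiting for a grace period to end.)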
2234 */ 2235 trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait")); 2236 for (;;) { 2237 wait_event_interruptible( 2238 rnp->nocb_gp_wq[c & 0x1], 2239 (d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c))); 2240 if (likely(d)) 2241 break; 2242 flush_signals(current); 2243 trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait")); 2244 } 2245 trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait")); 2246 smp_mb(); /* Ensure that CB invocation happens after GP end. */ 2247} 2248 2249/* 2250 * Per-rcu_data kthread, but only for no-CBs CPUs. Each kthread invokes 2251 * callbacks queued by the corresponding no-CBs CPU. 2252 */ 2253static int rcu_nocb_kthread(void *arg) 2254{ 2255 int c, cl; 2256 bool firsttime = 1; 2257 struct rcu_head *list; 2258 struct rcu_head *next; 2259 struct rcu_head **tail; 2260 struct rcu_data *rdp = arg; 2261 2262 /* Each pass through this loop invokes one batch of callbacks. */ 2263 for (;;) { 2264 /* If not polling, wait for next batch of callbacks. */ 2265 if (!rcu_nocb_poll) { 2266 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2267 TPS("Sleep")); 2268 wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head); 2269 /* Memory barrier provided by xchg() below. */ 2270 } else if (firsttime) { 2271 firsttime = 0; 2272 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2273 TPS("Poll")); 2274 } 2275 list = ACCESS_ONCE(rdp->nocb_head); 2276 if (!list) { 2277 if (!rcu_nocb_poll) 2278 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2279 TPS("WokeEmpty")); 2280 schedule_timeout_interruptible(1); 2281 flush_signals(current); 2282 continue; 2283 } 2284 firsttime = 1; 2285 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2286 TPS("WokeNonEmpty")); 2287 2288 /* 2289 * Extract queued callbacks, update counts, and wait 2290 * for a grace period to elapse. 2291 */ 2292 ACCESS_ONCE(rdp->nocb_head) = NULL; 2293 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head); 2294 c = atomic_long_xchg(&rdp->nocb_q_count, 0); 2295 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0); 2296 ACCESS_ONCE(rdp->nocb_p_count) += c; 2297 ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl; 2298 rcu_nocb_wait_gp(rdp); 2299 2300 /* Each pass through the following loop invokes a callback. */ 2301 trace_rcu_batch_start(rdp->rsp->name, cl, c, -1); 2302 c = cl = 0; 2303 while (list) { 2304 next = list->next; 2305 /* Wait for enqueuing to complete, if needed. */ 2306 while (next == NULL && &list->next != tail) { 2307 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2308 TPS("WaitQueue")); 2309 schedule_timeout_interruptible(1); 2310 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2311 TPS("WokeQueue")); 2312 next = list->next; 2313 } 2314 debug_rcu_head_unqueue(list); 2315 local_bh_disable(); 2316 if (__rcu_reclaim(rdp->rsp->name, list)) 2317 cl++; 2318 c++; 2319 local_bh_enable(); 2320 list = next; 2321 } 2322 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1); 2323 ACCESS_ONCE(rdp->nocb_p_count) -= c; 2324 ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl; 2325 rdp->n_nocbs_invoked += c; 2326 } 2327 return 0; 2328} 2329 2330/* Is a deferred wakeup of rcu_nocb_kthread() required? */ 2331static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp) 2332{ 2333 return ACCESS_ONCE(rdp->nocb_defer_wakeup); 2334} 2335 2336/* Do a deferred wakeup of rcu_nocb_kthread().
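This handles the case where __call_rcu_nocb_enqueue() ran with interrupts disabled and therefore set ->nocb_defer_wakeup rather than doing the wake_up() directly (the "WakeEmptyIsDeferred" path above).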
*/ 2337static void do_nocb_deferred_wakeup(struct rcu_data *rdp) 2338{ 2339 if (!rcu_nocb_need_deferred_wakeup(rdp)) 2340 return; 2341 ACCESS_ONCE(rdp->nocb_defer_wakeup) = false; 2342 wake_up(&rdp->nocb_wq); 2343 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty")); 2344} 2345 2346/* Initialize per-rcu_data variables for no-CBs CPUs. */ 2347static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) 2348{ 2349 rdp->nocb_tail = &rdp->nocb_head; 2350 init_waitqueue_head(&rdp->nocb_wq); 2351} 2352 2353/* Create a kthread for each RCU flavor for each no-CBs CPU. */ 2354static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp) 2355{ 2356 int cpu; 2357 struct rcu_data *rdp; 2358 struct task_struct *t; 2359 2360 if (rcu_nocb_mask == NULL) 2361 return; 2362 for_each_cpu(cpu, rcu_nocb_mask) { 2363 rdp = per_cpu_ptr(rsp->rda, cpu); 2364 t = kthread_run(rcu_nocb_kthread, rdp, 2365 "rcuo%c/%d", rsp->abbr, cpu); 2366 BUG_ON(IS_ERR(t)); 2367 ACCESS_ONCE(rdp->nocb_kthread) = t; 2368 } 2369} 2370 2371/* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs. */ 2372static bool init_nocb_callback_list(struct rcu_data *rdp) 2373{ 2374 if (rcu_nocb_mask == NULL || 2375 !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask)) 2376 return false; 2377 rdp->nxttail[RCU_NEXT_TAIL] = NULL; 2378 return true; 2379} 2380 2381#else /* #ifdef CONFIG_RCU_NOCB_CPU */ 2382 2383static int rcu_nocb_needs_gp(struct rcu_state *rsp) 2384{ 2385 return 0; 2386} 2387 2388static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) 2389{ 2390} 2391 2392static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq) 2393{ 2394} 2395 2396static void rcu_init_one_nocb(struct rcu_node *rnp) 2397{ 2398} 2399 2400static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp, 2401 bool lazy, unsigned long flags) 2402{ 2403 return 0; 2404} 2405 2406static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp, 2407 struct rcu_data *rdp, 2408 unsigned long flags) 2409{ 2410 return 0; 2411} 2412 2413static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) 2414{ 2415} 2416 2417static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp) 2418{ 2419 return false; 2420} 2421 2422static void do_nocb_deferred_wakeup(struct rcu_data *rdp) 2423{ 2424} 2425 2426static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp) 2427{ 2428} 2429 2430static bool init_nocb_callback_list(struct rcu_data *rdp) 2431{ 2432 return false; 2433} 2434 2435#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ 2436 2437/* 2438 * An adaptive-ticks CPU can potentially execute in kernel mode for an 2439 * arbitrarily long period of time with the scheduling-clock tick turned 2440 * off. RCU will be paying attention to this CPU because it is in the 2441 * kernel, but the CPU cannot be guaranteed to be executing the RCU state 2442 * machine because the scheduling-clock tick has been disabled. Therefore, 2443 * if an adaptive-ticks CPU is failing to respond to the current grace 2444 * period and has not been idle from an RCU perspective, kick it. 2445 */ 2446static void rcu_kick_nohz_cpu(int cpu) 2447{ 2448#ifdef CONFIG_NO_HZ_FULL 2449 if (tick_nohz_full_cpu(cpu)) 2450 smp_send_reschedule(cpu); 2451#endif /* #ifdef CONFIG_NO_HZ_FULL */ 2452} 2453 2454 2455#ifdef CONFIG_NO_HZ_FULL_SYSIDLE 2456 2457/* 2458 * Define RCU flavor that holds sysidle state. This needs to be the 2459 * most active flavor of RCU.
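That is rcu_preempt when CONFIG_PREEMPT_RCU is set and rcu_sched otherwise, as the #ifdef below selects.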
2460 */ 2461#ifdef CONFIG_PREEMPT_RCU 2462static struct rcu_state *rcu_sysidle_state = &rcu_preempt_state; 2463#else /* #ifdef CONFIG_PREEMPT_RCU */ 2464static struct rcu_state *rcu_sysidle_state = &rcu_sched_state; 2465#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ 2466 2467static int full_sysidle_state; /* Current system-idle state. */ 2468#define RCU_SYSIDLE_NOT 0 /* Some CPU is not idle. */ 2469#define RCU_SYSIDLE_SHORT 1 /* All CPUs idle for brief period. */ 2470#define RCU_SYSIDLE_LONG 2 /* All CPUs idle for long enough. */ 2471#define RCU_SYSIDLE_FULL 3 /* All CPUs idle, ready for sysidle. */ 2472#define RCU_SYSIDLE_FULL_NOTED 4 /* Actually entered sysidle state. */ 2473 2474/* 2475 * Invoked to note exit from irq or task transition to idle. Note that 2476 * usermode execution does -not- count as idle here! After all, we want 2477 * to detect full-system idle states, not RCU quiescent states and grace 2478 * periods. The caller must have disabled interrupts. 2479 */ 2480static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq) 2481{ 2482 unsigned long j; 2483 2484 /* Adjust nesting, check for fully idle. */ 2485 if (irq) { 2486 rdtp->dynticks_idle_nesting--; 2487 WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0); 2488 if (rdtp->dynticks_idle_nesting != 0) 2489 return; /* Still not fully idle. */ 2490 } else { 2491 if ((rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) == 2492 DYNTICK_TASK_NEST_VALUE) { 2493 rdtp->dynticks_idle_nesting = 0; 2494 } else { 2495 rdtp->dynticks_idle_nesting -= DYNTICK_TASK_NEST_VALUE; 2496 WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0); 2497 return; /* Still not fully idle. */ 2498 } 2499 } 2500 2501 /* Record start of fully idle period. */ 2502 j = jiffies; 2503 ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j; 2504 smp_mb__before_atomic_inc(); 2505 atomic_inc(&rdtp->dynticks_idle); 2506 smp_mb__after_atomic_inc(); 2507 WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1); 2508} 2509 2510/* 2511 * Unconditionally force exit from full system-idle state. This is 2512 * invoked when a normal CPU exits idle, but must be called separately 2513 * for the timekeeping CPU (tick_do_timer_cpu). The reason for this 2514 * is that the timekeeping CPU is permitted to take scheduling-clock 2515 * interrupts while the system is in system-idle state, and of course 2516 * rcu_sysidle_exit() has no way of distinguishing a scheduling-clock 2517 * interrupt from any other type of interrupt. 2518 */ 2519void rcu_sysidle_force_exit(void) 2520{ 2521 int oldstate = ACCESS_ONCE(full_sysidle_state); 2522 int newoldstate; 2523 2524 /* 2525 * Each pass through the following loop attempts to exit full 2526 * system-idle state. If contention proves to be a problem, 2527 * a trylock-based contention tree could be used here. 2528 */ 2529 while (oldstate > RCU_SYSIDLE_SHORT) { 2530 newoldstate = cmpxchg(&full_sysidle_state, 2531 oldstate, RCU_SYSIDLE_NOT); 2532 if (oldstate == newoldstate && 2533 oldstate == RCU_SYSIDLE_FULL_NOTED) { 2534 rcu_kick_nohz_cpu(tick_do_timer_cpu); 2535 return; /* We cleared it, done! */ 2536 } 2537 oldstate = newoldstate; 2538 } 2539 smp_mb(); /* Order initial oldstate fetch vs. later non-idle work. */ 2540} 2541 2542/* 2543 * Invoked to note entry to irq or task transition from idle. Note that 2544 * usermode execution does -not- count as idle here! The caller must 2545 * have disabled interrupts. 2546 */ 2547static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq) 2548{ 2549 /* Adjust nesting, check for already non-idle. 
*/ 2550 if (irq) { 2551 rdtp->dynticks_idle_nesting++; 2552 WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0); 2553 if (rdtp->dynticks_idle_nesting != 1) 2554 return; /* Already non-idle. */ 2555 } else { 2556 /* 2557 * Allow for irq misnesting. Yes, it really is possible 2558 * to enter an irq handler then never leave it, and maybe 2559 * also vice versa. Handle both possibilities. 2560 */ 2561 if (rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) { 2562 rdtp->dynticks_idle_nesting += DYNTICK_TASK_NEST_VALUE; 2563 WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0); 2564 return; /* Already non-idle. */ 2565 } else { 2566 rdtp->dynticks_idle_nesting = DYNTICK_TASK_EXIT_IDLE; 2567 } 2568 } 2569 2570 /* Record end of idle period. */ 2571 smp_mb__before_atomic_inc(); 2572 atomic_inc(&rdtp->dynticks_idle); 2573 smp_mb__after_atomic_inc(); 2574 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1)); 2575 2576 /* 2577 * If we are the timekeeping CPU, we are permitted to be non-idle 2578 * during a system-idle state. This must be the case, because 2579 * the timekeeping CPU has to take scheduling-clock interrupts 2580 * during the time that the system is transitioning to full 2581 * system-idle state. This means that the timekeeping CPU must 2582 * invoke rcu_sysidle_force_exit() directly if it does anything 2583 * more than take a scheduling-clock interrupt. 2584 */ 2585 if (smp_processor_id() == tick_do_timer_cpu) 2586 return; 2587 2588 /* Update system-idle state: We are clearly no longer fully idle! */ 2589 rcu_sysidle_force_exit(); 2590} 2591 2592/* 2593 * Check to see if the current CPU is idle. Note that usermode execution 2594 * does not count as idle. The caller must have disabled interrupts. 2595 */ 2596static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle, 2597 unsigned long *maxj) 2598{ 2599 int cur; 2600 unsigned long j; 2601 struct rcu_dynticks *rdtp = rdp->dynticks; 2602 2603 /* 2604 * If some other CPU has already reported non-idle, if this is 2605 * not the flavor of RCU that tracks sysidle state, or if this 2606 * is an offline or the timekeeping CPU, nothing to do. 2607 */ 2608 if (!*isidle || rdp->rsp != rcu_sysidle_state || 2609 cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu) 2610 return; 2611 if (rcu_gp_in_progress(rdp->rsp)) 2612 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu); 2613 2614 /* Pick up current idle and NMI-nesting counter and check. */ 2615 cur = atomic_read(&rdtp->dynticks_idle); 2616 if (cur & 0x1) { 2617 *isidle = false; /* We are not idle! */ 2618 return; 2619 } 2620 smp_mb(); /* Read counters before timestamps. */ 2621 2622 /* Pick up timestamps. */ 2623 j = ACCESS_ONCE(rdtp->dynticks_idle_jiffies); 2624 /* If this CPU entered idle more recently, update maxj timestamp. */ 2625 if (ULONG_CMP_LT(*maxj, j)) 2626 *maxj = j; 2627} 2628 2629/* 2630 * Is this the flavor of RCU that is handling full-system idle? 2631 */ 2632static bool is_sysidle_rcu_state(struct rcu_state *rsp) 2633{ 2634 return rsp == rcu_sysidle_state; 2635} 2636 2637/* 2638 * Bind the grace-period kthread for the sysidle flavor of RCU to the 2639 * timekeeping CPU. 2640 */ 2641static void rcu_bind_gp_kthread(void) 2642{ 2643 int cpu = ACCESS_ONCE(tick_do_timer_cpu); 2644 2645 if (cpu < 0 || cpu >= nr_cpu_ids) 2646 return; 2647 if (raw_smp_processor_id() != cpu) 2648 set_cpus_allowed_ptr(current, cpumask_of(cpu)); 2649} 2650 2651/* 2652 * Return a delay in jiffies based on the number of CPUs, rcu_node 2653 * leaf fanout, and jiffies tick rate. 
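For example, nr_cpu_ids = 4096, rcu_fanout_leaf = 16, and HZ = 1000 give DIV_ROUND_UP(4096 * 1000, 16 * 1000) = 256 jiffies (worked example for illustration).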
The idea is to allow larger 2654 * systems more time to transition to full-idle state in order to 2655 * avoid the cache thrashing that would otherwise occur on the state variable. 2656 * Really small systems (fewer than a couple of tens of CPUs) should 2657 * instead use a single global atomically incremented counter, and later 2658 * versions of this will automatically reconfigure themselves accordingly. 2659 */ 2660static unsigned long rcu_sysidle_delay(void) 2661{ 2662 if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) 2663 return 0; 2664 return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000); 2665} 2666 2667/* 2668 * Advance the full-system-idle state. This is invoked when all of 2669 * the non-timekeeping CPUs are idle. 2670 */ 2671static void rcu_sysidle(unsigned long j) 2672{ 2673 /* Check the current state. */ 2674 switch (ACCESS_ONCE(full_sysidle_state)) { 2675 case RCU_SYSIDLE_NOT: 2676 2677 /* First time all are idle, so note a short idle period. */ 2678 ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT; 2679 break; 2680 2681 case RCU_SYSIDLE_SHORT: 2682 2683 /* 2684 * Idle for a bit, time to advance to next state? 2685 * cmpxchg failure means race with non-idle, let them win. 2686 */ 2687 if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay())) 2688 (void)cmpxchg(&full_sysidle_state, 2689 RCU_SYSIDLE_SHORT, RCU_SYSIDLE_LONG); 2690 break; 2691 2692 case RCU_SYSIDLE_LONG: 2693 2694 /* 2695 * Do an additional check pass before advancing to full. 2696 * cmpxchg failure means race with non-idle, let them win. 2697 */ 2698 if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay())) 2699 (void)cmpxchg(&full_sysidle_state, 2700 RCU_SYSIDLE_LONG, RCU_SYSIDLE_FULL); 2701 break; 2702 2703 default: 2704 break; 2705 } 2706} 2707 2708/* 2709 * Found a non-idle non-timekeeping CPU, so kick the system-idle state 2710 * back to the beginning. 2711 */ 2712static void rcu_sysidle_cancel(void) 2713{ 2714 smp_mb(); 2715 ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT; 2716} 2717 2718/* 2719 * Update the sysidle state based on the results of a force-quiescent-state 2720 * scan of the CPUs' dyntick-idle state. 2721 */ 2722static void rcu_sysidle_report(struct rcu_state *rsp, int isidle, 2723 unsigned long maxj, bool gpkt) 2724{ 2725 if (rsp != rcu_sysidle_state) 2726 return; /* Wrong flavor, ignore. */ 2727 if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) 2728 return; /* Running state machine from timekeeping CPU. */ 2729 if (isidle) 2730 rcu_sysidle(maxj); /* More idle! */ 2731 else 2732 rcu_sysidle_cancel(); /* Idle is over. */ 2733} 2734 2735/* 2736 * Wrapper for rcu_sysidle_report() when called from the grace-period 2737 * kthread's context. 2738 */ 2739static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle, 2740 unsigned long maxj) 2741{ 2742 rcu_sysidle_report(rsp, isidle, maxj, true); 2743} 2744 2745/* Callback and function for forcing an RCU grace period. */ 2746struct rcu_sysidle_head { 2747 struct rcu_head rh; 2748 int inuse; 2749}; 2750 2751static void rcu_sysidle_cb(struct rcu_head *rhp) 2752{ 2753 struct rcu_sysidle_head *rshp; 2754 2755 /* 2756 * The following memory barrier is needed to replace the 2757 * memory barriers that would normally be in the memory 2758 * allocator. 2759 */ 2760 smp_mb(); /* grace period precedes setting inuse. */ 2761 2762 rshp = container_of(rhp, struct rcu_sysidle_head, rh); 2763 ACCESS_ONCE(rshp->inuse) = 0; 2764} 2765 2766/* 2767 * Check to see if the system is fully idle, other than the timekeeping CPU.
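On systems with more than CONFIG_NO_HZ_FULL_SYSIDLE_SMALL CPUs the state machine is advanced by the grace-period kthread's scans; on smaller systems this function advances it directly by scanning all CPUs, as the code below shows.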
2768 * The caller must have disabled interrupts. 2769 */ 2770bool rcu_sys_is_idle(void) 2771{ 2772 static struct rcu_sysidle_head rsh; 2773 int rss = ACCESS_ONCE(full_sysidle_state); 2774 2775 if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu)) 2776 return false; 2777 2778 /* Handle small-system case by doing a full scan of CPUs. */ 2779 if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) { 2780 int oldrss = rss - 1; 2781 2782 /* 2783 * One pass to advance to each state up to _FULL. 2784 * Give up if any pass fails to advance the state. 2785 */ 2786 while (rss < RCU_SYSIDLE_FULL && oldrss < rss) { 2787 int cpu; 2788 bool isidle = true; 2789 unsigned long maxj = jiffies - ULONG_MAX / 4; 2790 struct rcu_data *rdp; 2791 2792 /* Scan all the CPUs looking for nonidle CPUs. */ 2793 for_each_possible_cpu(cpu) { 2794 rdp = per_cpu_ptr(rcu_sysidle_state->rda, cpu); 2795 rcu_sysidle_check_cpu(rdp, &isidle, &maxj); 2796 if (!isidle) 2797 break; 2798 } 2799 rcu_sysidle_report(rcu_sysidle_state, 2800 isidle, maxj, false); 2801 oldrss = rss; 2802 rss = ACCESS_ONCE(full_sysidle_state); 2803 } 2804 } 2805 2806 /* If this is the first observation of an idle period, record it. */ 2807 if (rss == RCU_SYSIDLE_FULL) { 2808 rss = cmpxchg(&full_sysidle_state, 2809 RCU_SYSIDLE_FULL, RCU_SYSIDLE_FULL_NOTED); 2810 return rss == RCU_SYSIDLE_FULL; 2811 } 2812 2813 smp_mb(); /* ensure rss load happens before later caller actions. */ 2814 2815 /* If already fully idle, tell the caller (in case of races). */ 2816 if (rss == RCU_SYSIDLE_FULL_NOTED) 2817 return true; 2818 2819 /* 2820 * If we aren't there yet, and a grace period is not in flight, 2821 * initiate a grace period. Either way, tell the caller that 2822 * we are not there yet. We use an xchg() rather than an assignment 2823 * to make up for the memory barriers that would otherwise be 2824 * provided by the memory allocator. 2825 */ 2826 if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL && 2827 !rcu_gp_in_progress(rcu_sysidle_state) && 2828 !rsh.inuse && xchg(&rsh.inuse, 1) == 0) 2829 call_rcu(&rsh.rh, rcu_sysidle_cb); 2830 return false; 2831} 2832 2833/* 2834 * Initialize dynticks sysidle state for CPUs coming online. 2835 */ 2836static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp) 2837{ 2838 rdtp->dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE; 2839} 2840 2841#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ 2842 2843static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq) 2844{ 2845} 2846 2847static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq) 2848{ 2849} 2850 2851static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle, 2852 unsigned long *maxj) 2853{ 2854} 2855 2856static bool is_sysidle_rcu_state(struct rcu_state *rsp) 2857{ 2858 return false; 2859} 2860 2861static void rcu_bind_gp_kthread(void) 2862{ 2863} 2864 2865static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle, 2866 unsigned long maxj) 2867{ 2868} 2869 2870static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp) 2871{ 2872} 2873 2874#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ 2875
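/*
 * Illustrative sketch only; it is not part of tree_plugin.h, and the
 * function name example_sysidle_query() is made up.  It simply restates,
 * as code, the calling contract documented above: rcu_sys_is_idle() may
 * be invoked only on the timekeeping CPU (tick_do_timer_cpu) with
 * interrupts disabled, and that CPU must call rcu_sysidle_force_exit()
 * itself if it goes on to do more than take scheduling-clock interrupts.
 */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
static bool __maybe_unused example_sysidle_query(void)
{
	unsigned long flags;
	bool sys_idle;

	local_irq_save(flags);
	sys_idle = rcu_sys_is_idle();	/* Timekeeping CPU only, irqs off. */
	local_irq_restore(flags);

	/*
	 * A caller that then does more than take scheduling-clock
	 * interrupts must clear the full-system-idle state by hand:
	 *
	 *	rcu_sysidle_force_exit();
	 */
	return sys_idle;
}
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */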