tree_plugin.h revision 9fdd3bc9005824704f9802bec7b3e06f5edae434
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include "../time/tick-internal.h"

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#include "../locking/rtmutex_common.h"
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
static char __initdata nocb_buf[NR_CPUS * 5];
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	pr_info("\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
		CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	pr_info("\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	pr_info("\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	pr_info("\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	pr_info("\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
	pr_info("\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	pr_info("\tFour-level hierarchy is enabled.\n");
#endif
	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_NOCB_CPU
#ifndef CONFIG_RCU_NOCB_CPU_NONE
	if (!have_rcu_nocb_mask) {
		zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL);
		have_rcu_nocb_mask = true;
	}
#ifdef CONFIG_RCU_NOCB_CPU_ZERO
	pr_info("\tOffload RCU callbacks from CPU 0\n");
	cpumask_set_cpu(0, rcu_nocb_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
#ifdef CONFIG_RCU_NOCB_CPU_ALL
	pr_info("\tOffload RCU callbacks from all CPUs\n");
	cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
	if (have_rcu_nocb_mask) {
		if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
			pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
			cpumask_and(rcu_nocb_mask, cpu_possible_mask,
				    rcu_nocb_mask);
		}
		cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
		pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
		if (rcu_nocb_poll)
			pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
	}
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
}

#ifdef CONFIG_TREE_PREEMPT_RCU

RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
static struct rcu_state *rcu_state_p = &rcu_preempt_state;

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
static long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.
 * Note that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		smp_mb__after_unlock_lock();
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	smp_mb__after_unlock_lock();
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	int empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	bool drop_boost_mutex = false;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	int special;

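	/*
	 * (The ->rcu_read_unlock_special value read below is a bitmask:
	 * RCU_READ_UNLOCK_NEED_QS is set when the RCU core is waiting for a
	 * quiescent state from this CPU, and RCU_READ_UNLOCK_BLOCKED is set
	 * when this task was preempted within its RCU read-side critical
	 * section.  Both cases are handled in turn below.)
	 */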
	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		rcu_preempt_qs(smp_processor_id());
		if (!t->rcu_read_unlock_special) {
			local_irq_restore(flags);
			return;
		}
	}

	/* Hardware IRQ handlers cannot block, complain if they get here. */
	if (WARN_ON_ONCE(in_irq() || in_serving_softirq())) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			smp_mb__after_unlock_lock();
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot ->boost_mtx ownership with rcu_node lock held. */
		drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		}

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (drop_boost_mutex) {
			rt_mutex_unlock(&rnp->boost_mtx);
			complete(&rnp->boost_completion);
		}
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
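 * The scan starts at rnp->gp_tasks, the first queued task that is
 * blocking the current grace period, and runs to the end of the
 * ->blkd_tasks list, dumping each task's stack via sched_show_task().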
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
	pr_cont("\n");
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
}

static void rcu_print_task_stall_end(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	rcu_print_task_stall_begin(rnp);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	rcu_print_task_stall_end();
	return ndetected;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 * Returns non-zero if there were tasks blocking the current RCU grace
 * period on the specified rcu_node structure.
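 *
 * (More precisely, as the function body shows, the return value is a
 * bitmask: RCU_OFL_TASKS_NORM_GP is set if the moved tasks were blocking
 * the current normal grace period, and RCU_OFL_TASKS_EXP_GP is set if
 * they were blocking the current expedited grace period.)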
564 * 565 * Returns 1 if there was previously a task blocking the current grace 566 * period on the specified rcu_node structure. 567 * 568 * The caller must hold rnp->lock with irqs disabled. 569 */ 570static int rcu_preempt_offline_tasks(struct rcu_state *rsp, 571 struct rcu_node *rnp, 572 struct rcu_data *rdp) 573{ 574 struct list_head *lp; 575 struct list_head *lp_root; 576 int retval = 0; 577 struct rcu_node *rnp_root = rcu_get_root(rsp); 578 struct task_struct *t; 579 580 if (rnp == rnp_root) { 581 WARN_ONCE(1, "Last CPU thought to be offlined?"); 582 return 0; /* Shouldn't happen: at least one CPU online. */ 583 } 584 585 /* If we are on an internal node, complain bitterly. */ 586 WARN_ON_ONCE(rnp != rdp->mynode); 587 588 /* 589 * Move tasks up to root rcu_node. Don't try to get fancy for 590 * this corner-case operation -- just put this node's tasks 591 * at the head of the root node's list, and update the root node's 592 * ->gp_tasks and ->exp_tasks pointers to those of this node's, 593 * if non-NULL. This might result in waiting for more tasks than 594 * absolutely necessary, but this is a good performance/complexity 595 * tradeoff. 596 */ 597 if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0) 598 retval |= RCU_OFL_TASKS_NORM_GP; 599 if (rcu_preempted_readers_exp(rnp)) 600 retval |= RCU_OFL_TASKS_EXP_GP; 601 lp = &rnp->blkd_tasks; 602 lp_root = &rnp_root->blkd_tasks; 603 while (!list_empty(lp)) { 604 t = list_entry(lp->next, typeof(*t), rcu_node_entry); 605 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ 606 smp_mb__after_unlock_lock(); 607 list_del(&t->rcu_node_entry); 608 t->rcu_blocked_node = rnp_root; 609 list_add(&t->rcu_node_entry, lp_root); 610 if (&t->rcu_node_entry == rnp->gp_tasks) 611 rnp_root->gp_tasks = rnp->gp_tasks; 612 if (&t->rcu_node_entry == rnp->exp_tasks) 613 rnp_root->exp_tasks = rnp->exp_tasks; 614#ifdef CONFIG_RCU_BOOST 615 if (&t->rcu_node_entry == rnp->boost_tasks) 616 rnp_root->boost_tasks = rnp->boost_tasks; 617#endif /* #ifdef CONFIG_RCU_BOOST */ 618 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ 619 } 620 621 rnp->gp_tasks = NULL; 622 rnp->exp_tasks = NULL; 623#ifdef CONFIG_RCU_BOOST 624 rnp->boost_tasks = NULL; 625 /* 626 * In case root is being boosted and leaf was not. Make sure 627 * that we boost the tasks blocking the current grace period 628 * in this case. 629 */ 630 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ 631 smp_mb__after_unlock_lock(); 632 if (rnp_root->boost_tasks != NULL && 633 rnp_root->boost_tasks != rnp_root->gp_tasks && 634 rnp_root->boost_tasks != rnp_root->exp_tasks) 635 rnp_root->boost_tasks = rnp_root->gp_tasks; 636 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ 637#endif /* #ifdef CONFIG_RCU_BOOST */ 638 639 return retval; 640} 641 642#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 643 644/* 645 * Check for a quiescent state from the current CPU. When a task blocks, 646 * the task is recorded in the corresponding CPU's rcu_node structure, 647 * which is checked elsewhere. 648 * 649 * Caller must disable hard irqs. 
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 *
 * See the description of synchronize_sched() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");
	if (!rcu_scheduler_active)
		return;
	if (rcu_expedited)
		synchronize_rcu_expedited();
	else
		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
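 *
 * (Concretely, the loop below starts at the rcu_node on which the last
 * task was queued, and, as long as nothing at the current level is still
 * blocking the expedited grace period, clears this node's bit in the
 * parent's ->expmask and moves up one level, doing the wake-up once the
 * root shows nothing left to wait for.)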
748 * 749 * Most callers will set the "wake" flag, but the task initiating the 750 * expedited grace period need not wake itself. 751 * 752 * Caller must hold sync_rcu_preempt_exp_mutex. 753 */ 754static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, 755 bool wake) 756{ 757 unsigned long flags; 758 unsigned long mask; 759 760 raw_spin_lock_irqsave(&rnp->lock, flags); 761 smp_mb__after_unlock_lock(); 762 for (;;) { 763 if (!sync_rcu_preempt_exp_done(rnp)) { 764 raw_spin_unlock_irqrestore(&rnp->lock, flags); 765 break; 766 } 767 if (rnp->parent == NULL) { 768 raw_spin_unlock_irqrestore(&rnp->lock, flags); 769 if (wake) { 770 smp_mb(); /* EGP done before wake_up(). */ 771 wake_up(&sync_rcu_preempt_exp_wq); 772 } 773 break; 774 } 775 mask = rnp->grpmask; 776 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ 777 rnp = rnp->parent; 778 raw_spin_lock(&rnp->lock); /* irqs already disabled */ 779 smp_mb__after_unlock_lock(); 780 rnp->expmask &= ~mask; 781 } 782} 783 784/* 785 * Snapshot the tasks blocking the newly started preemptible-RCU expedited 786 * grace period for the specified rcu_node structure. If there are no such 787 * tasks, report it up the rcu_node hierarchy. 788 * 789 * Caller must hold sync_rcu_preempt_exp_mutex and must exclude 790 * CPU hotplug operations. 791 */ 792static void 793sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp) 794{ 795 unsigned long flags; 796 int must_wait = 0; 797 798 raw_spin_lock_irqsave(&rnp->lock, flags); 799 smp_mb__after_unlock_lock(); 800 if (list_empty(&rnp->blkd_tasks)) { 801 raw_spin_unlock_irqrestore(&rnp->lock, flags); 802 } else { 803 rnp->exp_tasks = rnp->blkd_tasks.next; 804 rcu_initiate_boost(rnp, flags); /* releases rnp->lock */ 805 must_wait = 1; 806 } 807 if (!must_wait) 808 rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */ 809} 810 811/** 812 * synchronize_rcu_expedited - Brute-force RCU grace period 813 * 814 * Wait for an RCU-preempt grace period, but expedite it. The basic 815 * idea is to invoke synchronize_sched_expedited() to push all the tasks to 816 * the ->blkd_tasks lists and wait for this list to drain. This consumes 817 * significant time on all CPUs and is unfriendly to real-time workloads, 818 * so is thus not recommended for any sort of common-case code. 819 * In fact, if you are using synchronize_rcu_expedited() in a loop, 820 * please restructure your code to batch your updates, and then Use a 821 * single synchronize_rcu() instead. 822 * 823 * Note that it is illegal to call this function while holding any lock 824 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal 825 * to call this function from a CPU-hotplug notifier. Failing to observe 826 * these restriction will result in deadlock. 827 */ 828void synchronize_rcu_expedited(void) 829{ 830 unsigned long flags; 831 struct rcu_node *rnp; 832 struct rcu_state *rsp = &rcu_preempt_state; 833 unsigned long snap; 834 int trycount = 0; 835 836 smp_mb(); /* Caller's modifications seen first by other CPUs. */ 837 snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1; 838 smp_mb(); /* Above access cannot bleed into critical section. */ 839 840 /* 841 * Block CPU-hotplug operations. This means that any CPU-hotplug 842 * operation that finds an rcu_node structure with tasks in the 843 * process of being boosted will know that all tasks blocking 844 * this expedited grace period will already be in the process of 845 * being boosted. 
	 * This simplifies the process of moving tasks from leaf to root
	 * rcu_node structures.
	 */
	get_online_cpus();

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (ULONG_CMP_LT(snap,
		    ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
			put_online_cpus();
			goto mb_ret; /* Others did our work for us. */
		}
		if (trycount++ < 10) {
			udelay(trycount * num_online_cpus());
		} else {
			put_online_cpus();
			wait_rcu_gp(call_rcu);
			return;
		}
	}
	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
		put_online_cpus();
		goto unlock_mb_ret; /* Others did our work for us. */
	}

	/* force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		smp_mb__after_unlock_lock();
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	put_online_cpus();

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count) =
					sync_rcu_preempt_exp_count + 1;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 *
 * Note that this primitive does not necessarily wait for an RCU grace period
 * to complete.  For example, if there are no RCU callbacks queued anywhere
 * in the system, then rcu_barrier() is within its rights to return
 * immediately, without waiting for anything, much less an RCU grace period.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (likely(list_empty(&current->rcu_node_entry)))
		return;
	t->rcu_read_lock_nesting = 1;
	barrier();
	t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
	__rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state_p = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
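 *
 * Either way, a typical use of rcu_barrier() is from a module-exit
 * function, to ensure that all of the module's outstanding callbacks
 * have been invoked before the module's code and data go away.  A
 * minimal (hypothetical) sketch, assuming the module has already stopped
 * posting new callbacks:
 *
 *	void __exit mymodule_exit(void)
 *	{
 *		rcu_barrier();
 *		kfree(module_private_data);
 *	}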
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

/*
 * Because preemptible RCU does not exist, tasks cannot possibly exit
 * while in preemptible RCU read-side critical sections.
 */
void exit_rcu(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "../locking/rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

static void rcu_wake_cond(struct task_struct *t, int status)
{
	/*
	 * If the thread is yielding, only wake it when this
	 * is invoked from idle.
	 */
	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
		wake_up_process(t);
}

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);
	smp_mb__after_unlock_lock();

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
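	 *
	 * In outline, the resulting sequence is:
	 *
	 *	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
	 *		(task t now appears to hold ->boost_mtx)
	 *	rt_mutex_lock(&rnp->boost_mtx);
	 *		(priority inheritance boosts task t)
	 *	rt_mutex_unlock(&rnp->boost_mtx);
	 *		(invoked from rcu_read_unlock_special() when task t
	 *		 leaves its critical section, dropping the boost)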
1194 * 1195 * Note that task t must acquire rnp->lock to remove itself from 1196 * the ->blkd_tasks list, which it will do from exit() if from 1197 * nowhere else. We therefore are guaranteed that task t will 1198 * stay around at least until we drop rnp->lock. Note that 1199 * rnp->lock also resolves races between our priority boosting 1200 * and task t's exiting its outermost RCU read-side critical 1201 * section. 1202 */ 1203 t = container_of(tb, struct task_struct, rcu_node_entry); 1204 rt_mutex_init_proxy_locked(&rnp->boost_mtx, t); 1205 init_completion(&rnp->boost_completion); 1206 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1207 /* Lock only for side effect: boosts task t's priority. */ 1208 rt_mutex_lock(&rnp->boost_mtx); 1209 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */ 1210 1211 /* Wait for boostee to be done w/boost_mtx before reinitializing. */ 1212 wait_for_completion(&rnp->boost_completion); 1213 1214 return ACCESS_ONCE(rnp->exp_tasks) != NULL || 1215 ACCESS_ONCE(rnp->boost_tasks) != NULL; 1216} 1217 1218/* 1219 * Priority-boosting kthread. One per leaf rcu_node and one for the 1220 * root rcu_node. 1221 */ 1222static int rcu_boost_kthread(void *arg) 1223{ 1224 struct rcu_node *rnp = (struct rcu_node *)arg; 1225 int spincnt = 0; 1226 int more2boost; 1227 1228 trace_rcu_utilization(TPS("Start boost kthread@init")); 1229 for (;;) { 1230 rnp->boost_kthread_status = RCU_KTHREAD_WAITING; 1231 trace_rcu_utilization(TPS("End boost kthread@rcu_wait")); 1232 rcu_wait(rnp->boost_tasks || rnp->exp_tasks); 1233 trace_rcu_utilization(TPS("Start boost kthread@rcu_wait")); 1234 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING; 1235 more2boost = rcu_boost(rnp); 1236 if (more2boost) 1237 spincnt++; 1238 else 1239 spincnt = 0; 1240 if (spincnt > 10) { 1241 rnp->boost_kthread_status = RCU_KTHREAD_YIELDING; 1242 trace_rcu_utilization(TPS("End boost kthread@rcu_yield")); 1243 schedule_timeout_interruptible(2); 1244 trace_rcu_utilization(TPS("Start boost kthread@rcu_yield")); 1245 spincnt = 0; 1246 } 1247 } 1248 /* NOTREACHED */ 1249 trace_rcu_utilization(TPS("End boost kthread@notreached")); 1250 return 0; 1251} 1252 1253/* 1254 * Check to see if it is time to start boosting RCU readers that are 1255 * blocking the current grace period, and, if so, tell the per-rcu_node 1256 * kthread to start boosting them. If there is an expedited grace 1257 * period in progress, it is always time to boost. 1258 * 1259 * The caller must hold rnp->lock, which this function releases. 1260 * The ->boost_kthread_task is immortal, so we don't need to worry 1261 * about it going away. 1262 */ 1263static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) 1264 __releases(rnp->lock) 1265{ 1266 struct task_struct *t; 1267 1268 if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) { 1269 rnp->n_balk_exp_gp_tasks++; 1270 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1271 return; 1272 } 1273 if (rnp->exp_tasks != NULL || 1274 (rnp->gp_tasks != NULL && 1275 rnp->boost_tasks == NULL && 1276 rnp->qsmask == 0 && 1277 ULONG_CMP_GE(jiffies, rnp->boost_time))) { 1278 if (rnp->exp_tasks == NULL) 1279 rnp->boost_tasks = rnp->gp_tasks; 1280 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1281 t = rnp->boost_kthread_task; 1282 if (t) 1283 rcu_wake_cond(t, rnp->boost_kthread_status); 1284 } else { 1285 rcu_initiate_boost_trace(rnp); 1286 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1287 } 1288} 1289 1290/* 1291 * Wake up the per-CPU kthread to invoke RCU callbacks. 
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
			      __this_cpu_read(rcu_cpu_kthread_status));
	}
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __this_cpu_read(rcu_cpu_kthread_task) == current;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
				       struct rcu_node *rnp)
{
	int rnp_index = rnp - &rsp->node[0];
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;

	if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
		return 0;

	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	smp_mb__after_unlock_lock();
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
	rcu_preempt_do_callbacks();
}

static void rcu_cpu_kthread_setup(unsigned int cpu)
{
	struct sched_param sp;

	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
}

static void rcu_cpu_kthread_park(unsigned int cpu)
{
	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
}

static int rcu_cpu_kthread_should_run(unsigned int cpu)
{
	return __this_cpu_read(rcu_cpu_has_work);
}

/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
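 * (As the function body shows, the kthread makes up to ten passes over
 * newly arriving work, then briefly yields the CPU with a two-jiffy
 * sleep rather than monopolizing it.)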
 */
static void rcu_cpu_kthread(unsigned int cpu)
{
	unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
	char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
	int spincnt;

	for (spincnt = 0; spincnt < 10; spincnt++) {
		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
		local_bh_disable();
		*statusp = RCU_KTHREAD_RUNNING;
		this_cpu_inc(rcu_cpu_kthread_loops);
		local_irq_disable();
		work = *workp;
		*workp = 0;
		local_irq_enable();
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp == 0) {
			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
			*statusp = RCU_KTHREAD_WAITING;
			return;
		}
	}
	*statusp = RCU_KTHREAD_YIELDING;
	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
	schedule_timeout_interruptible(2);
	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
	*statusp = RCU_KTHREAD_WAITING;
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set, use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	struct task_struct *t = rnp->boost_kthread_task;
	unsigned long mask = rnp->qsmaskinit;
	cpumask_var_t cm;
	int cpu;

	if (!t)
		return;
	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(t, cm);
	free_cpumask_var(cm);
}

static struct smp_hotplug_thread rcu_cpu_thread_spec = {
	.store			= &rcu_cpu_kthread_task,
	.thread_should_run	= rcu_cpu_kthread_should_run,
	.thread_fn		= rcu_cpu_kthread,
	.thread_comm		= "rcuc/%u",
	.setup			= rcu_cpu_kthread_setup,
	.park			= rcu_cpu_kthread_park,
};

/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	struct rcu_node *rnp;
	int cpu;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu)
		per_cpu(rcu_cpu_has_work, cpu) = 0;
	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
	rnp = rcu_get_root(rcu_state_p);
	(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state_p, rnp)
			(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);

static void rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active)
		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU
 * needs any flavor of RCU.
 */
#ifndef CONFIG_RCU_NOCB_CPU_ALL
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
	*delta_jiffies = ULONG_MAX;
	return rcu_cpu_has_callbacks(cpu, NULL);
}
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}

/*
 * Don't bother keeping a running count of the number of RCU callbacks
 * posted because CONFIG_RCU_FAST_NO_HZ=n.
 */
static void rcu_idle_count_callbacks_posted(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following two preprocessor symbols control this state machine:
 *
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
 *	permitted to sleep in dyntick-idle mode with only lazy RCU
 *	callbacks pending.  Setting this too high can OOM your system.
 *
 * The values below work well in practice.
 * If future workloads require adjustment, they can be converted into
 * kernel config parameters, though making the state machine smarter
 * might be a better option.
 */
#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */

static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
module_param(rcu_idle_gp_delay, int, 0644);
static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
module_param(rcu_idle_lazy_gp_delay, int, 0644);

extern int tick_nohz_active;

/*
 * Try to advance callbacks for all flavors of RCU on the current CPU, but
 * only if it has been a while since the last time we did so.  Afterwards,
 * if there are any callbacks ready for immediate invocation, return true.
 */
static bool __maybe_unused rcu_try_advance_all_cbs(void)
{
	bool cbs_ready = false;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	struct rcu_node *rnp;
	struct rcu_state *rsp;

	/* Exit early if we advanced recently. */
	if (jiffies == rdtp->last_advance_all)
		return false;
	rdtp->last_advance_all = jiffies;

	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		rnp = rdp->mynode;

		/*
		 * Don't bother checking unless a grace period has
		 * completed since we last checked and there are
		 * callbacks not yet ready to invoke.
		 */
		if (rdp->completed != rnp->completed &&
		    rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
			note_gp_changes(rsp, rdp);

		if (cpu_has_callbacks_ready_to_invoke(rdp))
			cbs_ready = true;
	}
	return cbs_ready;
}

/*
 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
 * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
 * caller to set the timeout based on whether or not there are non-lazy
 * callbacks.
 *
 * The caller must have disabled interrupts.
 */
#ifndef CONFIG_RCU_NOCB_CPU_ALL
int rcu_needs_cpu(int cpu, unsigned long *dj)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	/* Snapshot to detect later posting of non-lazy callback. */
	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;

	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(cpu, &rdtp->all_lazy)) {
		*dj = ULONG_MAX;
		return 0;
	}

	/* Attempt to advance callbacks. */
	if (rcu_try_advance_all_cbs()) {
		/* Some ready to invoke, so initiate later invocation. */
		invoke_rcu_core();
		return 1;
	}
	rdtp->last_accelerate = jiffies;

	/* Request timer delay depending on laziness, and round. */
	if (!rdtp->all_lazy) {
		*dj = round_up(rcu_idle_gp_delay + jiffies,
			       rcu_idle_gp_delay) - jiffies;
	} else {
		*dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
	}
	return 0;
}
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */

/*
 * Prepare a CPU for idle from an RCU perspective.  The first major task
 * is to sense whether nohz mode has been enabled or disabled via sysfs.
 * The second major task is to check to see if a non-lazy callback has
 * arrived at a CPU that previously had only lazy callbacks.  The third
 * major task is to accelerate (that is, assign grace-period numbers to)
 * any recently arrived callbacks.
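 * (Acceleration happens at most once per jiffy, via rcu_accelerate_cbs()
 * with the rcu_node lock held, and may require waking up the grace-period
 * kthread.)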
1699 * 1700 * The caller must have disabled interrupts. 1701 */ 1702static void rcu_prepare_for_idle(int cpu) 1703{ 1704#ifndef CONFIG_RCU_NOCB_CPU_ALL 1705 bool needwake; 1706 struct rcu_data *rdp; 1707 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); 1708 struct rcu_node *rnp; 1709 struct rcu_state *rsp; 1710 int tne; 1711 1712 /* Handle nohz enablement switches conservatively. */ 1713 tne = ACCESS_ONCE(tick_nohz_active); 1714 if (tne != rdtp->tick_nohz_enabled_snap) { 1715 if (rcu_cpu_has_callbacks(cpu, NULL)) 1716 invoke_rcu_core(); /* force nohz to see update. */ 1717 rdtp->tick_nohz_enabled_snap = tne; 1718 return; 1719 } 1720 if (!tne) 1721 return; 1722 1723 /* If this is a no-CBs CPU, no callbacks, just return. */ 1724 if (rcu_is_nocb_cpu(cpu)) 1725 return; 1726 1727 /* 1728 * If a non-lazy callback arrived at a CPU having only lazy 1729 * callbacks, invoke RCU core for the side-effect of recalculating 1730 * idle duration on re-entry to idle. 1731 */ 1732 if (rdtp->all_lazy && 1733 rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) { 1734 rdtp->all_lazy = false; 1735 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted; 1736 invoke_rcu_core(); 1737 return; 1738 } 1739 1740 /* 1741 * If we have not yet accelerated this jiffy, accelerate all 1742 * callbacks on this CPU. 1743 */ 1744 if (rdtp->last_accelerate == jiffies) 1745 return; 1746 rdtp->last_accelerate = jiffies; 1747 for_each_rcu_flavor(rsp) { 1748 rdp = per_cpu_ptr(rsp->rda, cpu); 1749 if (!*rdp->nxttail[RCU_DONE_TAIL]) 1750 continue; 1751 rnp = rdp->mynode; 1752 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ 1753 smp_mb__after_unlock_lock(); 1754 needwake = rcu_accelerate_cbs(rsp, rnp, rdp); 1755 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ 1756 if (needwake) 1757 rcu_gp_kthread_wake(rsp); 1758 } 1759#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ 1760} 1761 1762/* 1763 * Clean up for exit from idle. Attempt to advance callbacks based on 1764 * any grace periods that elapsed while the CPU was idle, and if any 1765 * callbacks are now ready to invoke, initiate invocation. 1766 */ 1767static void rcu_cleanup_after_idle(int cpu) 1768{ 1769#ifndef CONFIG_RCU_NOCB_CPU_ALL 1770 if (rcu_is_nocb_cpu(cpu)) 1771 return; 1772 if (rcu_try_advance_all_cbs()) 1773 invoke_rcu_core(); 1774#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ 1775} 1776 1777/* 1778 * Keep a running count of the number of non-lazy callbacks posted 1779 * on this CPU. This running counter (which is never decremented) allows 1780 * rcu_prepare_for_idle() to detect when something out of the idle loop 1781 * posts a callback, even if an equal number of callbacks are invoked. 1782 * Of course, callbacks should only be posted from within a trace event 1783 * designed to be called from idle or from within RCU_NONIDLE(). 1784 */ 1785static void rcu_idle_count_callbacks_posted(void) 1786{ 1787 __this_cpu_add(rcu_dynticks.nonlazy_posted, 1); 1788} 1789 1790/* 1791 * Data for flushing lazy RCU callbacks at OOM time. 1792 */ 1793static atomic_t oom_callback_count; 1794static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq); 1795 1796/* 1797 * RCU OOM callback -- decrement the outstanding count and deliver the 1798 * wake-up if we are the last one. 1799 */ 1800static void rcu_oom_callback(struct rcu_head *rhp) 1801{ 1802 if (atomic_dec_and_test(&oom_callback_count)) 1803 wake_up(&oom_callback_wq); 1804} 1805 1806/* 1807 * Post an rcu_oom_notify callback on the current CPU if it has at 1808 * least one lazy callback. 
This will unnecessarily post callbacks 1809 * to CPUs that already have a non-lazy callback at the end of their 1810 * callback list, but this is an infrequent operation, so accept some 1811 * extra overhead to keep things simple. 1812 */ 1813static void rcu_oom_notify_cpu(void *unused) 1814{ 1815 struct rcu_state *rsp; 1816 struct rcu_data *rdp; 1817 1818 for_each_rcu_flavor(rsp) { 1819 rdp = raw_cpu_ptr(rsp->rda); 1820 if (rdp->qlen_lazy != 0) { 1821 atomic_inc(&oom_callback_count); 1822 rsp->call(&rdp->oom_head, rcu_oom_callback); 1823 } 1824 } 1825} 1826 1827/* 1828 * If low on memory, ensure that each CPU has a non-lazy callback. 1829 * This will wake up CPUs that have only lazy callbacks, in turn 1830 * ensuring that they free up the corresponding memory in a timely manner. 1831 * Because an uncertain amount of memory will be freed in some uncertain 1832 * timeframe, we do not claim to have freed anything. 1833 */ 1834static int rcu_oom_notify(struct notifier_block *self, 1835 unsigned long notused, void *nfreed) 1836{ 1837 int cpu; 1838 1839 /* Wait for callbacks from earlier instance to complete. */ 1840 wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0); 1841 smp_mb(); /* Ensure callback reuse happens after callback invocation. */ 1842 1843 /* 1844 * Prevent premature wakeup: ensure that all increments happen 1845 * before there is a chance of the counter reaching zero. 1846 */ 1847 atomic_set(&oom_callback_count, 1); 1848 1849 get_online_cpus(); 1850 for_each_online_cpu(cpu) { 1851 smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1); 1852 cond_resched(); 1853 } 1854 put_online_cpus(); 1855 1856 /* Unconditionally decrement: no need to wake ourselves up. */ 1857 atomic_dec(&oom_callback_count); 1858 1859 return NOTIFY_OK; 1860} 1861 1862static struct notifier_block rcu_oom_nb = { 1863 .notifier_call = rcu_oom_notify 1864}; 1865 1866static int __init rcu_register_oom_notifier(void) 1867{ 1868 register_oom_notifier(&rcu_oom_nb); 1869 return 0; 1870} 1871early_initcall(rcu_register_oom_notifier); 1872 1873#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ 1874 1875#ifdef CONFIG_RCU_CPU_STALL_INFO 1876 1877#ifdef CONFIG_RCU_FAST_NO_HZ 1878 1879static void print_cpu_stall_fast_no_hz(char *cp, int cpu) 1880{ 1881 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); 1882 unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap; 1883 1884 sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c", 1885 rdtp->last_accelerate & 0xffff, jiffies & 0xffff, 1886 ulong2long(nlpd), 1887 rdtp->all_lazy ? 'L' : '.', 1888 rdtp->tick_nohz_enabled_snap ? '.' : 'D'); 1889} 1890 1891#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */ 1892 1893static void print_cpu_stall_fast_no_hz(char *cp, int cpu) 1894{ 1895 *cp = '\0'; 1896} 1897 1898#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */ 1899 1900/* Initiate the stall-info list. */ 1901static void print_cpu_stall_info_begin(void) 1902{ 1903 pr_cont("\n"); 1904} 1905 1906/* 1907 * Print out diagnostic information for the specified stalled CPU. 1908 * 1909 * If the specified CPU is aware of the current RCU grace period 1910 * (flavor specified by rsp), then print the number of scheduling 1911 * clock interrupts the CPU has taken during the time that it has 1912 * been aware. Otherwise, print the number of RCU grace periods 1913 * that this CPU is ignorant of, for example, "1" if the CPU was 1914 * aware of the previous grace period. 1915 * 1916 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info. 
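 *
 * The result is one line per stalled CPU, along these lines (editorial
 * example output; the exact values are of course system-dependent):
 *
 *	3: (4 ticks this GP) idle=c52/0/0 softirq=312/315 last_accelerate: 3abc/3b02, nonlazy_posted: 0, L.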
1917 */ 1918static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) 1919{ 1920 char fast_no_hz[72]; 1921 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 1922 struct rcu_dynticks *rdtp = rdp->dynticks; 1923 char *ticks_title; 1924 unsigned long ticks_value; 1925 1926 if (rsp->gpnum == rdp->gpnum) { 1927 ticks_title = "ticks this GP"; 1928 ticks_value = rdp->ticks_this_gp; 1929 } else { 1930 ticks_title = "GPs behind"; 1931 ticks_value = rsp->gpnum - rdp->gpnum; 1932 } 1933 print_cpu_stall_fast_no_hz(fast_no_hz, cpu); 1934 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n", 1935 cpu, ticks_value, ticks_title, 1936 atomic_read(&rdtp->dynticks) & 0xfff, 1937 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting, 1938 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), 1939 fast_no_hz); 1940} 1941 1942/* Terminate the stall-info list. */ 1943static void print_cpu_stall_info_end(void) 1944{ 1945 pr_err("\t"); 1946} 1947 1948/* Zero ->ticks_this_gp for all flavors of RCU. */ 1949static void zero_cpu_stall_ticks(struct rcu_data *rdp) 1950{ 1951 rdp->ticks_this_gp = 0; 1952 rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id()); 1953} 1954 1955/* Increment ->ticks_this_gp for all flavors of RCU. */ 1956static void increment_cpu_stall_ticks(void) 1957{ 1958 struct rcu_state *rsp; 1959 1960 for_each_rcu_flavor(rsp) 1961 raw_cpu_inc(rsp->rda->ticks_this_gp); 1962} 1963 1964#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */ 1965 1966static void print_cpu_stall_info_begin(void) 1967{ 1968 pr_cont(" {"); 1969} 1970 1971static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) 1972{ 1973 pr_cont(" %d", cpu); 1974} 1975 1976static void print_cpu_stall_info_end(void) 1977{ 1978 pr_cont("} "); 1979} 1980 1981static void zero_cpu_stall_ticks(struct rcu_data *rdp) 1982{ 1983} 1984 1985static void increment_cpu_stall_ticks(void) 1986{ 1987} 1988 1989#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */ 1990 1991#ifdef CONFIG_RCU_NOCB_CPU 1992 1993/* 1994 * Offload callback processing from the boot-time-specified set of CPUs 1995 * specified by rcu_nocb_mask. For each CPU in the set, there is a 1996 * kthread created that pulls the callbacks from the corresponding CPU, 1997 * waits for a grace period to elapse, and invokes the callbacks. 1998 * The no-CBs CPUs do a wake_up() on their kthread when they insert 1999 * a callback into any empty list, unless the rcu_nocb_poll boot parameter 2000 * has been specified, in which case each kthread actively polls its 2001 * CPU. (Which isn't so great for energy efficiency, but which does 2002 * reduce RCU's overhead on that CPU.) 2003 * 2004 * This is intended to be used in conjunction with Frederic Weisbecker's 2005 * adaptive-idle work, which would seriously reduce OS jitter on CPUs 2006 * running CPU-bound user-mode computations. 2007 * 2008 * Offloading of callback processing could also in theory be used as 2009 * an energy-efficiency measure because CPUs with no RCU callbacks 2010 * queued are more aggressive about entering dyntick-idle mode. 2011 */ 2012 2013 2014/* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. 
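 * For example, booting with "rcu_nocbs=1-7" offloads callback
 * invocation for CPUs 1-7 to their "rcuo" kthreads, leaving CPU 0
 * to process its own callbacks.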
 */
2015static int __init rcu_nocb_setup(char *str)
2016{
2017	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
2018	have_rcu_nocb_mask = true;
2019	cpulist_parse(str, rcu_nocb_mask);
2020	return 1;
2021}
2022__setup("rcu_nocbs=", rcu_nocb_setup);
2023
2024static int __init parse_rcu_nocb_poll(char *arg)
2025{
2026	rcu_nocb_poll = 1;
2027	return 0;
2028}
2029early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
2030
2031/*
2032 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
2033 * grace period.
2034 */
2035static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
2036{
2037	wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
2038}
2039
2040/*
2041 * Set the root rcu_node structure's ->need_future_gp field
2042 * based on the sum of those of all rcu_node structures. This does
2043 * double-count the root rcu_node structure's requests, but this
2044 * is necessary to handle the possibility of a rcu_nocb_kthread()
2045 * having awakened during the time that the rcu_node structures
2046 * were being updated for the end of the previous grace period.
2047 */
2048static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2049{
2050	rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
2051}
2052
2053static void rcu_init_one_nocb(struct rcu_node *rnp)
2054{
2055	init_waitqueue_head(&rnp->nocb_gp_wq[0]);
2056	init_waitqueue_head(&rnp->nocb_gp_wq[1]);
2057}
2058
2059#ifndef CONFIG_RCU_NOCB_CPU_ALL
2060/* Is the specified CPU a no-CBs CPU? */
2061bool rcu_is_nocb_cpu(int cpu)
2062{
2063	if (have_rcu_nocb_mask)
2064		return cpumask_test_cpu(cpu, rcu_nocb_mask);
2065	return false;
2066}
2067#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
2068
2069/*
2070 * Kick the leader kthread for this NOCB group.
2071 */
2072static void wake_nocb_leader(struct rcu_data *rdp, bool force)
2073{
2074	struct rcu_data *rdp_leader = rdp->nocb_leader;
2075
2076	if (!ACCESS_ONCE(rdp_leader->nocb_kthread))
2077		return;
2078	if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
2079		/* Prior xchg orders against prior callback enqueue. */
2080		ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
2081		wake_up(&rdp_leader->nocb_wq);
2082	}
2083}
2084
2085/*
2086 * Enqueue the specified string of rcu_head structures onto the specified
2087 * CPU's no-CBs lists. The CPU is specified by rdp, the head of the
2088 * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy
2089 * counts are supplied by rhcount and rhcount_lazy.
2090 *
2091 * If warranted, also wake up the kthread servicing this CPU's queues.
2092 */
2093static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
2094				    struct rcu_head *rhp,
2095				    struct rcu_head **rhtp,
2096				    int rhcount, int rhcount_lazy,
2097				    unsigned long flags)
2098{
2099	int len;
2100	struct rcu_head **old_rhpp;
2101	struct task_struct *t;
2102
2103	/* Enqueue the callback on the nocb list and update counts. */
2104	old_rhpp = xchg(&rdp->nocb_tail, rhtp);
2105	ACCESS_ONCE(*old_rhpp) = rhp;
2106	atomic_long_add(rhcount, &rdp->nocb_q_count);
2107	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
2108
2109	/* If we are not being polled and there is a kthread, awaken it ... */
2110	t = ACCESS_ONCE(rdp->nocb_kthread);
2111	if (rcu_nocb_poll || !t) {
2112		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2113				    TPS("WakeNotPoll"));
2114		return;
2115	}
2116	len = atomic_long_read(&rdp->nocb_q_count);
2117	if (old_rhpp == &rdp->nocb_head) {
2118		if (!irqs_disabled_flags(flags)) {
2119			/* ... if queue was empty ...
*/ 2120 wake_nocb_leader(rdp, false); 2121 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2122 TPS("WakeEmpty")); 2123 } else { 2124 rdp->nocb_defer_wakeup = RCU_NOGP_WAKE; 2125 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2126 TPS("WakeEmptyIsDeferred")); 2127 } 2128 rdp->qlen_last_fqs_check = 0; 2129 } else if (len > rdp->qlen_last_fqs_check + qhimark) { 2130 /* ... or if many callbacks queued. */ 2131 if (!irqs_disabled_flags(flags)) { 2132 wake_nocb_leader(rdp, true); 2133 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2134 TPS("WakeOvf")); 2135 } else { 2136 rdp->nocb_defer_wakeup = RCU_NOGP_WAKE_FORCE; 2137 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2138 TPS("WakeOvfIsDeferred")); 2139 } 2140 rdp->qlen_last_fqs_check = LONG_MAX / 2; 2141 } else { 2142 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot")); 2143 } 2144 return; 2145} 2146 2147/* 2148 * This is a helper for __call_rcu(), which invokes this when the normal 2149 * callback queue is inoperable. If this is not a no-CBs CPU, this 2150 * function returns failure back to __call_rcu(), which can complain 2151 * appropriately. 2152 * 2153 * Otherwise, this function queues the callback where the corresponding 2154 * "rcuo" kthread can find it. 2155 */ 2156static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp, 2157 bool lazy, unsigned long flags) 2158{ 2159 2160 if (!rcu_is_nocb_cpu(rdp->cpu)) 2161 return 0; 2162 __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags); 2163 if (__is_kfree_rcu_offset((unsigned long)rhp->func)) 2164 trace_rcu_kfree_callback(rdp->rsp->name, rhp, 2165 (unsigned long)rhp->func, 2166 -atomic_long_read(&rdp->nocb_q_count_lazy), 2167 -atomic_long_read(&rdp->nocb_q_count)); 2168 else 2169 trace_rcu_callback(rdp->rsp->name, rhp, 2170 -atomic_long_read(&rdp->nocb_q_count_lazy), 2171 -atomic_long_read(&rdp->nocb_q_count)); 2172 return 1; 2173} 2174 2175/* 2176 * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is 2177 * not a no-CBs CPU. 2178 */ 2179static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp, 2180 struct rcu_data *rdp, 2181 unsigned long flags) 2182{ 2183 long ql = rsp->qlen; 2184 long qll = rsp->qlen_lazy; 2185 2186 /* If this is not a no-CBs CPU, tell the caller to do it the old way. */ 2187 if (!rcu_is_nocb_cpu(smp_processor_id())) 2188 return 0; 2189 rsp->qlen = 0; 2190 rsp->qlen_lazy = 0; 2191 2192 /* First, enqueue the donelist, if any. This preserves CB ordering. */ 2193 if (rsp->orphan_donelist != NULL) { 2194 __call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist, 2195 rsp->orphan_donetail, ql, qll, flags); 2196 ql = qll = 0; 2197 rsp->orphan_donelist = NULL; 2198 rsp->orphan_donetail = &rsp->orphan_donelist; 2199 } 2200 if (rsp->orphan_nxtlist != NULL) { 2201 __call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist, 2202 rsp->orphan_nxttail, ql, qll, flags); 2203 ql = qll = 0; 2204 rsp->orphan_nxtlist = NULL; 2205 rsp->orphan_nxttail = &rsp->orphan_nxtlist; 2206 } 2207 return 1; 2208} 2209 2210/* 2211 * If necessary, kick off a new grace period, and either way wait 2212 * for a subsequent grace period to complete. 
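 *
 * The two-entry ->nocb_gp_wq[] array suffices because grace periods
 * complete in order: a kthread waiting for grace period c sleeps on
 * ->nocb_gp_wq[c & 0x1], which rcu_nocb_gp_cleanup() wakes once
 * ->completed reaches c.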
2213 */ 2214static void rcu_nocb_wait_gp(struct rcu_data *rdp) 2215{ 2216 unsigned long c; 2217 bool d; 2218 unsigned long flags; 2219 bool needwake; 2220 struct rcu_node *rnp = rdp->mynode; 2221 2222 raw_spin_lock_irqsave(&rnp->lock, flags); 2223 smp_mb__after_unlock_lock(); 2224 needwake = rcu_start_future_gp(rnp, rdp, &c); 2225 raw_spin_unlock_irqrestore(&rnp->lock, flags); 2226 if (needwake) 2227 rcu_gp_kthread_wake(rdp->rsp); 2228 2229 /* 2230 * Wait for the grace period. Do so interruptibly to avoid messing 2231 * up the load average. 2232 */ 2233 trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait")); 2234 for (;;) { 2235 wait_event_interruptible( 2236 rnp->nocb_gp_wq[c & 0x1], 2237 (d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c))); 2238 if (likely(d)) 2239 break; 2240 flush_signals(current); 2241 trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait")); 2242 } 2243 trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait")); 2244 smp_mb(); /* Ensure that CB invocation happens after GP end. */ 2245} 2246 2247/* 2248 * Leaders come here to wait for additional callbacks to show up. 2249 * This function does not return until callbacks appear. 2250 */ 2251static void nocb_leader_wait(struct rcu_data *my_rdp) 2252{ 2253 bool firsttime = true; 2254 bool gotcbs; 2255 struct rcu_data *rdp; 2256 struct rcu_head **tail; 2257 2258wait_again: 2259 2260 /* Wait for callbacks to appear. */ 2261 if (!rcu_nocb_poll) { 2262 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep"); 2263 wait_event_interruptible(my_rdp->nocb_wq, 2264 !ACCESS_ONCE(my_rdp->nocb_leader_sleep)); 2265 /* Memory barrier handled by smp_mb() calls below and repoll. */ 2266 } else if (firsttime) { 2267 firsttime = false; /* Don't drown trace log with "Poll"! */ 2268 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Poll"); 2269 } 2270 2271 /* 2272 * Each pass through the following loop checks a follower for CBs. 2273 * We are our own first follower. Any CBs found are moved to 2274 * nocb_gp_head, where they await a grace period. 2275 */ 2276 gotcbs = false; 2277 for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) { 2278 rdp->nocb_gp_head = ACCESS_ONCE(rdp->nocb_head); 2279 if (!rdp->nocb_gp_head) 2280 continue; /* No CBs here, try next follower. */ 2281 2282 /* Move callbacks to wait-for-GP list, which is empty. */ 2283 ACCESS_ONCE(rdp->nocb_head) = NULL; 2284 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head); 2285 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0); 2286 rdp->nocb_gp_count_lazy = 2287 atomic_long_xchg(&rdp->nocb_q_count_lazy, 0); 2288 gotcbs = true; 2289 } 2290 2291 /* 2292 * If there were no callbacks, sleep a bit, rescan after a 2293 * memory barrier, and go retry. 2294 */ 2295 if (unlikely(!gotcbs)) { 2296 if (!rcu_nocb_poll) 2297 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, 2298 "WokeEmpty"); 2299 flush_signals(current); 2300 schedule_timeout_interruptible(1); 2301 2302 /* Rescan in case we were a victim of memory ordering. */ 2303 my_rdp->nocb_leader_sleep = true; 2304 smp_mb(); /* Ensure _sleep true before scan. */ 2305 for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) 2306 if (ACCESS_ONCE(rdp->nocb_head)) { 2307 /* Found CB, so short-circuit next wait. */ 2308 my_rdp->nocb_leader_sleep = false; 2309 break; 2310 } 2311 goto wait_again; 2312 } 2313 2314 /* Wait for one grace period. */ 2315 rcu_nocb_wait_gp(my_rdp); 2316 2317 /* 2318 * We left ->nocb_leader_sleep unset to reduce cache thrashing. 
 * We set it now, but recheck for new callbacks while
2320 * traversing our follower list.
2321	 */
2322	my_rdp->nocb_leader_sleep = true;
2323	smp_mb();  /* Ensure _sleep true before scan of ->nocb_head. */
2324
2325	/* Each pass through the following loop wakes a follower, if needed. */
2326	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
2327		if (ACCESS_ONCE(rdp->nocb_head))
2328			my_rdp->nocb_leader_sleep = false; /* No need to sleep. */
2329		if (!rdp->nocb_gp_head)
2330			continue; /* No CBs, so no need to wake follower. */
2331
2332		/* Append callbacks to follower's "done" list. */
2333		tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
2334		*tail = rdp->nocb_gp_head;
2335		atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count);
2336		atomic_long_add(rdp->nocb_gp_count_lazy,
2337				&rdp->nocb_follower_count_lazy);
2338		if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
2339			/*
2340			 * List was empty, wake up the follower.
2341			 * Memory barriers supplied by atomic_long_add().
2342			 */
2343			wake_up(&rdp->nocb_wq);
2344		}
2345	}
2346
2347	/* If we (the leader) don't have CBs, go wait some more. */
2348	if (!my_rdp->nocb_follower_head)
2349		goto wait_again;
2350}
2351
2352/*
2353 * Followers come here to wait for additional callbacks to show up.
2354 * This function does not return until callbacks appear.
2355 */
2356static void nocb_follower_wait(struct rcu_data *rdp)
2357{
2358	bool firsttime = true;
2359
2360	for (;;) {
2361		if (!rcu_nocb_poll) {
2362			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2363					    "FollowerSleep");
2364			wait_event_interruptible(rdp->nocb_wq,
2365						 ACCESS_ONCE(rdp->nocb_follower_head));
2366		} else if (firsttime) {
2367			/* Don't drown trace log with "Poll"! */
2368			firsttime = false;
2369			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "Poll");
2370		}
2371		if (smp_load_acquire(&rdp->nocb_follower_head)) {
2372			/* ^^^ Ensure CB invocation follows _head test. */
2373			return;
2374		}
2375		if (!rcu_nocb_poll)
2376			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2377					    "WokeEmpty");
2378		flush_signals(current);
2379		schedule_timeout_interruptible(1);
2380	}
2381}
2382
2383/*
2384 * Per-rcu_data kthread, but only for no-CBs CPUs. Each kthread invokes
2385 * callbacks queued by the corresponding no-CBs CPU; however, there is
2386 * an optional leader-follower relationship so that the grace-period
2387 * kthreads don't have to do quite so many wakeups.
2388 */
2389static int rcu_nocb_kthread(void *arg)
2390{
2391	int c, cl;
2392	struct rcu_head *list;
2393	struct rcu_head *next;
2394	struct rcu_head **tail;
2395	struct rcu_data *rdp = arg;
2396
2397	/* Each pass through this loop invokes one batch of callbacks. */
2398	for (;;) {
2399		/* Wait for callbacks. */
2400		if (rdp->nocb_leader == rdp)
2401			nocb_leader_wait(rdp);
2402		else
2403			nocb_follower_wait(rdp);
2404
2405		/* Pull the ready-to-invoke callbacks onto local list. */
2406		list = ACCESS_ONCE(rdp->nocb_follower_head);
2407		BUG_ON(!list);
2408		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
2409		ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
2410		tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
2411		c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
2412		cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
2413		rdp->nocb_p_count += c;
2414		rdp->nocb_p_count_lazy += cl;
2415
2416		/* Each pass through the following loop invokes a callback.
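 * __rcu_reclaim() returns true when the callback was a kfree_rcu()
 * offset and the memory was freed directly; those invocations are
 * tallied in cl, which is how the lazy count is maintained.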
*/ 2417 trace_rcu_batch_start(rdp->rsp->name, cl, c, -1); 2418 c = cl = 0; 2419 while (list) { 2420 next = list->next; 2421 /* Wait for enqueuing to complete, if needed. */ 2422 while (next == NULL && &list->next != tail) { 2423 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2424 TPS("WaitQueue")); 2425 schedule_timeout_interruptible(1); 2426 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, 2427 TPS("WokeQueue")); 2428 next = list->next; 2429 } 2430 debug_rcu_head_unqueue(list); 2431 local_bh_disable(); 2432 if (__rcu_reclaim(rdp->rsp->name, list)) 2433 cl++; 2434 c++; 2435 local_bh_enable(); 2436 list = next; 2437 } 2438 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1); 2439 ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c; 2440 ACCESS_ONCE(rdp->nocb_p_count_lazy) = 2441 rdp->nocb_p_count_lazy - cl; 2442 rdp->n_nocbs_invoked += c; 2443 } 2444 return 0; 2445} 2446 2447/* Is a deferred wakeup of rcu_nocb_kthread() required? */ 2448static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp) 2449{ 2450 return ACCESS_ONCE(rdp->nocb_defer_wakeup); 2451} 2452 2453/* Do a deferred wakeup of rcu_nocb_kthread(). */ 2454static void do_nocb_deferred_wakeup(struct rcu_data *rdp) 2455{ 2456 int ndw; 2457 2458 if (!rcu_nocb_need_deferred_wakeup(rdp)) 2459 return; 2460 ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup); 2461 ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT; 2462 wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE); 2463 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake")); 2464} 2465 2466/* Initialize per-rcu_data variables for no-CBs CPUs. */ 2467static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) 2468{ 2469 rdp->nocb_tail = &rdp->nocb_head; 2470 init_waitqueue_head(&rdp->nocb_wq); 2471 rdp->nocb_follower_tail = &rdp->nocb_follower_head; 2472} 2473 2474/* How many follower CPU IDs per leader? Default of -1 for sqrt(nr_cpu_ids). */ 2475static int rcu_nocb_leader_stride = -1; 2476module_param(rcu_nocb_leader_stride, int, 0444); 2477 2478/* 2479 * Create a kthread for each RCU flavor for each no-CBs CPU. 2480 * Also initialize leader-follower relationships. 2481 */ 2482static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp) 2483{ 2484 int cpu; 2485 int ls = rcu_nocb_leader_stride; 2486 int nl = 0; /* Next leader. */ 2487 struct rcu_data *rdp; 2488 struct rcu_data *rdp_leader = NULL; /* Suppress misguided gcc warn. */ 2489 struct rcu_data *rdp_prev = NULL; 2490 struct task_struct *t; 2491 2492 if (rcu_nocb_mask == NULL) 2493 return; 2494#if defined(CONFIG_NO_HZ_FULL) && !defined(CONFIG_NO_HZ_FULL_ALL) 2495 if (tick_nohz_full_running) 2496 cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask); 2497#endif /* #if defined(CONFIG_NO_HZ_FULL) && !defined(CONFIG_NO_HZ_FULL_ALL) */ 2498 if (ls == -1) { 2499 ls = int_sqrt(nr_cpu_ids); 2500 rcu_nocb_leader_stride = ls; 2501 } 2502 2503 /* 2504 * Each pass through this loop sets up one rcu_data structure and 2505 * spawns one rcu_nocb_kthread(). 2506 */ 2507 for_each_cpu(cpu, rcu_nocb_mask) { 2508 rdp = per_cpu_ptr(rsp->rda, cpu); 2509 if (rdp->cpu >= nl) { 2510 /* New leader, set up for followers & next leader. */ 2511 nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls; 2512 rdp->nocb_leader = rdp; 2513 rdp_leader = rdp; 2514 } else { 2515 /* Another follower, link to previous leader. */ 2516 rdp->nocb_leader = rdp_leader; 2517 rdp_prev->nocb_next_follower = rdp; 2518 } 2519 rdp_prev = rdp; 2520 2521 /* Spawn the kthread for this CPU. 
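 * For example, with nr_cpu_ids = 16 and all CPUs offloaded, the
 * default stride is int_sqrt(16) = 4, making CPUs 0, 4, 8, and 12
 * leaders for CPUs 0-3, 4-7, 8-11, and 12-15, respectively. The
 * resulting kthreads are named "rcuo<abbr>/<cpu>", for example
 * "rcuop/12" for the rcu_preempt flavor on CPU 12.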
 */
2522		t = kthread_run(rcu_nocb_kthread, rdp,
2523				"rcuo%c/%d", rsp->abbr, cpu);
2524		BUG_ON(IS_ERR(t));
2525		ACCESS_ONCE(rdp->nocb_kthread) = t;
2526	}
2527}
2528
2529/* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
2530static bool init_nocb_callback_list(struct rcu_data *rdp)
2531{
2532	if (rcu_nocb_mask == NULL ||
2533	    !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask))
2534		return false;
2535	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
2536	return true;
2537}
2538
2539#else /* #ifdef CONFIG_RCU_NOCB_CPU */
2540
2541static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
2542{
2543}
2544
2545static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2546{
2547}
2548
2549static void rcu_init_one_nocb(struct rcu_node *rnp)
2550{
2551}
2552
2553static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2554			    bool lazy, unsigned long flags)
2555{
2556	return 0;
2557}
2558
2559static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2560						     struct rcu_data *rdp,
2561						     unsigned long flags)
2562{
2563	return 0;
2564}
2565
2566static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2567{
2568}
2569
2570static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
2571{
2572	return false;
2573}
2574
2575static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2576{
2577}
2578
2579static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2580{
2581}
2582
2583static bool init_nocb_callback_list(struct rcu_data *rdp)
2584{
2585	return false;
2586}
2587
2588#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
2589
2590/*
2591 * An adaptive-ticks CPU can potentially execute in kernel mode for an
2592 * arbitrarily long period of time with the scheduling-clock tick turned
2593 * off. RCU will be paying attention to this CPU because it is in the
2594 * kernel, but the CPU cannot be guaranteed to be executing the RCU state
2595 * machine because the scheduling-clock tick has been disabled. Therefore,
2596 * if an adaptive-ticks CPU is failing to respond to the current grace
2597 * period and has not been idle from an RCU perspective, kick it.
2598 */
2599static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
2600{
2601#ifdef CONFIG_NO_HZ_FULL
2602	if (tick_nohz_full_cpu(cpu))
2603		smp_send_reschedule(cpu);
2604#endif /* #ifdef CONFIG_NO_HZ_FULL */
2605}
2606
2607
2608#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
2609
2610/*
2611 * Define RCU flavor that holds sysidle state. This needs to be the
2612 * most active flavor of RCU.
2613 */
2614#ifdef CONFIG_PREEMPT_RCU
2615static struct rcu_state *rcu_sysidle_state = &rcu_preempt_state;
2616#else /* #ifdef CONFIG_PREEMPT_RCU */
2617static struct rcu_state *rcu_sysidle_state = &rcu_sched_state;
2618#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
2619
2620static int full_sysidle_state;		/* Current system-idle state. */
2621#define RCU_SYSIDLE_NOT		0	/* Some CPU is not idle. */
2622#define RCU_SYSIDLE_SHORT	1	/* All CPUs idle for brief period. */
2623#define RCU_SYSIDLE_LONG	2	/* All CPUs idle for long enough. */
2624#define RCU_SYSIDLE_FULL	3	/* All CPUs idle, ready for sysidle. */
2625#define RCU_SYSIDLE_FULL_NOTED	4	/* Actually entered sysidle state. */
2626
2627/*
2628 * Invoked to note exit from irq or task transition to idle. Note that
2629 * usermode execution does -not- count as idle here! After all, we want
2630 * to detect full-system idle states, not RCU quiescent states and grace
2631 * periods. The caller must have disabled interrupts.
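 *
 * Irq entry and exit adjust ->dynticks_idle_nesting by one, while
 * task-level idle entry and exit use DYNTICK_TASK_NEST_VALUE
 * increments so that irq misnesting can be tolerated (see
 * rcu_sysidle_exit() below).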
2632 */ 2633static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq) 2634{ 2635 unsigned long j; 2636 2637 /* Adjust nesting, check for fully idle. */ 2638 if (irq) { 2639 rdtp->dynticks_idle_nesting--; 2640 WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0); 2641 if (rdtp->dynticks_idle_nesting != 0) 2642 return; /* Still not fully idle. */ 2643 } else { 2644 if ((rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) == 2645 DYNTICK_TASK_NEST_VALUE) { 2646 rdtp->dynticks_idle_nesting = 0; 2647 } else { 2648 rdtp->dynticks_idle_nesting -= DYNTICK_TASK_NEST_VALUE; 2649 WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0); 2650 return; /* Still not fully idle. */ 2651 } 2652 } 2653 2654 /* Record start of fully idle period. */ 2655 j = jiffies; 2656 ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j; 2657 smp_mb__before_atomic(); 2658 atomic_inc(&rdtp->dynticks_idle); 2659 smp_mb__after_atomic(); 2660 WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1); 2661} 2662 2663/* 2664 * Unconditionally force exit from full system-idle state. This is 2665 * invoked when a normal CPU exits idle, but must be called separately 2666 * for the timekeeping CPU (tick_do_timer_cpu). The reason for this 2667 * is that the timekeeping CPU is permitted to take scheduling-clock 2668 * interrupts while the system is in system-idle state, and of course 2669 * rcu_sysidle_exit() has no way of distinguishing a scheduling-clock 2670 * interrupt from any other type of interrupt. 2671 */ 2672void rcu_sysidle_force_exit(void) 2673{ 2674 int oldstate = ACCESS_ONCE(full_sysidle_state); 2675 int newoldstate; 2676 2677 /* 2678 * Each pass through the following loop attempts to exit full 2679 * system-idle state. If contention proves to be a problem, 2680 * a trylock-based contention tree could be used here. 2681 */ 2682 while (oldstate > RCU_SYSIDLE_SHORT) { 2683 newoldstate = cmpxchg(&full_sysidle_state, 2684 oldstate, RCU_SYSIDLE_NOT); 2685 if (oldstate == newoldstate && 2686 oldstate == RCU_SYSIDLE_FULL_NOTED) { 2687 rcu_kick_nohz_cpu(tick_do_timer_cpu); 2688 return; /* We cleared it, done! */ 2689 } 2690 oldstate = newoldstate; 2691 } 2692 smp_mb(); /* Order initial oldstate fetch vs. later non-idle work. */ 2693} 2694 2695/* 2696 * Invoked to note entry to irq or task transition from idle. Note that 2697 * usermode execution does -not- count as idle here! The caller must 2698 * have disabled interrupts. 2699 */ 2700static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq) 2701{ 2702 /* Adjust nesting, check for already non-idle. */ 2703 if (irq) { 2704 rdtp->dynticks_idle_nesting++; 2705 WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0); 2706 if (rdtp->dynticks_idle_nesting != 1) 2707 return; /* Already non-idle. */ 2708 } else { 2709 /* 2710 * Allow for irq misnesting. Yes, it really is possible 2711 * to enter an irq handler then never leave it, and maybe 2712 * also vice versa. Handle both possibilities. 2713 */ 2714 if (rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) { 2715 rdtp->dynticks_idle_nesting += DYNTICK_TASK_NEST_VALUE; 2716 WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0); 2717 return; /* Already non-idle. */ 2718 } else { 2719 rdtp->dynticks_idle_nesting = DYNTICK_TASK_EXIT_IDLE; 2720 } 2721 } 2722 2723 /* Record end of idle period. 
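 * As with ->dynticks, the ->dynticks_idle counter is even while the
 * CPU is idle and odd otherwise, so this atomic_inc() flags the
 * transition back to non-idle.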
 */
2724	smp_mb__before_atomic();
2725	atomic_inc(&rdtp->dynticks_idle);
2726	smp_mb__after_atomic();
2727	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
2728
2729	/*
2730	 * If we are the timekeeping CPU, we are permitted to be non-idle
2731	 * during a system-idle state. This must be the case, because
2732	 * the timekeeping CPU has to take scheduling-clock interrupts
2733	 * during the time that the system is transitioning to full
2734	 * system-idle state. This means that the timekeeping CPU must
2735	 * invoke rcu_sysidle_force_exit() directly if it does anything
2736	 * more than take a scheduling-clock interrupt.
2737	 */
2738	if (smp_processor_id() == tick_do_timer_cpu)
2739		return;
2740
2741	/* Update system-idle state: We are clearly no longer fully idle! */
2742	rcu_sysidle_force_exit();
2743}
2744
2745/*
2746 * Check to see if the current CPU is idle. Note that usermode execution
2747 * does not count as idle. The caller must have disabled interrupts.
2748 */
2749static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
2750				  unsigned long *maxj)
2751{
2752	int cur;
2753	unsigned long j;
2754	struct rcu_dynticks *rdtp = rdp->dynticks;
2755
2756	/*
2757	 * If some other CPU has already reported non-idle, if this is
2758	 * not the flavor of RCU that tracks sysidle state, or if this
2759	 * is an offline or the timekeeping CPU, nothing to do.
2760	 */
2761	if (!*isidle || rdp->rsp != rcu_sysidle_state ||
2762	    cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
2763		return;
2764	if (rcu_gp_in_progress(rdp->rsp))
2765		WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
2766
2767	/* Pick up current idle and NMI-nesting counter and check. */
2768	cur = atomic_read(&rdtp->dynticks_idle);
2769	if (cur & 0x1) {
2770		*isidle = false; /* We are not idle! */
2771		return;
2772	}
2773	smp_mb(); /* Read counters before timestamps. */
2774
2775	/* Pick up timestamps. */
2776	j = ACCESS_ONCE(rdtp->dynticks_idle_jiffies);
2777	/* If this CPU entered idle more recently, update maxj timestamp. */
2778	if (ULONG_CMP_LT(*maxj, j))
2779		*maxj = j;
2780}
2781
2782/*
2783 * Is this the flavor of RCU that is handling full-system idle?
2784 */
2785static bool is_sysidle_rcu_state(struct rcu_state *rsp)
2786{
2787	return rsp == rcu_sysidle_state;
2788}
2789
2790/*
2791 * Return a delay in jiffies based on the number of CPUs, rcu_node
2792 * leaf fanout, and jiffies tick rate. The idea is to allow larger
2793 * systems more time to transition to full-idle state in order to
2794 * avoid the cache thrashing that otherwise occurs on the state variable.
2795 * Really small systems (fewer than a couple of tens of CPUs) should
2796 * instead use a single global atomically incremented counter, and later
2797 * versions of this will automatically reconfigure themselves accordingly.
2798 */
2799static unsigned long rcu_sysidle_delay(void)
2800{
2801	if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
2802		return 0;
2803	return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000);
2804}
2805
2806/*
2807 * Advance the full-system-idle state. This is invoked when all of
2808 * the non-timekeeping CPUs are idle.
2809 */
2810static void rcu_sysidle(unsigned long j)
2811{
2812	/* Check the current state. */
2813	switch (ACCESS_ONCE(full_sysidle_state)) {
2814	case RCU_SYSIDLE_NOT:
2815
2816		/* First time all are idle, so note a short idle period.
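 * Each later advance (SHORT to LONG to FULL) must wait at least
 * rcu_sysidle_delay() jiffies; for example, nr_cpu_ids = 64 with
 * HZ = 1000 and rcu_fanout_leaf = 16 gives DIV_ROUND_UP(64 * 1000,
 * 16 * 1000) = 4 jiffies per step.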
*/ 2817 ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT; 2818 break; 2819 2820 case RCU_SYSIDLE_SHORT: 2821 2822 /* 2823 * Idle for a bit, time to advance to next state? 2824 * cmpxchg failure means race with non-idle, let them win. 2825 */ 2826 if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay())) 2827 (void)cmpxchg(&full_sysidle_state, 2828 RCU_SYSIDLE_SHORT, RCU_SYSIDLE_LONG); 2829 break; 2830 2831 case RCU_SYSIDLE_LONG: 2832 2833 /* 2834 * Do an additional check pass before advancing to full. 2835 * cmpxchg failure means race with non-idle, let them win. 2836 */ 2837 if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay())) 2838 (void)cmpxchg(&full_sysidle_state, 2839 RCU_SYSIDLE_LONG, RCU_SYSIDLE_FULL); 2840 break; 2841 2842 default: 2843 break; 2844 } 2845} 2846 2847/* 2848 * Found a non-idle non-timekeeping CPU, so kick the system-idle state 2849 * back to the beginning. 2850 */ 2851static void rcu_sysidle_cancel(void) 2852{ 2853 smp_mb(); 2854 if (full_sysidle_state > RCU_SYSIDLE_SHORT) 2855 ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT; 2856} 2857 2858/* 2859 * Update the sysidle state based on the results of a force-quiescent-state 2860 * scan of the CPUs' dyntick-idle state. 2861 */ 2862static void rcu_sysidle_report(struct rcu_state *rsp, int isidle, 2863 unsigned long maxj, bool gpkt) 2864{ 2865 if (rsp != rcu_sysidle_state) 2866 return; /* Wrong flavor, ignore. */ 2867 if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) 2868 return; /* Running state machine from timekeeping CPU. */ 2869 if (isidle) 2870 rcu_sysidle(maxj); /* More idle! */ 2871 else 2872 rcu_sysidle_cancel(); /* Idle is over. */ 2873} 2874 2875/* 2876 * Wrapper for rcu_sysidle_report() when called from the grace-period 2877 * kthread's context. 2878 */ 2879static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle, 2880 unsigned long maxj) 2881{ 2882 rcu_sysidle_report(rsp, isidle, maxj, true); 2883} 2884 2885/* Callback and function for forcing an RCU grace period. */ 2886struct rcu_sysidle_head { 2887 struct rcu_head rh; 2888 int inuse; 2889}; 2890 2891static void rcu_sysidle_cb(struct rcu_head *rhp) 2892{ 2893 struct rcu_sysidle_head *rshp; 2894 2895 /* 2896 * The following memory barrier is needed to replace the 2897 * memory barriers that would normally be in the memory 2898 * allocator. 2899 */ 2900 smp_mb(); /* grace period precedes setting inuse. */ 2901 2902 rshp = container_of(rhp, struct rcu_sysidle_head, rh); 2903 ACCESS_ONCE(rshp->inuse) = 0; 2904} 2905 2906/* 2907 * Check to see if the system is fully idle, other than the timekeeping CPU. 2908 * The caller must have disabled interrupts. 2909 */ 2910bool rcu_sys_is_idle(void) 2911{ 2912 static struct rcu_sysidle_head rsh; 2913 int rss = ACCESS_ONCE(full_sysidle_state); 2914 2915 if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu)) 2916 return false; 2917 2918 /* Handle small-system case by doing a full scan of CPUs. */ 2919 if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) { 2920 int oldrss = rss - 1; 2921 2922 /* 2923 * One pass to advance to each state up to _FULL. 2924 * Give up if any pass fails to advance the state. 2925 */ 2926 while (rss < RCU_SYSIDLE_FULL && oldrss < rss) { 2927 int cpu; 2928 bool isidle = true; 2929 unsigned long maxj = jiffies - ULONG_MAX / 4; 2930 struct rcu_data *rdp; 2931 2932 /* Scan all the CPUs looking for nonidle CPUs. 
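 * Because maxj was initialized to a time in the distant past, the
 * scan leaves it holding the most recent idle-entry timestamp among
 * the idle CPUs.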
*/ 2933 for_each_possible_cpu(cpu) { 2934 rdp = per_cpu_ptr(rcu_sysidle_state->rda, cpu); 2935 rcu_sysidle_check_cpu(rdp, &isidle, &maxj); 2936 if (!isidle) 2937 break; 2938 } 2939 rcu_sysidle_report(rcu_sysidle_state, 2940 isidle, maxj, false); 2941 oldrss = rss; 2942 rss = ACCESS_ONCE(full_sysidle_state); 2943 } 2944 } 2945 2946 /* If this is the first observation of an idle period, record it. */ 2947 if (rss == RCU_SYSIDLE_FULL) { 2948 rss = cmpxchg(&full_sysidle_state, 2949 RCU_SYSIDLE_FULL, RCU_SYSIDLE_FULL_NOTED); 2950 return rss == RCU_SYSIDLE_FULL; 2951 } 2952 2953 smp_mb(); /* ensure rss load happens before later caller actions. */ 2954 2955 /* If already fully idle, tell the caller (in case of races). */ 2956 if (rss == RCU_SYSIDLE_FULL_NOTED) 2957 return true; 2958 2959 /* 2960 * If we aren't there yet, and a grace period is not in flight, 2961 * initiate a grace period. Either way, tell the caller that 2962 * we are not there yet. We use an xchg() rather than an assignment 2963 * to make up for the memory barriers that would otherwise be 2964 * provided by the memory allocator. 2965 */ 2966 if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL && 2967 !rcu_gp_in_progress(rcu_sysidle_state) && 2968 !rsh.inuse && xchg(&rsh.inuse, 1) == 0) 2969 call_rcu(&rsh.rh, rcu_sysidle_cb); 2970 return false; 2971} 2972 2973/* 2974 * Initialize dynticks sysidle state for CPUs coming online. 2975 */ 2976static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp) 2977{ 2978 rdtp->dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE; 2979} 2980 2981#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ 2982 2983static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq) 2984{ 2985} 2986 2987static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq) 2988{ 2989} 2990 2991static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle, 2992 unsigned long *maxj) 2993{ 2994} 2995 2996static bool is_sysidle_rcu_state(struct rcu_state *rsp) 2997{ 2998 return false; 2999} 3000 3001static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle, 3002 unsigned long maxj) 3003{ 3004} 3005 3006static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp) 3007{ 3008} 3009 3010#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ 3011 3012/* 3013 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the 3014 * grace-period kthread will do force_quiescent_state() processing? 3015 * The idea is to avoid waking up RCU core processing on such a 3016 * CPU unless the grace period has extended for too long. 3017 * 3018 * This code relies on the fact that all NO_HZ_FULL CPUs are also 3019 * CONFIG_RCU_NOCB_CPU CPUs. 3020 */ 3021static bool rcu_nohz_full_cpu(struct rcu_state *rsp) 3022{ 3023#ifdef CONFIG_NO_HZ_FULL 3024 if (tick_nohz_full_cpu(smp_processor_id()) && 3025 (!rcu_gp_in_progress(rsp) || 3026 ULONG_CMP_LT(jiffies, ACCESS_ONCE(rsp->gp_start) + HZ))) 3027 return 1; 3028#endif /* #ifdef CONFIG_NO_HZ_FULL */ 3029 return 0; 3030} 3031 3032/* 3033 * Bind the grace-period kthread for the sysidle flavor of RCU to the 3034 * timekeeping CPU. 
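 * When sysidle is not configured, bind it to the housekeeping CPUs
 * instead; for example, booting an eight-CPU system with
 * "nohz_full=1-7" leaves CPU 0 as the lone housekeeping CPU.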
3035 */ 3036static void rcu_bind_gp_kthread(void) 3037{ 3038 int __maybe_unused cpu; 3039 3040 if (!tick_nohz_full_enabled()) 3041 return; 3042#ifdef CONFIG_NO_HZ_FULL_SYSIDLE 3043 cpu = tick_do_timer_cpu; 3044 if (cpu >= 0 && cpu < nr_cpu_ids && raw_smp_processor_id() != cpu) 3045 set_cpus_allowed_ptr(current, cpumask_of(cpu)); 3046#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ 3047 if (!is_housekeeping_cpu(raw_smp_processor_id())) 3048 housekeeping_affine(current); 3049#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ 3050} 3051
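
/*
 * Editorial usage sketch, not part of the original file: the
 * timekeeping CPU is expected to poll rcu_sys_is_idle() with
 * interrupts disabled, roughly as follows, and to invoke
 * rcu_sysidle_force_exit() once it resumes doing real work.
 * The enter_full_system_idle() helper is hypothetical.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	if (rcu_sys_is_idle())
 *		enter_full_system_idle();	- hypothetical
 *	local_irq_restore(flags);
 */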