tree_plugin.h revision 1772947bd0126661866069157e95197e9c0020e9
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include "../time/tick-internal.h"

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#include "../locking/rtmutex_common.h"
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
static char __initdata nocb_buf[NR_CPUS * 5];
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
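/*
 * Illustrative note (not part of the original file): booting with
 * "rcu_nocbs=1-7" populates rcu_nocb_mask above with CPUs 1-7 so that
 * their callbacks are offloaded to per-CPU kthreads, and adding the
 * "rcu_nocb_poll" boot parameter switches those kthreads to polling.
 */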
/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	pr_info("\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
		CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	pr_info("\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	pr_info("\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	pr_info("\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	pr_info("\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
	pr_info("\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	pr_info("\tFour-level hierarchy is enabled.\n");
#endif
	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
}

#ifdef CONFIG_TREE_PREEMPT_RCU

RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
static struct rcu_state *rcu_state_p = &rcu_preempt_state;

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers of this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}
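/*
 * Illustrative timeline (an assumed example, not code in this file):
 * a task preempted within an RCU read-side critical section is queued
 * on its leaf rcu_node's ->blkd_tasks list by the context-switch hook
 * below, and dequeues itself at rcu_read_unlock() time:
 *
 *	rcu_read_lock();
 *	...			<- preempted here; queued on ->blkd_tasks
 *	rcu_read_unlock();	<- rcu_read_unlock_special() dequeues it
 */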
/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		smp_mb__after_unlock_lock();
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}
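/*
 * Illustrative ->blkd_tasks layout (an assumed sketch): newly blocked
 * tasks are added at the head, and the entries from ->gp_tasks to the
 * tail are the ones blocking the current grace period:
 *
 *	->blkd_tasks -> [new] -> ... -> [->gp_tasks] -> ... -> [old]
 *
 * The current grace period therefore cannot end until ->gp_tasks
 * becomes NULL.
 */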
/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	smp_mb__after_unlock_lock();
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}
/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	int empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	bool drop_boost_mutex = false;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		rcu_preempt_qs(smp_processor_id());
		if (!t->rcu_read_unlock_special) {
			local_irq_restore(flags);
			return;
		}
	}

	/* Hardware IRQ handlers cannot block, complain if they get here. */
	if (WARN_ON_ONCE(in_irq() || in_serving_softirq())) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			smp_mb__after_unlock_lock();
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot ->boost_mtx ownership with rcu_node lock held. */
		drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		}

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (drop_boost_mutex) {
			rt_mutex_unlock(&rnp->boost_mtx);
			complete(&rnp->boost_completion);
		}
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
	pr_cont("\n");
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
}

static void rcu_print_task_stall_end(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	rcu_print_task_stall_begin(rnp);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	rcu_print_task_stall_end();
	return ndetected;
}
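/*
 * Example console fragment (illustrative values only): with
 * CONFIG_RCU_CPU_STALL_INFO, the two helpers above bracket the PID
 * list printed by rcu_print_task_stall(), yielding something like:
 *
 *	Tasks blocked on level-0 rcu_node (CPUs 0-15): P3421 P912
 */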
/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if non-NULL.  This might result in waiting for more tasks than
	 * absolutely necessary, but this is a good performance/complexity
	 * tradeoff.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
		smp_mb__after_unlock_lock();
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
	}

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
#ifdef CONFIG_RCU_BOOST
	rnp->boost_tasks = NULL;
	/*
	 * In case root is being boosted and leaf was not.  Make sure
	 * that we boost the tasks blocking the current grace period
	 * in this case.
	 */
	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
	smp_mb__after_unlock_lock();
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks &&
	    rnp_root->boost_tasks != rnp_root->exp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

	return retval;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);
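/*
 * Example call_rcu() usage (illustrative only; struct foo and
 * foo_free_rcu() are assumed names, not part of this file):
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_free_rcu(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	list_del_rcu(&p->list);
 *	call_rcu(&p->rcu, foo_free_rcu);
 *
 * or, blocking instead: list_del_rcu(), synchronize_rcu(), then kfree().
 */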
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 *
 * See the description of synchronize_sched() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");
	if (!rcu_scheduler_active)
		return;
	if (rcu_expedited)
		synchronize_rcu_expedited();
	else
		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}
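/*
 * Illustrative note: synchronize_rcu_expedited() below sleeps until
 * sync_rcu_preempt_exp_done() holds for the root rcu_node, that is,
 * until the root's ->exp_tasks is NULL and its ->expmask has drained
 * to zero as the leaves report in via rcu_report_exp_rnp().
 */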
/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	smp_mb__after_unlock_lock();
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				wake_up(&sync_rcu_preempt_exp_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		smp_mb__after_unlock_lock();
		rnp->expmask &= ~mask;
	}
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
 * CPU hotplug operations.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	smp_mb__after_unlock_lock();
	if (list_empty(&rnp->blkd_tasks)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.
 * In fact, if you are using synchronize_rcu_expedited() in a loop,
 * please restructure your code to batch your updates, and then use a
 * single synchronize_rcu() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier.  Failing to observe
 * these restrictions will result in deadlock.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	unsigned long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Block CPU-hotplug operations.  This means that any CPU-hotplug
	 * operation that finds an rcu_node structure with tasks in the
	 * process of being boosted will know that all tasks blocking
	 * this expedited grace period will already be in the process of
	 * being boosted.  This simplifies the process of moving tasks
	 * from leaf to root rcu_node structures.
	 */
	get_online_cpus();

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (ULONG_CMP_LT(snap,
		    ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
			put_online_cpus();
			goto mb_ret; /* Others did our work for us. */
		}
		if (trycount++ < 10) {
			udelay(trycount * num_online_cpus());
		} else {
			put_online_cpus();
			wait_rcu_gp(call_rcu);
			return;
		}
	}
	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
		put_online_cpus();
		goto unlock_mb_ret; /* Others did our work for us. */
	}

	/* Force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		smp_mb__after_unlock_lock();
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	put_online_cpus();

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
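/*
 * Illustrative batching pattern suggested by the comment above (an
 * assumed example): rather than one expedited grace period per removed
 * element, unlink a whole batch under the update-side lock, invoke
 * synchronize_rcu() once, and only then free the batch.
 */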
/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 *
 * Note that this primitive does not necessarily wait for an RCU grace period
 * to complete.  For example, if there are no RCU callbacks queued anywhere
 * in the system, then rcu_barrier() is within its rights to return
 * immediately, without waiting for anything, much less an RCU grace period.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (likely(list_empty(&current->rcu_node_entry)))
		return;
	t->rcu_read_lock_nesting = 1;
	barrier();
	t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
	__rcu_read_unlock();
}
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state_p = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}
/*
 * Because preemptible RCU does not exist, tasks cannot possibly exit
 * while in preemptible RCU read-side critical sections.
 */
void exit_rcu(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "../locking/rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

static void rcu_wake_cond(struct task_struct *t, int status)
{
	/*
	 * If the thread is yielding, only wake it when this
	 * is invoked from idle.
	 */
	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
		wake_up_process(t);
}
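/*
 * Illustrative scenario (not from the original file): a SCHED_OTHER
 * task preempted within an RCU read-side critical section can be kept
 * off-CPU indefinitely by higher-priority tasks, stalling the grace
 * period.  rcu_boost() below lends such a task the boost kthread's
 * RT priority through ->boost_mtx until it exits its critical section.
 */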
/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);
	smp_mb__after_unlock_lock();

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
	init_completion(&rnp->boost_completion);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	/* Lock only for side effect: boosts task t's priority. */
	rt_mutex_lock(&rnp->boost_mtx);
	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */

	/* Wait for boostee to be done w/boost_mtx before reinitializing. */
	wait_for_completion(&rnp->boost_completion);

	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization(TPS("Start boost kthread@init"));
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
			schedule_timeout_interruptible(2);
			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization(TPS("End boost kthread@notreached"));
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 * The ->boost_kthread_task is immortal, so we don't need to worry
 * about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t)
			rcu_wake_cond(t, rnp->boost_kthread_status);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}
/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
			      __this_cpu_read(rcu_cpu_kthread_status));
	}
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __this_cpu_read(rcu_cpu_kthread_task) == current;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
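/*
 * Worked example (illustrative): with HZ=250 and CONFIG_RCU_BOOST_DELAY
 * set to 500 milliseconds, RCU_BOOST_DELAY_JIFFIES above works out to
 * DIV_ROUND_UP(500 * 250, 1000) = 125 jiffies, so boosting can begin
 * half a second after the grace period starts.
 */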
/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
				       struct rcu_node *rnp)
{
	int rnp_index = rnp - &rsp->node[0];
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;

	if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
		return 0;

	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	smp_mb__after_unlock_lock();
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
	rcu_preempt_do_callbacks();
}

static void rcu_cpu_kthread_setup(unsigned int cpu)
{
	struct sched_param sp;

	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
}

static void rcu_cpu_kthread_park(unsigned int cpu)
{
	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
}

static int rcu_cpu_kthread_should_run(unsigned int cpu)
{
	return __this_cpu_read(rcu_cpu_has_work);
}

/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static void rcu_cpu_kthread(unsigned int cpu)
{
	unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
	char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
	int spincnt;

	for (spincnt = 0; spincnt < 10; spincnt++) {
		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
		local_bh_disable();
		*statusp = RCU_KTHREAD_RUNNING;
		this_cpu_inc(rcu_cpu_kthread_loops);
		local_irq_disable();
		work = *workp;
		*workp = 0;
		local_irq_enable();
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp == 0) {
			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
			*statusp = RCU_KTHREAD_WAITING;
			return;
		}
	}
	*statusp = RCU_KTHREAD_YIELDING;
	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
	schedule_timeout_interruptible(2);
	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
	*statusp = RCU_KTHREAD_WAITING;
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set, so use -1 if there
 * is no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	struct task_struct *t = rnp->boost_kthread_task;
	unsigned long mask = rnp->qsmaskinit;
	cpumask_var_t cm;
	int cpu;

	if (!t)
		return;
	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(t, cm);
	free_cpumask_var(cm);
}

static struct smp_hotplug_thread rcu_cpu_thread_spec = {
	.store			= &rcu_cpu_kthread_task,
	.thread_should_run	= rcu_cpu_kthread_should_run,
	.thread_fn		= rcu_cpu_kthread,
	.thread_comm		= "rcuc/%u",
	.setup			= rcu_cpu_kthread_setup,
	.park			= rcu_cpu_kthread_park,
};

/*
 * Spawn boost kthreads -- called as soon as the scheduler is running.
 */
static void __init rcu_spawn_boost_kthreads(void)
{
	struct rcu_node *rnp;
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(rcu_cpu_has_work, cpu) = 0;
	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
	rnp = rcu_get_root(rcu_state_p);
	(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state_p, rnp)
			(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
	}
}
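/*
 * Illustrative note: the smpboot_register_percpu_thread() call above
 * creates one "rcuc/N" kthread per CPU and parks/unparks them across
 * CPU-hotplug events, invoking the .setup, .park, and .thread_fn
 * callbacks from rcu_cpu_thread_spec as CPUs come and go.
 */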
static void rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active)
		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void __init rcu_spawn_boost_kthreads(void)
{
}

static void rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU
 * needs any flavor of RCU.
 */
#ifndef CONFIG_RCU_NOCB_CPU_ALL
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
	*delta_jiffies = ULONG_MAX;
	return rcu_cpu_has_callbacks(cpu, NULL);
}
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}

/*
 * Don't bother keeping a running count of the number of RCU callbacks
 * posted because CONFIG_RCU_FAST_NO_HZ=n.
 */
static void rcu_idle_count_callbacks_posted(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following two preprocessor symbols control this state machine:
 *
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
 *	permitted to sleep in dyntick-idle mode with only lazy RCU
 *	callbacks pending.  Setting this too high can OOM your system.
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */

static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
module_param(rcu_idle_gp_delay, int, 0644);
static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
module_param(rcu_idle_lazy_gp_delay, int, 0644);

extern int tick_nohz_active;

/*
 * Try to advance callbacks for all flavors of RCU on the current CPU, but
 * only if it has been a while since the last time we did so.  Afterwards,
 * if there are any callbacks ready for immediate invocation, return true.
 */
static bool __maybe_unused rcu_try_advance_all_cbs(void)
{
	bool cbs_ready = false;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	struct rcu_node *rnp;
	struct rcu_state *rsp;

	/* Exit early if we advanced recently. */
	if (jiffies == rdtp->last_advance_all)
		return 0;
	rdtp->last_advance_all = jiffies;

	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		rnp = rdp->mynode;

		/*
		 * Don't bother checking unless a grace period has
		 * completed since we last checked and there are
		 * callbacks not yet ready to invoke.
		 */
		if (rdp->completed != rnp->completed &&
		    rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
			note_gp_changes(rsp, rdp);

		if (cpu_has_callbacks_ready_to_invoke(rdp))
			cbs_ready = true;
	}
	return cbs_ready;
}

/*
 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
 * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
 * caller to set the timeout based on whether or not there are non-lazy
 * callbacks.
 *
 * The caller must have disabled interrupts.
 */
#ifndef CONFIG_RCU_NOCB_CPU_ALL
int rcu_needs_cpu(int cpu, unsigned long *dj)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	/* Snapshot to detect later posting of non-lazy callback. */
	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;

	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(cpu, &rdtp->all_lazy)) {
		*dj = ULONG_MAX;
		return 0;
	}

	/* Attempt to advance callbacks. */
	if (rcu_try_advance_all_cbs()) {
		/* Some ready to invoke, so initiate later invocation. */
		invoke_rcu_core();
		return 1;
	}
	rdtp->last_accelerate = jiffies;

	/* Request timer delay depending on laziness, and round. */
	if (!rdtp->all_lazy) {
		*dj = round_up(rcu_idle_gp_delay + jiffies,
			       rcu_idle_gp_delay) - jiffies;
	} else {
		*dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
	}
	return 0;
}
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
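/*
 * Worked example (illustrative) of the rounding in rcu_needs_cpu()
 * above: with rcu_idle_gp_delay = 4 and jiffies = 1001, the non-lazy
 * case yields *dj = round_up(1005, 4) - 1001 = 1008 - 1001 = 7, so
 * wakeups land on multiples of four jiffies and idle CPUs tend to
 * wake in batches rather than one at a time.
 */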
/*
 * Prepare a CPU for idle from an RCU perspective.  The first major task
 * is to sense whether nohz mode has been enabled or disabled via sysfs.
 * The second major task is to check to see if a non-lazy callback has
 * arrived at a CPU that previously had only lazy callbacks.  The third
 * major task is to accelerate (that is, assign grace-period numbers to)
 * any recently arrived callbacks.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(int cpu)
{
#ifndef CONFIG_RCU_NOCB_CPU_ALL
	bool needwake;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
	struct rcu_node *rnp;
	struct rcu_state *rsp;
	int tne;

	/* Handle nohz enablement switches conservatively. */
	tne = ACCESS_ONCE(tick_nohz_active);
	if (tne != rdtp->tick_nohz_enabled_snap) {
		if (rcu_cpu_has_callbacks(cpu, NULL))
			invoke_rcu_core(); /* force nohz to see update. */
		rdtp->tick_nohz_enabled_snap = tne;
		return;
	}
	if (!tne)
		return;

	/* If this is a no-CBs CPU, no callbacks, just return. */
	if (rcu_is_nocb_cpu(cpu))
		return;

	/*
	 * If a non-lazy callback arrived at a CPU having only lazy
	 * callbacks, invoke RCU core for the side-effect of recalculating
	 * idle duration on re-entry to idle.
	 */
	if (rdtp->all_lazy &&
	    rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
		rdtp->all_lazy = false;
		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
		invoke_rcu_core();
		return;
	}

	/*
	 * If we have not yet accelerated this jiffy, accelerate all
	 * callbacks on this CPU.
	 */
	if (rdtp->last_accelerate == jiffies)
		return;
	rdtp->last_accelerate = jiffies;
	for_each_rcu_flavor(rsp) {
		rdp = per_cpu_ptr(rsp->rda, cpu);
		if (!*rdp->nxttail[RCU_DONE_TAIL])
			continue;
		rnp = rdp->mynode;
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
		smp_mb__after_unlock_lock();
		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		if (needwake)
			rcu_gp_kthread_wake(rsp);
	}
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
}

/*
 * Clean up for exit from idle.  Attempt to advance callbacks based on
 * any grace periods that elapsed while the CPU was idle, and if any
 * callbacks are now ready to invoke, initiate invocation.
 */
static void rcu_cleanup_after_idle(int cpu)
{
#ifndef CONFIG_RCU_NOCB_CPU_ALL
	if (rcu_is_nocb_cpu(cpu))
		return;
	if (rcu_try_advance_all_cbs())
		invoke_rcu_core();
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
}

/*
 * Keep a running count of the number of non-lazy callbacks posted
 * on this CPU.  This running counter (which is never decremented) allows
 * rcu_prepare_for_idle() to detect when something out of the idle loop
 * posts a callback, even if an equal number of callbacks are invoked.
 * Of course, callbacks should only be posted from within a trace event
 * designed to be called from idle or from within RCU_NONIDLE().
 */
static void rcu_idle_count_callbacks_posted(void)
{
	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
}

/*
 * Data for flushing lazy RCU callbacks at OOM time.
 */
static atomic_t oom_callback_count;
static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);

/*
 * RCU OOM callback -- decrement the outstanding count and deliver the
 * wake-up if we are the last one.
 */
static void rcu_oom_callback(struct rcu_head *rhp)
{
	if (atomic_dec_and_test(&oom_callback_count))
		wake_up(&oom_callback_wq);
}
This will unnecessarily post callbacks 1775 * to CPUs that already have a non-lazy callback at the end of their 1776 * callback list, but this is an infrequent operation, so accept some 1777 * extra overhead to keep things simple. 1778 */ 1779static void rcu_oom_notify_cpu(void *unused) 1780{ 1781 struct rcu_state *rsp; 1782 struct rcu_data *rdp; 1783 1784 for_each_rcu_flavor(rsp) { 1785 rdp = raw_cpu_ptr(rsp->rda); 1786 if (rdp->qlen_lazy != 0) { 1787 atomic_inc(&oom_callback_count); 1788 rsp->call(&rdp->oom_head, rcu_oom_callback); 1789 } 1790 } 1791} 1792 1793/* 1794 * If low on memory, ensure that each CPU has a non-lazy callback. 1795 * This will wake up CPUs that have only lazy callbacks, in turn 1796 * ensuring that they free up the corresponding memory in a timely manner. 1797 * Because an uncertain amount of memory will be freed in some uncertain 1798 * timeframe, we do not claim to have freed anything. 1799 */ 1800static int rcu_oom_notify(struct notifier_block *self, 1801 unsigned long notused, void *nfreed) 1802{ 1803 int cpu; 1804 1805 /* Wait for callbacks from earlier instance to complete. */ 1806 wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0); 1807 smp_mb(); /* Ensure callback reuse happens after callback invocation. */ 1808 1809 /* 1810 * Prevent premature wakeup: ensure that all increments happen 1811 * before there is a chance of the counter reaching zero. 1812 */ 1813 atomic_set(&oom_callback_count, 1); 1814 1815 get_online_cpus(); 1816 for_each_online_cpu(cpu) { 1817 smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1); 1818 cond_resched(); 1819 } 1820 put_online_cpus(); 1821 1822 /* Unconditionally decrement: no need to wake ourselves up. */ 1823 atomic_dec(&oom_callback_count); 1824 1825 return NOTIFY_OK; 1826} 1827 1828static struct notifier_block rcu_oom_nb = { 1829 .notifier_call = rcu_oom_notify 1830}; 1831 1832static int __init rcu_register_oom_notifier(void) 1833{ 1834 register_oom_notifier(&rcu_oom_nb); 1835 return 0; 1836} 1837early_initcall(rcu_register_oom_notifier); 1838 1839#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ 1840 1841#ifdef CONFIG_RCU_CPU_STALL_INFO 1842 1843#ifdef CONFIG_RCU_FAST_NO_HZ 1844 1845static void print_cpu_stall_fast_no_hz(char *cp, int cpu) 1846{ 1847 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); 1848 unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap; 1849 1850 sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c", 1851 rdtp->last_accelerate & 0xffff, jiffies & 0xffff, 1852 ulong2long(nlpd), 1853 rdtp->all_lazy ? 'L' : '.', 1854 rdtp->tick_nohz_enabled_snap ? '.' : 'D'); 1855} 1856 1857#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */ 1858 1859static void print_cpu_stall_fast_no_hz(char *cp, int cpu) 1860{ 1861 *cp = '\0'; 1862} 1863 1864#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */ 1865 1866/* Initiate the stall-info list. */ 1867static void print_cpu_stall_info_begin(void) 1868{ 1869 pr_cont("\n"); 1870} 1871 1872/* 1873 * Print out diagnostic information for the specified stalled CPU. 1874 * 1875 * If the specified CPU is aware of the current RCU grace period 1876 * (flavor specified by rsp), then print the number of scheduling 1877 * clock interrupts the CPU has taken during the time that it has 1878 * been aware. Otherwise, print the number of RCU grace periods 1879 * that this CPU is ignorant of, for example, "1" if the CPU was 1880 * aware of the previous grace period. 1881 * 1882 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info. 
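 *
 * The resulting line of output looks something like the following,
 * with all values purely illustrative:
 *
 *	3: (4 ticks this GP) idle=c90/0/0 softirq=82/85 last_accelerate: 1a2b/1a9c, nonlazy_posted: 1, .D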
1883 */ 1884static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) 1885{ 1886 char fast_no_hz[72]; 1887 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 1888 struct rcu_dynticks *rdtp = rdp->dynticks; 1889 char *ticks_title; 1890 unsigned long ticks_value; 1891 1892 if (rsp->gpnum == rdp->gpnum) { 1893 ticks_title = "ticks this GP"; 1894 ticks_value = rdp->ticks_this_gp; 1895 } else { 1896 ticks_title = "GPs behind"; 1897 ticks_value = rsp->gpnum - rdp->gpnum; 1898 } 1899 print_cpu_stall_fast_no_hz(fast_no_hz, cpu); 1900 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n", 1901 cpu, ticks_value, ticks_title, 1902 atomic_read(&rdtp->dynticks) & 0xfff, 1903 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting, 1904 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), 1905 fast_no_hz); 1906} 1907 1908/* Terminate the stall-info list. */ 1909static void print_cpu_stall_info_end(void) 1910{ 1911 pr_err("\t"); 1912} 1913 1914/* Zero ->ticks_this_gp for all flavors of RCU. */ 1915static void zero_cpu_stall_ticks(struct rcu_data *rdp) 1916{ 1917 rdp->ticks_this_gp = 0; 1918 rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id()); 1919} 1920 1921/* Increment ->ticks_this_gp for all flavors of RCU. */ 1922static void increment_cpu_stall_ticks(void) 1923{ 1924 struct rcu_state *rsp; 1925 1926 for_each_rcu_flavor(rsp) 1927 raw_cpu_inc(rsp->rda->ticks_this_gp); 1928} 1929 1930#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */ 1931 1932static void print_cpu_stall_info_begin(void) 1933{ 1934 pr_cont(" {"); 1935} 1936 1937static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) 1938{ 1939 pr_cont(" %d", cpu); 1940} 1941 1942static void print_cpu_stall_info_end(void) 1943{ 1944 pr_cont("} "); 1945} 1946 1947static void zero_cpu_stall_ticks(struct rcu_data *rdp) 1948{ 1949} 1950 1951static void increment_cpu_stall_ticks(void) 1952{ 1953} 1954 1955#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */ 1956 1957#ifdef CONFIG_RCU_NOCB_CPU 1958 1959/* 1960 * Offload callback processing from the boot-time-specified set of CPUs 1961 * specified by rcu_nocb_mask. For each CPU in the set, there is a 1962 * kthread created that pulls the callbacks from the corresponding CPU, 1963 * waits for a grace period to elapse, and invokes the callbacks. 1964 * The no-CBs CPUs do a wake_up() on their kthread when they insert 1965 * a callback into any empty list, unless the rcu_nocb_poll boot parameter 1966 * has been specified, in which case each kthread actively polls its 1967 * CPU. (Which isn't so great for energy efficiency, but which does 1968 * reduce RCU's overhead on that CPU.) 1969 * 1970 * This is intended to be used in conjunction with Frederic Weisbecker's 1971 * adaptive-idle work, which would seriously reduce OS jitter on CPUs 1972 * running CPU-bound user-mode computations. 1973 * 1974 * Offloading of callback processing could also in theory be used as 1975 * an energy-efficiency measure because CPUs with no RCU callbacks 1976 * queued are more aggressive about entering dyntick-idle mode. 1977 */ 1978 1979 1980/* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. 
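 * For example, booting with "rcu_nocbs=1,3-7" (a hypothetical command
 * line) offloads callback processing from CPU 1 and CPUs 3 through 7,
 * assuming that those CPUs actually exist.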
 */
static int __init rcu_nocb_setup(char *str)
{
	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
	have_rcu_nocb_mask = true;
	cpulist_parse(str, rcu_nocb_mask);
	return 1;
}
__setup("rcu_nocbs=", rcu_nocb_setup);

static int __init parse_rcu_nocb_poll(char *arg)
{
	rcu_nocb_poll = 1;
	return 0;
}
early_param("rcu_nocb_poll", parse_rcu_nocb_poll);

/*
 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
 * grace period.
 */
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
	wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
}

/*
 * Set the root rcu_node structure's ->need_future_gp field
 * based on the sum of those of all rcu_node structures.  This does
 * double-count the root rcu_node structure's requests, but this
 * is necessary to handle the possibility of a rcu_nocb_kthread()
 * having awakened during the time that the rcu_node structures
 * were being updated for the end of the previous grace period.
 */
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
{
	rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
	init_waitqueue_head(&rnp->nocb_gp_wq[0]);
	init_waitqueue_head(&rnp->nocb_gp_wq[1]);
}

#ifndef CONFIG_RCU_NOCB_CPU_ALL
/* Is the specified CPU a no-CBs CPU? */
bool rcu_is_nocb_cpu(int cpu)
{
	if (have_rcu_nocb_mask)
		return cpumask_test_cpu(cpu, rcu_nocb_mask);
	return false;
}
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */

/*
 * Kick the leader kthread for this NOCB group.
 */
static void wake_nocb_leader(struct rcu_data *rdp, bool force)
{
	struct rcu_data *rdp_leader = rdp->nocb_leader;

	if (!ACCESS_ONCE(rdp_leader->nocb_kthread))
		return;
	if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
		/* Prior smp_mb__after_atomic() orders against prior enqueue. */
		ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
		wake_up(&rdp_leader->nocb_wq);
	}
}

/*
 * Enqueue the specified string of rcu_head structures onto the specified
 * CPU's no-CBs lists.  The CPU is specified by rdp, the head of the
 * string by rhp, and the tail of the string by rhtp.  The non-lazy/lazy
 * counts are supplied by rhcount and rhcount_lazy.
 *
 * If warranted, also wake up the kthread servicing this CPU's queues.
 */
static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
				    struct rcu_head *rhp,
				    struct rcu_head **rhtp,
				    int rhcount, int rhcount_lazy,
				    unsigned long flags)
{
	int len;
	struct rcu_head **old_rhpp;
	struct task_struct *t;

	/* Enqueue the callback on the nocb list and update counts. */
	old_rhpp = xchg(&rdp->nocb_tail, rhtp);
	ACCESS_ONCE(*old_rhpp) = rhp;
	atomic_long_add(rhcount, &rdp->nocb_q_count);
	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
	smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */

	/* If we are not being polled and there is a kthread, awaken it ...
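	 * (wake policy: if the queue was previously empty, wake the
	 * leader, deferring the wakeup when interrupts are disabled;
	 * if more than qhimark callbacks are now queued, force-wake
	 * the leader; otherwise, no wakeup is needed) ...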
	 */
	t = ACCESS_ONCE(rdp->nocb_kthread);
	if (rcu_nocb_poll || !t) {
		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
				    TPS("WakeNotPoll"));
		return;
	}
	len = atomic_long_read(&rdp->nocb_q_count);
	if (old_rhpp == &rdp->nocb_head) {
		if (!irqs_disabled_flags(flags)) {
			/* ... if queue was empty ... */
			wake_nocb_leader(rdp, false);
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    TPS("WakeEmpty"));
		} else {
			rdp->nocb_defer_wakeup = true;
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    TPS("WakeEmptyIsDeferred"));
		}
		rdp->qlen_last_fqs_check = 0;
	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
		/* ... or if many callbacks queued. */
		wake_nocb_leader(rdp, true);
		rdp->qlen_last_fqs_check = LONG_MAX / 2;
		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeOvf"));
	} else {
		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
	}
	return;
}

/*
 * This is a helper for __call_rcu(), which invokes this when the normal
 * callback queue is inoperable.  If this is not a no-CBs CPU, this
 * function returns failure back to __call_rcu(), which can complain
 * appropriately.
 *
 * Otherwise, this function queues the callback where the corresponding
 * "rcuo" kthread can find it.
 */
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags)
{
	if (!rcu_is_nocb_cpu(rdp->cpu))
		return false;
	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
		trace_rcu_kfree_callback(rdp->rsp->name, rhp,
					 (unsigned long)rhp->func,
					 -atomic_long_read(&rdp->nocb_q_count_lazy),
					 -atomic_long_read(&rdp->nocb_q_count));
	else
		trace_rcu_callback(rdp->rsp->name, rhp,
				   -atomic_long_read(&rdp->nocb_q_count_lazy),
				   -atomic_long_read(&rdp->nocb_q_count));

	/*
	 * If called from an extended quiescent state with interrupts
	 * disabled, invoke the RCU core in order to allow the idle-entry
	 * deferred-wakeup check to function.
	 */
	if (irqs_disabled_flags(flags) &&
	    !rcu_is_watching() &&
	    cpu_online(smp_processor_id()))
		invoke_rcu_core();

	return true;
}

/*
 * Adopt orphaned callbacks on a no-CBs CPU, or return false if this is
 * not a no-CBs CPU.
 */
static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
						     struct rcu_data *rdp,
						     unsigned long flags)
{
	long ql = rsp->qlen;
	long qll = rsp->qlen_lazy;

	/* If this is not a no-CBs CPU, tell the caller to do it the old way. */
	if (!rcu_is_nocb_cpu(smp_processor_id()))
		return false;
	rsp->qlen = 0;
	rsp->qlen_lazy = 0;

	/* First, enqueue the donelist, if any.  This preserves CB ordering.
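	 * (Callbacks whose grace period has already elapsed must stay
	 * ahead of those still waiting for a grace period.)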
*/ 2164 if (rsp->orphan_donelist != NULL) { 2165 __call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist, 2166 rsp->orphan_donetail, ql, qll, flags); 2167 ql = qll = 0; 2168 rsp->orphan_donelist = NULL; 2169 rsp->orphan_donetail = &rsp->orphan_donelist; 2170 } 2171 if (rsp->orphan_nxtlist != NULL) { 2172 __call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist, 2173 rsp->orphan_nxttail, ql, qll, flags); 2174 ql = qll = 0; 2175 rsp->orphan_nxtlist = NULL; 2176 rsp->orphan_nxttail = &rsp->orphan_nxtlist; 2177 } 2178 return true; 2179} 2180 2181/* 2182 * If necessary, kick off a new grace period, and either way wait 2183 * for a subsequent grace period to complete. 2184 */ 2185static void rcu_nocb_wait_gp(struct rcu_data *rdp) 2186{ 2187 unsigned long c; 2188 bool d; 2189 unsigned long flags; 2190 bool needwake; 2191 struct rcu_node *rnp = rdp->mynode; 2192 2193 raw_spin_lock_irqsave(&rnp->lock, flags); 2194 smp_mb__after_unlock_lock(); 2195 needwake = rcu_start_future_gp(rnp, rdp, &c); 2196 raw_spin_unlock_irqrestore(&rnp->lock, flags); 2197 if (needwake) 2198 rcu_gp_kthread_wake(rdp->rsp); 2199 2200 /* 2201 * Wait for the grace period. Do so interruptibly to avoid messing 2202 * up the load average. 2203 */ 2204 trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait")); 2205 for (;;) { 2206 wait_event_interruptible( 2207 rnp->nocb_gp_wq[c & 0x1], 2208 (d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c))); 2209 if (likely(d)) 2210 break; 2211 flush_signals(current); 2212 trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait")); 2213 } 2214 trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait")); 2215 smp_mb(); /* Ensure that CB invocation happens after GP end. */ 2216} 2217 2218/* 2219 * Leaders come here to wait for additional callbacks to show up. 2220 * This function does not return until callbacks appear. 2221 */ 2222static void nocb_leader_wait(struct rcu_data *my_rdp) 2223{ 2224 bool firsttime = true; 2225 bool gotcbs; 2226 struct rcu_data *rdp; 2227 struct rcu_head **tail; 2228 2229wait_again: 2230 2231 /* Wait for callbacks to appear. */ 2232 if (!rcu_nocb_poll) { 2233 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep"); 2234 wait_event_interruptible(my_rdp->nocb_wq, 2235 !ACCESS_ONCE(my_rdp->nocb_leader_sleep)); 2236 /* Memory barrier handled by smp_mb() calls below and repoll. */ 2237 } else if (firsttime) { 2238 firsttime = false; /* Don't drown trace log with "Poll"! */ 2239 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Poll"); 2240 } 2241 2242 /* 2243 * Each pass through the following loop checks a follower for CBs. 2244 * We are our own first follower. Any CBs found are moved to 2245 * nocb_gp_head, where they await a grace period. 2246 */ 2247 gotcbs = false; 2248 for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) { 2249 rdp->nocb_gp_head = ACCESS_ONCE(rdp->nocb_head); 2250 if (!rdp->nocb_gp_head) 2251 continue; /* No CBs here, try next follower. */ 2252 2253 /* Move callbacks to wait-for-GP list, which is empty. */ 2254 ACCESS_ONCE(rdp->nocb_head) = NULL; 2255 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head); 2256 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0); 2257 rdp->nocb_gp_count_lazy = 2258 atomic_long_xchg(&rdp->nocb_q_count_lazy, 0); 2259 gotcbs = true; 2260 } 2261 2262 /* 2263 * If there were no callbacks, sleep a bit, rescan after a 2264 * memory barrier, and go retry. 
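	 * (The rescan below guards against a follower having enqueued
	 * callbacks just before we set ->nocb_leader_sleep, in which
	 * case the corresponding wakeup could otherwise be lost.)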
	 */
	if (unlikely(!gotcbs)) {
		if (!rcu_nocb_poll)
			trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
					    "WokeEmpty");
		flush_signals(current);
		schedule_timeout_interruptible(1);

		/* Rescan in case we were a victim of memory ordering. */
		my_rdp->nocb_leader_sleep = true;
		smp_mb();  /* Ensure _sleep true before scan. */
		for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower)
			if (ACCESS_ONCE(rdp->nocb_head)) {
				/* Found CB, so short-circuit next wait. */
				my_rdp->nocb_leader_sleep = false;
				break;
			}
		goto wait_again;
	}

	/* Wait for one grace period. */
	rcu_nocb_wait_gp(my_rdp);

	/*
	 * We left ->nocb_leader_sleep unset to reduce cache thrashing.
	 * We set it now, but recheck for new callbacks while
	 * traversing our follower list.
	 */
	my_rdp->nocb_leader_sleep = true;
	smp_mb(); /* Ensure _sleep true before scan of ->nocb_head. */

	/* Each pass through the following loop wakes a follower, if needed. */
	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
		if (ACCESS_ONCE(rdp->nocb_head))
			my_rdp->nocb_leader_sleep = false; /* No need to sleep. */
		if (!rdp->nocb_gp_head)
			continue; /* No CBs, so no need to wake follower. */

		/* Append callbacks to follower's "done" list. */
		tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
		*tail = rdp->nocb_gp_head;
		atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count);
		atomic_long_add(rdp->nocb_gp_count_lazy,
				&rdp->nocb_follower_count_lazy);
		if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
			/*
			 * List was empty, wake up the follower.
			 * Memory barriers supplied by atomic_long_add().
			 */
			wake_up(&rdp->nocb_wq);
		}
	}

	/* If we (the leader) don't have CBs, go wait some more. */
	if (!my_rdp->nocb_follower_head)
		goto wait_again;
}

/*
 * Followers come here to wait for additional callbacks to show up.
 * This function does not return until callbacks appear.
 */
static void nocb_follower_wait(struct rcu_data *rdp)
{
	bool firsttime = true;

	for (;;) {
		if (!rcu_nocb_poll) {
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    "FollowerSleep");
			wait_event_interruptible(rdp->nocb_wq,
						 ACCESS_ONCE(rdp->nocb_follower_head));
		} else if (firsttime) {
			/* Don't drown trace log with "Poll"! */
			firsttime = false;
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "Poll");
		}
		if (smp_load_acquire(&rdp->nocb_follower_head)) {
			/* ^^^ Ensure CB invocation follows _head test. */
			return;
		}
		if (!rcu_nocb_poll)
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    "WokeEmpty");
		flush_signals(current);
		schedule_timeout_interruptible(1);
	}
}

/*
 * Per-rcu_data kthread, but only for no-CBs CPUs.  Each kthread invokes
 * callbacks queued by the corresponding no-CBs CPU; however, there is
 * an optional leader-follower relationship so that the grace-period
 * kthreads don't have to do quite so many wakeups.
 */
static int rcu_nocb_kthread(void *arg)
{
	int c, cl;
	struct rcu_head *list;
	struct rcu_head *next;
	struct rcu_head **tail;
	struct rcu_data *rdp = arg;

	/* Each pass through this loop invokes one batch of callbacks */
	for (;;) {
		/* Wait for callbacks.
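		 * Leaders wait in nocb_leader_wait(), gathering callbacks
		 * from their followers, while followers wait in
		 * nocb_follower_wait() for their leader to hand them a
		 * batch.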
		 */
		if (rdp->nocb_leader == rdp)
			nocb_leader_wait(rdp);
		else
			nocb_follower_wait(rdp);

		/* Pull the ready-to-invoke callbacks onto local list. */
		list = ACCESS_ONCE(rdp->nocb_follower_head);
		BUG_ON(!list);
		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
		ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
		tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
		c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
		cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
		rdp->nocb_p_count += c;
		rdp->nocb_p_count_lazy += cl;

		/* Each pass through the following loop invokes a callback. */
		trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
		c = cl = 0;
		while (list) {
			next = list->next;
			/* Wait for enqueuing to complete, if needed. */
			while (next == NULL && &list->next != tail) {
				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
						    TPS("WaitQueue"));
				schedule_timeout_interruptible(1);
				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
						    TPS("WokeQueue"));
				next = list->next;
			}
			debug_rcu_head_unqueue(list);
			local_bh_disable();
			if (__rcu_reclaim(rdp->rsp->name, list))
				cl++;
			c++;
			local_bh_enable();
			list = next;
		}
		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
		ACCESS_ONCE(rdp->nocb_p_count) -= c;
		ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
		rdp->n_nocbs_invoked += c;
	}
	return 0;
}

/* Is a deferred wakeup of rcu_nocb_kthread() required? */
static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
{
	return ACCESS_ONCE(rdp->nocb_defer_wakeup);
}

/* Do a deferred wakeup of rcu_nocb_kthread(). */
static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	if (!rcu_nocb_need_deferred_wakeup(rdp))
		return;
	ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
	wake_nocb_leader(rdp, false);
	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
}

void __init rcu_init_nohz(void)
{
	int cpu;
	bool need_rcu_nocb_mask = true;
	struct rcu_state *rsp;

#ifdef CONFIG_RCU_NOCB_CPU_NONE
	need_rcu_nocb_mask = false;
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_NONE */

#if defined(CONFIG_NO_HZ_FULL)
	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
		need_rcu_nocb_mask = true;
#endif /* #if defined(CONFIG_NO_HZ_FULL) */

	if (!have_rcu_nocb_mask && need_rcu_nocb_mask) {
		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
			pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
			return;
		}
		have_rcu_nocb_mask = true;
	}
	if (!have_rcu_nocb_mask)
		return;

#ifdef CONFIG_RCU_NOCB_CPU_ZERO
	pr_info("\tOffload RCU callbacks from CPU 0\n");
	cpumask_set_cpu(0, rcu_nocb_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
#ifdef CONFIG_RCU_NOCB_CPU_ALL
	pr_info("\tOffload RCU callbacks from all CPUs\n");
	cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
#if defined(CONFIG_NO_HZ_FULL)
	if (tick_nohz_full_running)
		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
#endif /* #if defined(CONFIG_NO_HZ_FULL) */

	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
		pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
			    rcu_nocb_mask);
	}
	cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
	pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
	if (rcu_nocb_poll)
		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");

	for_each_rcu_flavor(rsp) {
		for_each_cpu(cpu, rcu_nocb_mask) {
			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);

			/*
			 * If there are early callbacks, they will need
			 * to be moved to the nocb lists.
			 */
			WARN_ON_ONCE(rdp->nxttail[RCU_NEXT_TAIL] !=
				     &rdp->nxtlist &&
				     rdp->nxttail[RCU_NEXT_TAIL] != NULL);
			init_nocb_callback_list(rdp);
		}
		rcu_organize_nocb_kthreads(rsp);
	}
}

/* Initialize per-rcu_data variables for no-CBs CPUs. */
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
	rdp->nocb_tail = &rdp->nocb_head;
	init_waitqueue_head(&rdp->nocb_wq);
	rdp->nocb_follower_tail = &rdp->nocb_follower_head;
}

/*
 * If the specified CPU is a no-CBs CPU that does not already have its
 * rcuo kthread for the specified RCU flavor, spawn it.  If the CPUs are
 * brought online out of order, this can require re-organizing the
 * leader-follower relationships.
 */
static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
{
	struct rcu_data *rdp;
	struct rcu_data *rdp_last;
	struct rcu_data *rdp_old_leader;
	struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
	struct task_struct *t;

	/*
	 * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
	 * then nothing to do.
	 */
	if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread)
		return;

	/* If we didn't spawn the leader first, reorganize! */
	rdp_old_leader = rdp_spawn->nocb_leader;
	if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) {
		rdp_last = NULL;
		rdp = rdp_old_leader;
		do {
			rdp->nocb_leader = rdp_spawn;
			if (rdp_last && rdp != rdp_spawn)
				rdp_last->nocb_next_follower = rdp;
			rdp_last = rdp;
			rdp = rdp->nocb_next_follower;
			rdp_last->nocb_next_follower = NULL;
		} while (rdp);
		rdp_spawn->nocb_next_follower = rdp_old_leader;
	}

	/* Spawn the kthread for this CPU and RCU flavor. */
	t = kthread_run(rcu_nocb_kthread, rdp_spawn,
			"rcuo%c/%d", rsp->abbr, cpu);
	BUG_ON(IS_ERR(t));
	ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
}

/*
 * If the specified CPU is a no-CBs CPU that does not already have its
 * rcuo kthreads, spawn them.
 */
static void rcu_spawn_all_nocb_kthreads(int cpu)
{
	struct rcu_state *rsp;

	if (rcu_scheduler_fully_active)
		for_each_rcu_flavor(rsp)
			rcu_spawn_one_nocb_kthread(rsp, cpu);
}

/*
 * Once the scheduler is running, spawn rcuo kthreads for all online
 * no-CBs CPUs.  This assumes that the early_initcall()s happen before
 * non-boot CPUs come online -- if this changes, we will need to add
 * some mutual exclusion.
 */
static void __init rcu_spawn_nocb_kthreads(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		rcu_spawn_all_nocb_kthreads(cpu);
}

/* How many follower CPU IDs per leader?  Default of -1 for sqrt(nr_cpu_ids).
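 * For example (a hypothetical configuration), with 64 no-CBs CPUs the
 * default stride is int_sqrt(64) = 8, so CPU 0 leads CPUs 1-7, CPU 8
 * leads CPUs 9-15, and so on.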
 */
static int rcu_nocb_leader_stride = -1;
module_param(rcu_nocb_leader_stride, int, 0444);

/*
 * Initialize leader-follower relationships for all no-CBs CPUs.
 */
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
{
	int cpu;
	int ls = rcu_nocb_leader_stride;
	int nl = 0;  /* Next leader. */
	struct rcu_data *rdp;
	struct rcu_data *rdp_leader = NULL;  /* Suppress misguided gcc warn. */
	struct rcu_data *rdp_prev = NULL;

	if (!have_rcu_nocb_mask)
		return;
	if (ls == -1) {
		ls = int_sqrt(nr_cpu_ids);
		rcu_nocb_leader_stride = ls;
	}

	/*
	 * Each pass through this loop sets up one rcu_data structure and
	 * spawns one rcu_nocb_kthread().
	 */
	for_each_cpu(cpu, rcu_nocb_mask) {
		rdp = per_cpu_ptr(rsp->rda, cpu);
		if (rdp->cpu >= nl) {
			/* New leader, set up for followers & next leader. */
			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
			rdp->nocb_leader = rdp;
			rdp_leader = rdp;
		} else {
			/* Another follower, link to previous leader. */
			rdp->nocb_leader = rdp_leader;
			rdp_prev->nocb_next_follower = rdp;
		}
		rdp_prev = rdp;
	}
}

/* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
static bool init_nocb_callback_list(struct rcu_data *rdp)
{
	if (!rcu_is_nocb_cpu(rdp->cpu))
		return false;

	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
	return true;
}

#else /* #ifdef CONFIG_RCU_NOCB_CPU */

static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
}

static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
{
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
}

static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags)
{
	return false;
}

static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
						     struct rcu_data *rdp,
						     unsigned long flags)
{
	return false;
}

static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
}

static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
{
	return false;
}

static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
}

static void rcu_spawn_all_nocb_kthreads(int cpu)
{
}

static void __init rcu_spawn_nocb_kthreads(void)
{
}

static bool init_nocb_callback_list(struct rcu_data *rdp)
{
	return false;
}

#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

/*
 * An adaptive-ticks CPU can potentially execute in kernel mode for an
 * arbitrarily long period of time with the scheduling-clock tick turned
 * off.  RCU will be paying attention to this CPU because it is in the
 * kernel, but the CPU cannot be guaranteed to be executing the RCU state
 * machine because the scheduling-clock tick has been disabled.  Therefore,
 * if an adaptive-ticks CPU is failing to respond to the current grace
 * period and has not been idle from an RCU perspective, kick it.
 */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_cpu(cpu))
		smp_send_reschedule(cpu);
#endif /* #ifdef CONFIG_NO_HZ_FULL */
}


#ifdef CONFIG_NO_HZ_FULL_SYSIDLE

static int full_sysidle_state;		/* Current system-idle state.
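					 * Advances NOT -> SHORT -> LONG ->
					 * FULL -> FULL_NOTED via the state
					 * machine below; any non-idle CPU
					 * moves it back to NOT.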
*/ 2706#define RCU_SYSIDLE_NOT 0 /* Some CPU is not idle. */ 2707#define RCU_SYSIDLE_SHORT 1 /* All CPUs idle for brief period. */ 2708#define RCU_SYSIDLE_LONG 2 /* All CPUs idle for long enough. */ 2709#define RCU_SYSIDLE_FULL 3 /* All CPUs idle, ready for sysidle. */ 2710#define RCU_SYSIDLE_FULL_NOTED 4 /* Actually entered sysidle state. */ 2711 2712/* 2713 * Invoked to note exit from irq or task transition to idle. Note that 2714 * usermode execution does -not- count as idle here! After all, we want 2715 * to detect full-system idle states, not RCU quiescent states and grace 2716 * periods. The caller must have disabled interrupts. 2717 */ 2718static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq) 2719{ 2720 unsigned long j; 2721 2722 /* If there are no nohz_full= CPUs, no need to track this. */ 2723 if (!tick_nohz_full_enabled()) 2724 return; 2725 2726 /* Adjust nesting, check for fully idle. */ 2727 if (irq) { 2728 rdtp->dynticks_idle_nesting--; 2729 WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0); 2730 if (rdtp->dynticks_idle_nesting != 0) 2731 return; /* Still not fully idle. */ 2732 } else { 2733 if ((rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) == 2734 DYNTICK_TASK_NEST_VALUE) { 2735 rdtp->dynticks_idle_nesting = 0; 2736 } else { 2737 rdtp->dynticks_idle_nesting -= DYNTICK_TASK_NEST_VALUE; 2738 WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0); 2739 return; /* Still not fully idle. */ 2740 } 2741 } 2742 2743 /* Record start of fully idle period. */ 2744 j = jiffies; 2745 ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j; 2746 smp_mb__before_atomic(); 2747 atomic_inc(&rdtp->dynticks_idle); 2748 smp_mb__after_atomic(); 2749 WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1); 2750} 2751 2752/* 2753 * Unconditionally force exit from full system-idle state. This is 2754 * invoked when a normal CPU exits idle, but must be called separately 2755 * for the timekeeping CPU (tick_do_timer_cpu). The reason for this 2756 * is that the timekeeping CPU is permitted to take scheduling-clock 2757 * interrupts while the system is in system-idle state, and of course 2758 * rcu_sysidle_exit() has no way of distinguishing a scheduling-clock 2759 * interrupt from any other type of interrupt. 2760 */ 2761void rcu_sysidle_force_exit(void) 2762{ 2763 int oldstate = ACCESS_ONCE(full_sysidle_state); 2764 int newoldstate; 2765 2766 /* 2767 * Each pass through the following loop attempts to exit full 2768 * system-idle state. If contention proves to be a problem, 2769 * a trylock-based contention tree could be used here. 2770 */ 2771 while (oldstate > RCU_SYSIDLE_SHORT) { 2772 newoldstate = cmpxchg(&full_sysidle_state, 2773 oldstate, RCU_SYSIDLE_NOT); 2774 if (oldstate == newoldstate && 2775 oldstate == RCU_SYSIDLE_FULL_NOTED) { 2776 rcu_kick_nohz_cpu(tick_do_timer_cpu); 2777 return; /* We cleared it, done! */ 2778 } 2779 oldstate = newoldstate; 2780 } 2781 smp_mb(); /* Order initial oldstate fetch vs. later non-idle work. */ 2782} 2783 2784/* 2785 * Invoked to note entry to irq or task transition from idle. Note that 2786 * usermode execution does -not- count as idle here! The caller must 2787 * have disabled interrupts. 2788 */ 2789static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq) 2790{ 2791 /* If there are no nohz_full= CPUs, no need to track this. */ 2792 if (!tick_nohz_full_enabled()) 2793 return; 2794 2795 /* Adjust nesting, check for already non-idle. 
	 */
	if (irq) {
		rdtp->dynticks_idle_nesting++;
		WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
		if (rdtp->dynticks_idle_nesting != 1)
			return; /* Already non-idle. */
	} else {
		/*
		 * Allow for irq misnesting.  Yes, it really is possible
		 * to enter an irq handler then never leave it, and maybe
		 * also vice versa.  Handle both possibilities.
		 */
		if (rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) {
			rdtp->dynticks_idle_nesting += DYNTICK_TASK_NEST_VALUE;
			WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
			return; /* Already non-idle. */
		} else {
			rdtp->dynticks_idle_nesting = DYNTICK_TASK_EXIT_IDLE;
		}
	}

	/* Record end of idle period. */
	smp_mb__before_atomic();
	atomic_inc(&rdtp->dynticks_idle);
	smp_mb__after_atomic();
	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));

	/*
	 * If we are the timekeeping CPU, we are permitted to be non-idle
	 * during a system-idle state.  This must be the case, because
	 * the timekeeping CPU has to take scheduling-clock interrupts
	 * during the time that the system is transitioning to full
	 * system-idle state.  This means that the timekeeping CPU must
	 * invoke rcu_sysidle_force_exit() directly if it does anything
	 * more than take a scheduling-clock interrupt.
	 */
	if (smp_processor_id() == tick_do_timer_cpu)
		return;

	/* Update system-idle state: We are clearly no longer fully idle! */
	rcu_sysidle_force_exit();
}

/*
 * Check to see if the current CPU is idle.  Note that usermode execution
 * does not count as idle.  The caller must have disabled interrupts.
 */
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
				  unsigned long *maxj)
{
	int cur;
	unsigned long j;
	struct rcu_dynticks *rdtp = rdp->dynticks;

	/* If there are no nohz_full= CPUs, don't check system-wide idleness. */
	if (!tick_nohz_full_enabled())
		return;

	/*
	 * If some other CPU has already reported non-idle, if this is
	 * not the flavor of RCU that tracks sysidle state, or if this
	 * is an offline or the timekeeping CPU, nothing to do.
	 */
	if (!*isidle || rdp->rsp != rcu_state_p ||
	    cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
		return;
	if (rcu_gp_in_progress(rdp->rsp))
		WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);

	/* Pick up current idle and NMI-nesting counter and check. */
	cur = atomic_read(&rdtp->dynticks_idle);
	if (cur & 0x1) {
		*isidle = false; /* We are not idle! */
		return;
	}
	smp_mb(); /* Read counters before timestamps. */

	/* Pick up timestamps. */
	j = ACCESS_ONCE(rdtp->dynticks_idle_jiffies);
	/* If this CPU entered idle more recently, update maxj timestamp. */
	if (ULONG_CMP_LT(*maxj, j))
		*maxj = j;
}

/*
 * Is this the flavor of RCU that is handling full-system idle?
 */
static bool is_sysidle_rcu_state(struct rcu_state *rsp)
{
	return rsp == rcu_state_p;
}

/*
 * Return a delay in jiffies based on the number of CPUs, rcu_node
 * leaf fanout, and jiffies tick rate.  The idea is to allow larger
 * systems more time to transition to full-idle state in order to
 * avoid the cache thrashing that otherwise occurs on the state variable.
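 * For example (illustrative values only), with nr_cpu_ids = 256,
 * HZ = 1000, and rcu_fanout_leaf = 16, the delay works out to
 * DIV_ROUND_UP(256 * 1000, 16 * 1000) = 16 jiffies.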
2892 * Really small systems (less than a couple of tens of CPUs) should 2893 * instead use a single global atomically incremented counter, and later 2894 * versions of this will automatically reconfigure themselves accordingly. 2895 */ 2896static unsigned long rcu_sysidle_delay(void) 2897{ 2898 if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) 2899 return 0; 2900 return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000); 2901} 2902 2903/* 2904 * Advance the full-system-idle state. This is invoked when all of 2905 * the non-timekeeping CPUs are idle. 2906 */ 2907static void rcu_sysidle(unsigned long j) 2908{ 2909 /* Check the current state. */ 2910 switch (ACCESS_ONCE(full_sysidle_state)) { 2911 case RCU_SYSIDLE_NOT: 2912 2913 /* First time all are idle, so note a short idle period. */ 2914 ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT; 2915 break; 2916 2917 case RCU_SYSIDLE_SHORT: 2918 2919 /* 2920 * Idle for a bit, time to advance to next state? 2921 * cmpxchg failure means race with non-idle, let them win. 2922 */ 2923 if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay())) 2924 (void)cmpxchg(&full_sysidle_state, 2925 RCU_SYSIDLE_SHORT, RCU_SYSIDLE_LONG); 2926 break; 2927 2928 case RCU_SYSIDLE_LONG: 2929 2930 /* 2931 * Do an additional check pass before advancing to full. 2932 * cmpxchg failure means race with non-idle, let them win. 2933 */ 2934 if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay())) 2935 (void)cmpxchg(&full_sysidle_state, 2936 RCU_SYSIDLE_LONG, RCU_SYSIDLE_FULL); 2937 break; 2938 2939 default: 2940 break; 2941 } 2942} 2943 2944/* 2945 * Found a non-idle non-timekeeping CPU, so kick the system-idle state 2946 * back to the beginning. 2947 */ 2948static void rcu_sysidle_cancel(void) 2949{ 2950 smp_mb(); 2951 if (full_sysidle_state > RCU_SYSIDLE_SHORT) 2952 ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT; 2953} 2954 2955/* 2956 * Update the sysidle state based on the results of a force-quiescent-state 2957 * scan of the CPUs' dyntick-idle state. 2958 */ 2959static void rcu_sysidle_report(struct rcu_state *rsp, int isidle, 2960 unsigned long maxj, bool gpkt) 2961{ 2962 if (rsp != rcu_state_p) 2963 return; /* Wrong flavor, ignore. */ 2964 if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) 2965 return; /* Running state machine from timekeeping CPU. */ 2966 if (isidle) 2967 rcu_sysidle(maxj); /* More idle! */ 2968 else 2969 rcu_sysidle_cancel(); /* Idle is over. */ 2970} 2971 2972/* 2973 * Wrapper for rcu_sysidle_report() when called from the grace-period 2974 * kthread's context. 2975 */ 2976static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle, 2977 unsigned long maxj) 2978{ 2979 /* If there are no nohz_full= CPUs, no need to track this. */ 2980 if (!tick_nohz_full_enabled()) 2981 return; 2982 2983 rcu_sysidle_report(rsp, isidle, maxj, true); 2984} 2985 2986/* Callback and function for forcing an RCU grace period. */ 2987struct rcu_sysidle_head { 2988 struct rcu_head rh; 2989 int inuse; 2990}; 2991 2992static void rcu_sysidle_cb(struct rcu_head *rhp) 2993{ 2994 struct rcu_sysidle_head *rshp; 2995 2996 /* 2997 * The following memory barrier is needed to replace the 2998 * memory barriers that would normally be in the memory 2999 * allocator. 3000 */ 3001 smp_mb(); /* grace period precedes setting inuse. */ 3002 3003 rshp = container_of(rhp, struct rcu_sysidle_head, rh); 3004 ACCESS_ONCE(rshp->inuse) = 0; 3005} 3006 3007/* 3008 * Check to see if the system is fully idle, other than the timekeeping CPU. 3009 * The caller must have disabled interrupts. 
This is not intended to be 3010 * called unless tick_nohz_full_enabled(). 3011 */ 3012bool rcu_sys_is_idle(void) 3013{ 3014 static struct rcu_sysidle_head rsh; 3015 int rss = ACCESS_ONCE(full_sysidle_state); 3016 3017 if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu)) 3018 return false; 3019 3020 /* Handle small-system case by doing a full scan of CPUs. */ 3021 if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) { 3022 int oldrss = rss - 1; 3023 3024 /* 3025 * One pass to advance to each state up to _FULL. 3026 * Give up if any pass fails to advance the state. 3027 */ 3028 while (rss < RCU_SYSIDLE_FULL && oldrss < rss) { 3029 int cpu; 3030 bool isidle = true; 3031 unsigned long maxj = jiffies - ULONG_MAX / 4; 3032 struct rcu_data *rdp; 3033 3034 /* Scan all the CPUs looking for nonidle CPUs. */ 3035 for_each_possible_cpu(cpu) { 3036 rdp = per_cpu_ptr(rcu_state_p->rda, cpu); 3037 rcu_sysidle_check_cpu(rdp, &isidle, &maxj); 3038 if (!isidle) 3039 break; 3040 } 3041 rcu_sysidle_report(rcu_state_p, isidle, maxj, false); 3042 oldrss = rss; 3043 rss = ACCESS_ONCE(full_sysidle_state); 3044 } 3045 } 3046 3047 /* If this is the first observation of an idle period, record it. */ 3048 if (rss == RCU_SYSIDLE_FULL) { 3049 rss = cmpxchg(&full_sysidle_state, 3050 RCU_SYSIDLE_FULL, RCU_SYSIDLE_FULL_NOTED); 3051 return rss == RCU_SYSIDLE_FULL; 3052 } 3053 3054 smp_mb(); /* ensure rss load happens before later caller actions. */ 3055 3056 /* If already fully idle, tell the caller (in case of races). */ 3057 if (rss == RCU_SYSIDLE_FULL_NOTED) 3058 return true; 3059 3060 /* 3061 * If we aren't there yet, and a grace period is not in flight, 3062 * initiate a grace period. Either way, tell the caller that 3063 * we are not there yet. We use an xchg() rather than an assignment 3064 * to make up for the memory barriers that would otherwise be 3065 * provided by the memory allocator. 3066 */ 3067 if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL && 3068 !rcu_gp_in_progress(rcu_state_p) && 3069 !rsh.inuse && xchg(&rsh.inuse, 1) == 0) 3070 call_rcu(&rsh.rh, rcu_sysidle_cb); 3071 return false; 3072} 3073 3074/* 3075 * Initialize dynticks sysidle state for CPUs coming online. 3076 */ 3077static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp) 3078{ 3079 rdtp->dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE; 3080} 3081 3082#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ 3083 3084static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq) 3085{ 3086} 3087 3088static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq) 3089{ 3090} 3091 3092static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle, 3093 unsigned long *maxj) 3094{ 3095} 3096 3097static bool is_sysidle_rcu_state(struct rcu_state *rsp) 3098{ 3099 return false; 3100} 3101 3102static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle, 3103 unsigned long maxj) 3104{ 3105} 3106 3107static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp) 3108{ 3109} 3110 3111#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ 3112 3113/* 3114 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the 3115 * grace-period kthread will do force_quiescent_state() processing? 3116 * The idea is to avoid waking up RCU core processing on such a 3117 * CPU unless the grace period has extended for too long. 3118 * 3119 * This code relies on the fact that all NO_HZ_FULL CPUs are also 3120 * CONFIG_RCU_NOCB_CPU CPUs. 
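 * (Such CPUs' callbacks are always offloaded to rcuo kthreads, so
 * there is no local callback processing to wake them up for.)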
 */
static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_cpu(smp_processor_id()) &&
	    (!rcu_gp_in_progress(rsp) ||
	     ULONG_CMP_LT(jiffies, ACCESS_ONCE(rsp->gp_start) + HZ)))
		return true;
#endif /* #ifdef CONFIG_NO_HZ_FULL */
	return false;
}

/*
 * Bind the grace-period kthread for the sysidle flavor of RCU to the
 * timekeeping CPU.
 */
static void rcu_bind_gp_kthread(void)
{
	int __maybe_unused cpu;

	if (!tick_nohz_full_enabled())
		return;
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	cpu = tick_do_timer_cpu;
	if (cpu >= 0 && cpu < nr_cpu_ids && raw_smp_processor_id() != cpu)
		set_cpus_allowed_ptr(current, cpumask_of(cpu));
#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
	if (!is_housekeeping_cpu(raw_smp_processor_id()))
		housekeeping_affine(current);
#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
}