tlb_uv.c revision ca444564a947034557a85357b3911d067cac4b8f
/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>

/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
		20,
		160,
		1280,
		10240,
		81920,
		655360,
		5242880,
		167772160
};
static int timeout_us;
static int nobau;
static int baudisabled;
static spinlock_t disable_lock;
static cycles_t congested_cycles;

/* tunables: */
static int max_bau_concurrent = MAX_BAU_CONCURRENT;
static int max_bau_concurrent_constant = MAX_BAU_CONCURRENT;
static int plugged_delay = PLUGGED_DELAY;
static int plugsb4reset = PLUGSB4RESET;
static int timeoutsb4reset = TIMEOUTSB4RESET;
static int ipi_reset_limit = IPI_RESET_LIMIT;
static int complete_threshold = COMPLETE_THRESHOLD;
static int congested_response_us = CONGESTED_RESPONSE_US;
static int congested_reps = CONGESTED_REPS;
static int congested_period = CONGESTED_PERIOD;
static struct dentry *tunables_dir;
static struct dentry *tunables_file;

static int __init setup_nobau(char *arg)
{
	nobau = 1;
	return 0;
}
early_param("nobau", setup_nobau);

/* base pnode in this partition */
static int uv_partition_base_pnode __read_mostly;
/* position of pnode (which is nasid>>1): */
static int uv_nshift __read_mostly;
static unsigned long uv_mmask __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);

/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (uvhub == b)
			return node;
	}
	return -1;
}

/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}
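
/*
 * Editorial note (inferred from this file's usage, not from a hardware
 * spec): the low UV_SW_ACK_NPENDING bits of
 * UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE are the Pending bits and the next
 * UV_SW_ACK_NPENDING bits are the matching Timeout bits; writing a bit to
 * the _ALIAS register clears it. That is why uv_reply_to_message() below
 * writes (sw_ack_vector << UV_SW_ACK_NPENDING) | sw_ack_vector: it drops
 * a resource's Pending and Timeout bits in one store.
 */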

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static inline void uv_reply_to_message(struct msg_desc *mdp,
				       struct bau_control *bcp)
{
	unsigned long dw;
	struct bau_payload_queue_entry *msg;

	msg = mdp->msg;
	if (!msg->canceled) {
		dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) |
				msg->sw_ack_vector;
		uv_write_local_mmr(
				UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
	}
	msg->replied_to = 1;
	msg->sw_ack_vector = 0;
}

/*
 * Process the receipt of a RETRY message
 */
static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
					    struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	int slot2;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_payload_queue_entry *msg;
	struct bau_payload_queue_entry *msg2;
	struct ptc_stats *stat;

	msg = mdp->msg;
	stat = bcp->statp;
	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->va_queue_last)
			msg2 = mdp->va_queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as uv_do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->sw_ack_vector) && ((msg2->sw_ack_vector &
			msg->sw_ack_vector) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			slot2 = msg2 - mdp->va_queue_first;
			mmr = uv_read_local_mmr
				(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
			msg_res = msg2->sw_ack_vector;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				/*
				 * is the resource timed out?
				 * make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				uv_write_local_mmr(
				    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
					(msg_res << UV_SW_ACK_NPENDING) |
								msg_res);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpus may come here at the same time for this message.
 */
static void uv_bau_process_message(struct msg_desc *mdp,
				   struct bau_control *bcp)
{
	int msg_ack_count;
	short socket_ack_count = 0;
	struct ptc_stats *stat;
	struct bau_payload_queue_entry *msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	msg = mdp->msg;
	stat = bcp->statp;
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried. That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		uv_bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a sw_ack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	socket_ack_count = atomic_add_short_return(1, (struct atomic_short *)
			&smaster->socket_acknowledge_count[mdp->msg_slot]);
	if (socket_ack_count == bcp->cpus_in_socket) {
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
		msg_ack_count = atomic_add_short_return(socket_ack_count,
				(struct atomic_short *)&msg->acknowledge_count);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 */
			uv_reply_to_message(mdp, bcp);
		}
	}

	return;
}

/*
 * Determine the first cpu on a uvhub.
 */
static int uvhub_to_first_cpu(int uvhub)
{
	int cpu;
	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return cpu;
	return -1;
}

/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero sw_ack_vector field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * sw_ack resources.
 */
static void
uv_do_reset(void *ptr)
{
	int i;
	int slot;
	int count = 0;
	unsigned long mmr;
	unsigned long msg_res;
	struct bau_control *bcp;
	struct reset_args *rap;
	struct bau_payload_queue_entry *msg;
	struct ptc_stats *stat;

	bcp = &per_cpu(bau_control, smp_processor_id());
	rap = (struct reset_args *)ptr;
	stat = bcp->statp;
	stat->d_resets++;

	/*
	 * We're looking for the given sender, and
	 * will free its sw_ack resource.
	 * If all cpus finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		/* uv_do_reset: same conditions for cancellation as
		   uv_bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->sw_ack_vector) &&
		    (msg->msg_type != MSG_NOOP)) {
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			slot = msg - bcp->va_queue_first;
			count++;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = uv_read_local_mmr
					(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
			msg_res = msg->sw_ack_vector;
			if (mmr & msg_res) {
				stat->d_rcanceled++;
				uv_write_local_mmr(
				    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
					(msg_res << UV_SW_ACK_NPENDING) |
								msg_res);
			}
		}
	}
	return;
}
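
/*
 * Editorial note: the trailing 1 in the smp_call_function_many() call
 * below is the 'wait' flag, so the sender spins until every targeted cpu
 * has finished uv_do_reset(). That is what makes passing &reset_args,
 * which lives on the caller's stack, safe.
 */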

/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */
static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution,
			      int sender)
{
	int uvhub;
	int cpu;
	cpumask_t mask;
	struct reset_args reset_args;

	reset_args.sender = sender;

	cpus_clear(mask);
	/* find a single cpu for each uvhub in this distribution mask */
	for (uvhub = 0;
		    uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE;
		    uvhub++) {
		if (!bau_uvhub_isset(uvhub, distribution))
			continue;
		/* find a cpu for this uvhub */
		cpu = uvhub_to_first_cpu(uvhub);
		cpu_set(cpu, mask);
	}
	/* IPI all cpus; Preemption is already disabled */
	smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1);
	return;
}

static inline unsigned long
cycles_2_us(unsigned long long cyc)
{
	unsigned long long ns;
	unsigned long us;
	ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
						>> CYC2NS_SCALE_FACTOR;
	us = ns / 1000;
	return us;
}

/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * uv_flush_send_and_wait()
 */
static inline void
quiesce_local_uvhub(struct bau_control *hmaster)
{
	atomic_add_short_return(1, (struct atomic_short *)
		&hmaster->uvhub_quiesce);
}

/*
 * mark this quiet-requestor as done
 */
static inline void
end_uvhub_quiesce(struct bau_control *hmaster)
{
	atomic_add_short_return(-1, (struct atomic_short *)
		&hmaster->uvhub_quiesce);
}

/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */
static int uv_wait_completion(struct bau_desc *bau_desc,
	unsigned long mmr_offset, int right_shift, int this_cpu,
	struct bau_control *bcp, struct bau_control *smaster, long try)
{
	unsigned long descriptor_status;
	cycles_t ttime;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *hmaster;

	hmaster = bcp->uvhub_master;

	/* spin on the status MMR, waiting for it to go idle */
	while ((descriptor_status = (((unsigned long)
		uv_read_local_mmr(mmr_offset) >>
			right_shift) & UV_ACT_STATUS_MASK)) !=
			DESC_STATUS_IDLE) {
		/*
		 * Our software ack messages may be blocked because there are
		 * no swack resources available. As long as none of them
		 * has timed out hardware will NACK our message and its
		 * state will stay IDLE.
		 */
		if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status ==
					DESC_STATUS_DESTINATION_TIMEOUT) {
			stat->s_dtimeout++;
			ttime = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending. In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttime - bcp->send_message) <
							timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

static inline cycles_t
sec_2_cycles(unsigned long sec)
{
	unsigned long ns;
	cycles_t cyc;

	ns = sec * 1000000000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}

/*
 * conditionally add 1 to *v, unless *v is >= u
 * return 0 if we cannot add 1 to *v because it is >= u
 * return 1 if we can add 1 to *v because it is < u
 * the add is atomic
 *
 * This is close to atomic_add_unless(), but this allows the 'u' value
 * to be lowered below the current 'v'. atomic_add_unless can only stop
 * on equal.
 */
static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
{
	spin_lock(lock);
	if (atomic_read(v) >= u) {
		spin_unlock(lock);
		return 0;
	}
	atomic_inc(v);
	spin_unlock(lock);
	return 1;
}

/*
 * Our retries are blocked by all destination swack resources being
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 */
static void
destination_plugged(struct bau_desc *bau_desc, struct bau_control *bcp,
			struct bau_control *hmaster, struct ptc_stats *stat)
{
	udelay(bcp->plugged_delay);
	bcp->plugged_tries++;
	if (bcp->plugged_tries >= bcp->plugsb4reset) {
		bcp->plugged_tries = 0;
		quiesce_local_uvhub(hmaster);
		spin_lock(&hmaster->queue_lock);
		uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
		spin_unlock(&hmaster->queue_lock);
		end_uvhub_quiesce(hmaster);
		bcp->ipi_attempts++;
		stat->s_resets_plug++;
	}
}

static void
destination_timeout(struct bau_desc *bau_desc, struct bau_control *bcp,
			struct bau_control *hmaster, struct ptc_stats *stat)
{
	hmaster->max_bau_concurrent = 1;
	bcp->timeout_tries++;
	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
		bcp->timeout_tries = 0;
		quiesce_local_uvhub(hmaster);
		spin_lock(&hmaster->queue_lock);
		uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
		spin_unlock(&hmaster->queue_lock);
		end_uvhub_quiesce(hmaster);
		bcp->ipi_attempts++;
		stat->s_resets_timeout++;
	}
}

/*
 * Completions are taking a very long time due to a congested numalink
 * network.
 */
static void
disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;

	/* let only one cpu do this disabling */
	spin_lock(&disable_lock);
	if (!baudisabled && bcp->period_requests &&
	    ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
		/* it becomes this cpu's job to turn on the use of the
		   BAU again */
		baudisabled = 1;
		bcp->set_bau_off = 1;
		bcp->set_bau_on_time = get_cycles() +
			sec_2_cycles(bcp->congested_period);
		stat->s_bau_disabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			tbcp->baudisabled = 1;
		}
	}
	spin_unlock(&disable_lock);
}
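
/*
 * Illustration (editorial; assumes the defaults CONGESTED_RESPONSE_US ==
 * 1000 and CONGESTED_PERIOD == 30): the BAU is switched off once the
 * average broadcast in the current period has taken more than 1000
 * microseconds (congested_cycles, in tsc cycles), and the disabling cpu
 * re-enables it from uv_flush_tlb_others() about 30 seconds later.
 */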

/**
 * uv_flush_send_and_wait
 *
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to including
 * cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */
int uv_flush_send_and_wait(struct bau_desc *bau_desc,
			   struct cpumask *flush_mask, struct bau_control *bcp)
{
	int right_shift;
	int completion_status = 0;
	int seq_number = 0;
	long try = 0;
	int cpu = bcp->uvhub_cpu;
	int this_cpu = bcp->cpu;
	unsigned long mmr_offset;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
	cycles_t elapsed;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *smaster = bcp->socket_master;
	struct bau_control *hmaster = bcp->uvhub_master;

	if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
			&hmaster->active_descriptor_count,
			hmaster->max_bau_concurrent)) {
		stat->s_throttles++;
		do {
			cpu_relax();
		} while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
			&hmaster->active_descriptor_count,
			hmaster->max_bau_concurrent));
	}
	while (hmaster->uvhub_quiesce)
		cpu_relax();

	if (cpu < UV_CPUS_PER_ACT_STATUS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = cpu * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift =
		    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
	}
	time1 = get_cycles();
	do {
		if (try == 0) {
			bau_desc->header.msg_type = MSG_REGULAR;
			seq_number = bcp->message_number++;
		} else {
			bau_desc->header.msg_type = MSG_RETRY;
			stat->s_retry_messages++;
		}
		bau_desc->header.sequence = seq_number;
		index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
			bcp->uvhub_cpu;
		bcp->send_message = get_cycles();
		uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
		try++;
		completion_status = uv_wait_completion(bau_desc, mmr_offset,
			right_shift, this_cpu, bcp, smaster, try);

		if (completion_status == FLUSH_RETRY_PLUGGED) {
			destination_plugged(bau_desc, bcp, hmaster, stat);
		} else if (completion_status == FLUSH_RETRY_TIMEOUT) {
			destination_timeout(bau_desc, bcp, hmaster, stat);
		}
		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
			bcp->ipi_attempts = 0;
			completion_status = FLUSH_GIVEUP;
			break;
		}
		cpu_relax();
	} while ((completion_status == FLUSH_RETRY_PLUGGED) ||
		 (completion_status == FLUSH_RETRY_TIMEOUT));
	time2 = get_cycles();
	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;
	if ((completion_status == FLUSH_COMPLETE) &&
	    (bcp->conseccompletes > bcp->complete_threshold) &&
	    (hmaster->max_bau_concurrent <
					hmaster->max_bau_concurrent_constant))
		hmaster->max_bau_concurrent++;
	while (hmaster->uvhub_quiesce)
		cpu_relax();
	atomic_dec(&hmaster->active_descriptor_count);
	if (time2 > time1) {
		elapsed = time2 - time1;
		stat->s_time += elapsed;
		if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
			bcp->period_requests++;
			bcp->period_time += elapsed;
			if ((elapsed > congested_cycles) &&
			    (bcp->period_requests > bcp->congested_reps)) {
				disable_for_congestion(bcp, stat);
			}
		}
	} else
		stat->s_requestor--;
	if (completion_status == FLUSH_COMPLETE && try > 1)
		stat->s_retriesok++;
	else if (completion_status == FLUSH_GIVEUP) {
		stat->s_giveup++;
		return 1;
	}

	return 0;
}

/**
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLB's
 * @cpumask: mask of all cpus in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done. The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
					  struct mm_struct *mm,
					  unsigned long va, unsigned int cpu)
{
	int tcpu;
	int uvhub;
	int locals = 0;
	int remotes = 0;
	int hubs = 0;
	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;
	struct bau_control *tbcp;

	/* kernel was booted 'nobau' */
	if (nobau)
		return cpumask;

	bcp = &per_cpu(bau_control, cpu);
	stat = bcp->statp;

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		/* the cpu that disabled it must re-enable it */
		if (bcp->set_bau_off) {
			if (get_cycles() >= bcp->set_bau_on_time) {
				stat->s_bau_reenabled++;
				baudisabled = 0;
				for_each_present_cpu(tcpu) {
					tbcp = &per_cpu(bau_control, tcpu);
					tbcp->baudisabled = 0;
					tbcp->period_requests = 0;
					tbcp->period_time = 0;
				}
			}
		}
		return cpumask;
	}

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask. All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/* don't actually do a shootdown of the local cpu */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
	if (cpu_isset(cpu, *cpumask))
		stat->s_ntargself++;

	bau_desc = bcp->descriptor_base;
	bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);

	/* cpu statistics */
	for_each_cpu(tcpu, flush_mask) {
		uvhub = uv_cpu_to_blade_id(tcpu);
		bau_uvhub_set(uvhub, &bau_desc->distribution);
		if (uvhub == bcp->uvhub)
			locals++;
		else
			remotes++;
	}
	if ((locals + remotes) == 0)
		return NULL;
	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;
	remotes = bau_uvhub_weight(&bau_desc->distribution);

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);
	if (locals) {
		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);
	} else
		stat->s_ntargremoteuvhub += hubs;
	stat->s_ntarguvhub += hubs;
	if (hubs >= 16)
		stat->s_ntarguvhub16++;
	else if (hubs >= 8)
		stat->s_ntarguvhub8++;
	else if (hubs >= 4)
		stat->s_ntarguvhub4++;
	else if (hubs >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;

	/*
	 * uv_flush_send_and_wait returns 0 if all cpus were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
	 */
	if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
		return NULL;
	else
		return cpumask;
}

/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
 * Interrupts are disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this hub get this interrupt.
 * The last one to see it does the software ack.
 * (the resource will not be freed until noninterruptible cpus see this
 *  interrupt; hardware may timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	int count = 0;
	cycles_t time_start;
	struct bau_payload_queue_entry *msg;
	struct bau_control *bcp;
	struct ptc_stats *stat;
	struct msg_desc msgdesc;

	time_start = get_cycles();
	bcp = &per_cpu(bau_control, smp_processor_id());
	stat = bcp->statp;
	msgdesc.va_queue_first = bcp->va_queue_first;
	msgdesc.va_queue_last = bcp->va_queue_last;
	msg = bcp->bau_msg_head;
	while (msg->sw_ack_vector) {
		count++;
		msgdesc.msg_slot = msg - msgdesc.va_queue_first;
		msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1;
		msgdesc.msg = msg;
		uv_bau_process_message(&msgdesc, bcp);
		msg++;
		if (msg > msgdesc.va_queue_last)
			msg = msgdesc.va_queue_first;
		bcp->bau_msg_head = msg;
	}
	stat->d_time += (get_cycles() - time_start);
	if (!count)
		stat->d_nomsg++;
	else if (count > 1)
		stat->d_multmsg++;
	ack_APIC_irq();
}

/*
 * uv_enable_timeouts
 *
 * Each target uvhub (i.e. a uvhub that has cpus) needs to have
 * shootdown message timeouts enabled. The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
static void uv_enable_timeouts(void)
{
	int uvhub;
	int nuvhubs;
	int pnode;
	unsigned long mmr_image;

	nuvhubs = uv_num_possible_blades();

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!uv_blade_nr_possible_cpus(uvhub))
			continue;

		pnode = uv_blade_to_pnode(uvhub);
		mmr_image =
		    uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
		/*
		 * Set the timeout period and then lock it in, in three
		 * steps; captures and locks in the period.
		 *
		 * To program the period, the SOFT_ACK_MODE must be off.
		 */
		mmr_image &= ~((unsigned long)1 <<
		    UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Set the 4-bit period.
		 */
		mmr_image &= ~((unsigned long)0xf <<
		    UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
		mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
		    UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Subsequent reversals of the timebase bit (3) cause an
		 * immediate timeout of one or all INTD resources as
		 * indicated in bits 2:0 (7 causes all of them to timeout).
		 */
		mmr_image |= ((unsigned long)1 <<
		    UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
	}
}

static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}

static inline unsigned long long
microsec_2_cycles(unsigned long microsec)
{
	unsigned long ns;
	unsigned long long cyc;

	ns = microsec * 1000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}
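
/*
 * Conversion note (editorial; assumes the generic x86 cyc2ns scaling):
 * per_cpu(cyc2ns) holds nanoseconds-per-cycle scaled up by
 * CYC2NS_SCALE_FACTOR, so ns = (cyc * cyc2ns) >> CYC2NS_SCALE_FACTOR as
 * in cycles_2_us() above, and the inverse used here is
 * cyc = (ns << CYC2NS_SCALE_FACTOR) / cyc2ns. For example, on an assumed
 * 2 GHz tsc with CYC2NS_SCALE_FACTOR == 10, cyc2ns is about 512 and
 * microsec_2_cycles(1) is about (1000 << 10) / 512 = 2000 cycles.
 */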

/*
 * Display the statistics through /proc.
 * 'data' points to the cpu number
 */
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	int cpu;

	cpu = *(loff_t *)data;

	if (!cpu) {
		seq_printf(file,
		"# cpu sent stime self locals remotes ncpus localhub ");
		seq_printf(file,
			"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
		seq_printf(file,
			"numuvhubs4 numuvhubs2 numuvhubs1 dto ");
		seq_printf(file,
			"retries rok resetp resett giveup sto bz throt ");
		seq_printf(file,
			"sw_ack recv rtime all ");
		seq_printf(file,
			"one mult none retry canc nocan reset rcan ");
		seq_printf(file,
			"disable enable\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		stat = &per_cpu(ptcstats, cpu);
		/* source side statistics */
		seq_printf(file,
			"cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, stat->s_requestor, cycles_2_us(stat->s_time),
			   stat->s_ntargself, stat->s_ntarglocals,
			   stat->s_ntargremotes, stat->s_ntargcpu,
			   stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
			   stat->s_ntarguvhub, stat->s_ntarguvhub16);
		seq_printf(file, "%ld %ld %ld %ld %ld ",
			   stat->s_ntarguvhub8, stat->s_ntarguvhub4,
			   stat->s_ntarguvhub2, stat->s_ntarguvhub1,
			   stat->s_dtimeout);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
			   stat->s_retry_messages, stat->s_retriesok,
			   stat->s_resets_plug, stat->s_resets_timeout,
			   stat->s_giveup, stat->s_stimeout,
			   stat->s_busy, stat->s_throttles);

		/* destination side statistics */
		seq_printf(file,
			   "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
					UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
			   stat->d_requestee, cycles_2_us(stat->d_time),
			   stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
			   stat->d_nomsg, stat->d_retries, stat->d_canceled,
			   stat->d_nocanceled, stat->d_resets,
			   stat->d_rcanceled);
		seq_printf(file, "%ld %ld\n",
			stat->s_bau_disabled, stat->s_bau_reenabled);
	}

	return 0;
}

/*
 * Display the tunables through debugfs
 */
static ssize_t tunables_read(struct file *file, char __user *userbuf,
				size_t count, loff_t *ppos)
{
	char *buf;
	int ret;

	buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
		"max_bau_concurrent plugged_delay plugsb4reset",
		"timeoutsb4reset ipi_reset_limit complete_threshold",
		"congested_response_us congested_reps congested_period",
		max_bau_concurrent, plugged_delay, plugsb4reset,
		timeoutsb4reset, ipi_reset_limit, complete_threshold,
		congested_response_us, congested_reps, congested_period);

	if (!buf)
		return -ENOMEM;

	ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
	kfree(buf);
	return ret;
}
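
/*
 * Usage sketch (editorial; the path assumes UV_BAU_TUNABLES_DIR is
 * "sgi_uv" and UV_BAU_TUNABLES_FILE is "bau_tunables", as created by
 * uv_ptc_init() below):
 *
 *	cat /sys/kernel/debug/sgi_uv/bau_tunables
 *
 * prints one line naming the nine tunables and one line of their current
 * values, in the order expected by tunables_write().
 */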
printk(KERN_DEBUG "Sender statistics:\n"); 1049 printk(KERN_DEBUG 1050 "sent: number of shootdown messages sent\n"); 1051 printk(KERN_DEBUG 1052 "stime: time spent sending messages\n"); 1053 printk(KERN_DEBUG 1054 "numuvhubs: number of hubs targeted with shootdown\n"); 1055 printk(KERN_DEBUG 1056 "numuvhubs16: number times 16 or more hubs targeted\n"); 1057 printk(KERN_DEBUG 1058 "numuvhubs8: number times 8 or more hubs targeted\n"); 1059 printk(KERN_DEBUG 1060 "numuvhubs4: number times 4 or more hubs targeted\n"); 1061 printk(KERN_DEBUG 1062 "numuvhubs2: number times 2 or more hubs targeted\n"); 1063 printk(KERN_DEBUG 1064 "numuvhubs1: number times 1 hub targeted\n"); 1065 printk(KERN_DEBUG 1066 "numcpus: number of cpus targeted with shootdown\n"); 1067 printk(KERN_DEBUG 1068 "dto: number of destination timeouts\n"); 1069 printk(KERN_DEBUG 1070 "retries: destination timeout retries sent\n"); 1071 printk(KERN_DEBUG 1072 "rok: : destination timeouts successfully retried\n"); 1073 printk(KERN_DEBUG 1074 "resetp: ipi-style resource resets for plugs\n"); 1075 printk(KERN_DEBUG 1076 "resett: ipi-style resource resets for timeouts\n"); 1077 printk(KERN_DEBUG 1078 "giveup: fall-backs to ipi-style shootdowns\n"); 1079 printk(KERN_DEBUG 1080 "sto: number of source timeouts\n"); 1081 printk(KERN_DEBUG 1082 "bz: number of stay-busy's\n"); 1083 printk(KERN_DEBUG 1084 "throt: number times spun in throttle\n"); 1085 printk(KERN_DEBUG "Destination side statistics:\n"); 1086 printk(KERN_DEBUG 1087 "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n"); 1088 printk(KERN_DEBUG 1089 "recv: shootdown messages received\n"); 1090 printk(KERN_DEBUG 1091 "rtime: time spent processing messages\n"); 1092 printk(KERN_DEBUG 1093 "all: shootdown all-tlb messages\n"); 1094 printk(KERN_DEBUG 1095 "one: shootdown one-tlb messages\n"); 1096 printk(KERN_DEBUG 1097 "mult: interrupts that found multiple messages\n"); 1098 printk(KERN_DEBUG 1099 "none: interrupts that found no messages\n"); 1100 printk(KERN_DEBUG 1101 "retry: number of retry messages processed\n"); 1102 printk(KERN_DEBUG 1103 "canc: number messages canceled by retries\n"); 1104 printk(KERN_DEBUG 1105 "nocan: number retries that found nothing to cancel\n"); 1106 printk(KERN_DEBUG 1107 "reset: number of ipi-style reset requests processed\n"); 1108 printk(KERN_DEBUG 1109 "rcan: number messages canceled by reset requests\n"); 1110 printk(KERN_DEBUG 1111 "disable: number times use of the BAU was disabled\n"); 1112 printk(KERN_DEBUG 1113 "enable: number times use of the BAU was re-enabled\n"); 1114 } else if (input_arg == -1) { 1115 for_each_present_cpu(cpu) { 1116 stat = &per_cpu(ptcstats, cpu); 1117 memset(stat, 0, sizeof(struct ptc_stats)); 1118 } 1119 } 1120 1121 return count; 1122} 1123 1124static int local_atoi(const char *name) 1125{ 1126 int val = 0; 1127 1128 for (;; name++) { 1129 switch (*name) { 1130 case '0' ... 

static int local_atoi(const char *name)
{
	int val = 0;

	for (;; name++) {
		switch (*name) {
		case '0' ... '9':
			val = 10*val+(*name-'0');
			break;
		default:
			return val;
		}
	}
}

/*
 * set the tunables
 * 0 values reset them to defaults
 */
static ssize_t tunables_write(struct file *file, const char __user *user,
				size_t count, loff_t *data)
{
	int cpu;
	int cnt = 0;
	int val;
	char *p;
	char *q;
	char instr[64];
	struct bau_control *bcp;

	if (count == 0 || count > sizeof(instr)-1)
		return -EINVAL;
	if (copy_from_user(instr, user, count))
		return -EFAULT;

	instr[count] = '\0';
	/* count the fields */
	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (; *p; p = q + strspn(q, WHITESPACE)) {
		q = p + strcspn(p, WHITESPACE);
		cnt++;
		if (q == p)
			break;
	}
	if (cnt != 9) {
		printk(KERN_INFO "bau tunable error: should be 9 numbers\n");
		return -EINVAL;
	}

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
		q = p + strcspn(p, WHITESPACE);
		val = local_atoi(p);
		switch (cnt) {
		case 0:
			if (val == 0) {
				max_bau_concurrent = MAX_BAU_CONCURRENT;
				max_bau_concurrent_constant =
							MAX_BAU_CONCURRENT;
				continue;
			}
			bcp = &per_cpu(bau_control, smp_processor_id());
			if (val < 1 || val > bcp->cpus_in_uvhub) {
				printk(KERN_DEBUG
				"Error: BAU max concurrent %d is invalid\n",
				val);
				return -EINVAL;
			}
			max_bau_concurrent = val;
			max_bau_concurrent_constant = val;
			continue;
		case 1:
			if (val == 0)
				plugged_delay = PLUGGED_DELAY;
			else
				plugged_delay = val;
			continue;
		case 2:
			if (val == 0)
				plugsb4reset = PLUGSB4RESET;
			else
				plugsb4reset = val;
			continue;
		case 3:
			if (val == 0)
				timeoutsb4reset = TIMEOUTSB4RESET;
			else
				timeoutsb4reset = val;
			continue;
		case 4:
			if (val == 0)
				ipi_reset_limit = IPI_RESET_LIMIT;
			else
				ipi_reset_limit = val;
			continue;
		case 5:
			if (val == 0)
				complete_threshold = COMPLETE_THRESHOLD;
			else
				complete_threshold = val;
			continue;
		case 6:
			if (val == 0)
				congested_response_us = CONGESTED_RESPONSE_US;
			else
				congested_response_us = val;
			continue;
		case 7:
			if (val == 0)
				congested_reps = CONGESTED_REPS;
			else
				congested_reps = val;
			continue;
		case 8:
			if (val == 0)
				congested_period = CONGESTED_PERIOD;
			else
				congested_period = val;
			continue;
		}
		if (q == p)
			break;
	}
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->max_bau_concurrent = max_bau_concurrent;
		bcp->max_bau_concurrent_constant = max_bau_concurrent;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->congested_response_us = congested_response_us;
		bcp->congested_reps = congested_reps;
		bcp->congested_period = congested_period;
	}
	return count;
}
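
/*
 * Usage sketch (editorial; same assumed debugfs path as above). All nine
 * fields must be supplied, in the order printed by tunables_read(); a 0
 * restores that field's built-in default, e.g.:
 *
 *	echo "0 0 0 0 0 0 0 0 0" > /sys/kernel/debug/sgi_uv/bau_tunables
 *
 * resets every tunable to its default.
 */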

static const struct seq_operations uv_ptc_seq_ops = {
	.start = uv_ptc_seq_start,
	.next = uv_ptc_seq_next,
	.stop = uv_ptc_seq_stop,
	.show = uv_ptc_seq_show
};

static int uv_ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}

static int tunables_open(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct file_operations proc_uv_ptc_operations = {
	.open = uv_ptc_proc_open,
	.read = seq_read,
	.write = uv_ptc_proc_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations tunables_fops = {
	.open = tunables_open,
	.read = tunables_read,
	.write = tunables_write,
	.llseek = default_llseek,
};

static int __init uv_ptc_init(void)
{
	struct proc_dir_entry *proc_uv_ptc;

	if (!is_uv_system())
		return 0;

	proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
				  &proc_uv_ptc_operations);
	if (!proc_uv_ptc) {
		printk(KERN_ERR "unable to create %s proc entry\n",
		       UV_PTC_BASENAME);
		return -EINVAL;
	}

	tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
	if (!tunables_dir) {
		printk(KERN_ERR "unable to create debugfs directory %s\n",
		       UV_BAU_TUNABLES_DIR);
		return -EINVAL;
	}
	tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
			tunables_dir, NULL, &tunables_fops);
	if (!tunables_file) {
		printk(KERN_ERR "unable to create debugfs file %s\n",
		       UV_BAU_TUNABLES_FILE);
		return -EINVAL;
	}
	return 0;
}

/*
 * initialize the sending side's sending buffers
 */
static void
uv_activation_descriptor_init(int node, int pnode)
{
	int i;
	int cpu;
	unsigned long pa;
	unsigned long m;
	unsigned long n;
	struct bau_desc *bau_desc;
	struct bau_desc *bd2;
	struct bau_control *bcp;

	/*
	 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
	 * per cpu; and one per cpu on the uvhub (UV_ADP_SIZE)
	 */
	bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
				* UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
	BUG_ON(!bau_desc);

	pa = uv_gpa(bau_desc); /* need the real nasid */
	n = pa >> uv_nshift;
	m = pa & uv_mmask;

	uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
			      (n << UV_DESC_BASE_PNODE_SHIFT | m));
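
	/*
	 * Editorial note: uv_gpa() returns a global physical address; with
	 * uv_nshift set to m_val in uv_bau_init() below, n is the node
	 * portion (the bits above the node-local offset) and m is the
	 * offset, re-packed here into the DESCRIPTOR_BASE register layout.
	 */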

	/*
	 * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
	 * cpu even though we only use the first one; one descriptor can
	 * describe a broadcast to 256 uv hubs.
	 */
	for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
		i++, bd2++) {
		memset(bd2, 0, sizeof(struct bau_desc));
		bd2->header.sw_ack_flag = 1;
		/*
		 * base_dest_nodeid is the nasid of the first uvhub
		 * in the partition. The bit map will indicate uvhub numbers,
		 * which are 0-N in a partition. Pnodes are unique system-wide.
		 */
		bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode);
		bd2->header.dest_subnodeid = 0x10; /* the LB */
		bd2->header.command = UV_NET_ENDPOINT_INTD;
		bd2->header.int_both = 1;
		/*
		 * all others need to be set to zero:
		 *   fairness chaining multilevel count replied_to
		 */
	}
	for_each_present_cpu(cpu) {
		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
			continue;
		bcp = &per_cpu(bau_control, cpu);
		bcp->descriptor_base = bau_desc;
	}
}

/*
 * initialize the destination side's receiving buffers
 * entered for each uvhub in the partition
 * - node is first node (kernel memory notion) on the uvhub
 * - pnode is the uvhub's physical identifier
 */
static void
uv_payload_queue_init(int node, int pnode)
{
	int pn;
	int cpu;
	char *cp;
	unsigned long pa;
	struct bau_payload_queue_entry *pqp;
	struct bau_payload_queue_entry *pqp_malloc;
	struct bau_control *bcp;

	pqp = kmalloc_node((DEST_Q_SIZE + 1)
			   * sizeof(struct bau_payload_queue_entry),
			   GFP_KERNEL, node);
	BUG_ON(!pqp);
	pqp_malloc = pqp;

	cp = (char *)pqp + 31;
	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);

	for_each_present_cpu(cpu) {
		if (pnode != uv_cpu_to_pnode(cpu))
			continue;
		/* for every cpu on this pnode: */
		bcp = &per_cpu(bau_control, cpu);
		bcp->va_queue_first = pqp;
		bcp->bau_msg_head = pqp;
		bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
	}
	/*
	 * need the pnode of where the memory was really allocated
	 */
	pa = uv_gpa(pqp);
	pn = pa >> uv_nshift;
	uv_write_global_mmr64(pnode,
			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
			      ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
			      (unsigned long)
			      uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1)));
	/* in effect, all msg_type's are set to MSG_NOOP */
	memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
}

/*
 * Initialization of each UV hub's structures
 */
static void __init uv_init_uvhub(int uvhub, int vector)
{
	int node;
	int pnode;
	unsigned long apicid;

	node = uvhub_to_first_node(uvhub);
	pnode = uv_blade_to_pnode(uvhub);
	uv_activation_descriptor_init(node, pnode);
	uv_payload_queue_init(node, pnode);
	/*
	 * the below initialization can't be in firmware because the
	 * messaging IRQ will be determined by the OS
	 */
	apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
	uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
			      ((apicid << 32) | vector));
}

/*
 * We will set BAU_MISC_CONTROL with a timeout period.
 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
 * So the destination timeout period has to be calculated from them.
 */
static int
calculate_destination_timeout(void)
{
	unsigned long mmr_image;
	int mult1;
	int mult2;
	int index;
	int base;
	int ret;
	unsigned long ts_ns;

	mult1 = UV_INTD_SOFT_ACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
	mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
	index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
	mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
	mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
	base = timeout_base_ns[index];
	ts_ns = base * mult1 * mult2;
	ret = ts_ns / 1000;
	return ret;
}
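
/*
 * Worked example (editorial; the actual multipliers are BIOS-dependent):
 * if the urgency7 field selects index 3 then base = timeout_base_ns[3] =
 * 10240; with mult1 = 15 and mult2 = 4, ts_ns = 10240 * 15 * 4 = 614400,
 * so calculate_destination_timeout() returns roughly 614 microseconds.
 */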

/*
 * initialize the bau_control structure for each cpu
 */
static int __init uv_init_per_cpu(int nuvhubs)
{
	int i;
	int cpu;
	int pnode;
	int uvhub;
	int have_hmaster;
	short socket = 0;
	unsigned short socket_mask;
	unsigned char *uvhub_mask;
	struct bau_control *bcp;
	struct uvhub_desc *bdp;
	struct socket_desc *sdp;
	struct bau_control *hmaster = NULL;
	struct bau_control *smaster = NULL;
	struct socket_desc {
		short num_cpus;
		short cpu_number[MAX_CPUS_PER_SOCKET];
	};
	struct uvhub_desc {
		unsigned short socket_mask;
		short num_cpus;
		short uvhub;
		short pnode;
		struct socket_desc socket[2];
	};
	struct uvhub_desc *uvhub_descs;

	timeout_us = calculate_destination_timeout();

	uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		memset(bcp, 0, sizeof(struct bau_control));
		pnode = uv_cpu_hub_info(cpu)->pnode;
		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
		*(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
		bdp = &uvhub_descs[uvhub];
		bdp->num_cpus++;
		bdp->uvhub = uvhub;
		bdp->pnode = pnode;
		/* kludge: 'assuming' one node per socket, and assuming that
		   disabling a socket just leaves a gap in node numbers */
		socket = (cpu_to_node(cpu) & 1);
		bdp->socket_mask |= (1 << socket);
		sdp = &bdp->socket[socket];
		sdp->cpu_number[sdp->num_cpus] = cpu;
		sdp->num_cpus++;
		if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
			printk(KERN_EMERG "%d cpus per socket invalid\n",
			       sdp->num_cpus);
			return 1;
		}
	}
	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
			continue;
		have_hmaster = 0;
		bdp = &uvhub_descs[uvhub];
		socket_mask = bdp->socket_mask;
		socket = 0;
		while (socket_mask) {
			if (!(socket_mask & 1))
				goto nextsocket;
			sdp = &bdp->socket[socket];
			for (i = 0; i < sdp->num_cpus; i++) {
				cpu = sdp->cpu_number[i];
				bcp = &per_cpu(bau_control, cpu);
				bcp->cpu = cpu;
				if (i == 0) {
					smaster = bcp;
					if (!have_hmaster) {
						have_hmaster++;
						hmaster = bcp;
					}
				}
				bcp->cpus_in_uvhub = bdp->num_cpus;
				bcp->cpus_in_socket = sdp->num_cpus;
				bcp->socket_master = smaster;
				bcp->uvhub = bdp->uvhub;
				bcp->uvhub_master = hmaster;
				bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->
						blade_processor_id;
				if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
					printk(KERN_EMERG
						"%d cpus per uvhub invalid\n",
						bcp->uvhub_cpu);
					return 1;
				}
			}
nextsocket:
			socket++;
			socket_mask = (socket_mask >> 1);
		}
	}
	kfree(uvhub_descs);
	kfree(uvhub_mask);
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->baudisabled = 0;
		bcp->statp = &per_cpu(ptcstats, cpu);
		/* time interval to catch a hardware stay-busy bug */
		bcp->timeout_interval = microsec_2_cycles(2*timeout_us);
		bcp->max_bau_concurrent = max_bau_concurrent;
		bcp->max_bau_concurrent_constant = max_bau_concurrent;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->congested_response_us = congested_response_us;
		bcp->congested_reps = congested_reps;
		bcp->congested_period = congested_period;
	}
	return 0;
}

/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int uvhub;
	int pnode;
	int nuvhubs;
	int cur_cpu;
	int vector;
	unsigned long mmr;

	if (!is_uv_system())
		return 0;

	if (nobau)
		return 0;

	for_each_possible_cpu(cur_cpu)
		zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
				       GFP_KERNEL, cpu_to_node(cur_cpu));

	uv_nshift = uv_hub_info->m_val;
	uv_mmask = (1UL << uv_hub_info->m_val) - 1;
	nuvhubs = uv_num_possible_blades();
	spin_lock_init(&disable_lock);
	congested_cycles = microsec_2_cycles(congested_response_us);

	if (uv_init_per_cpu(nuvhubs)) {
		nobau = 1;
		return 0;
	}

	uv_partition_base_pnode = 0x7fffffff;
	for (uvhub = 0; uvhub < nuvhubs; uvhub++)
		if (uv_blade_nr_possible_cpus(uvhub) &&
			(uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
			uv_partition_base_pnode = uv_blade_to_pnode(uvhub);

	vector = UV_BAU_MESSAGE;
	for_each_possible_blade(uvhub)
		if (uv_blade_nr_possible_cpus(uvhub))
			uv_init_uvhub(uvhub, vector);

	uv_enable_timeouts();
	alloc_intr_gate(vector, uv_bau_message_intr1);

	for_each_possible_blade(uvhub) {
		if (uv_blade_nr_possible_cpus(uvhub)) {
			pnode = uv_blade_to_pnode(uvhub);
			/* INIT the bau */
			uv_write_global_mmr64(pnode,
					UVH_LB_BAU_SB_ACTIVATION_CONTROL,
					((unsigned long)1 << 63));
			mmr = 1; /* should be 1 to broadcast to both sockets */
			uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST,
						mmr);
		}
	}

	return 0;
}
core_initcall(uv_bau_init);
fs_initcall(uv_ptc_init);