sn2_smp.c revision 5dfe4c964a0dd7bb3a1d64a4166835a153146207
/*
 * SN2 Platform specific SMP Support
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000-2006 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/nodemask.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/tlb.h>
#include <asm/numa.h>
#include <asm/hw_irq.h>
#include <asm/current.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/rw_mmr.h>

DEFINE_PER_CPU(struct ptc_stats, ptcstats);
DECLARE_PER_CPU(struct ptc_stats, ptcstats);

static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);

extern unsigned long
sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
                               volatile unsigned long *, unsigned long,
                               volatile unsigned long *, unsigned long);
void
sn2_ptc_deadlock_recovery(short *, short, short, int,
                          volatile unsigned long *, unsigned long,
                          volatile unsigned long *, unsigned long);

/*
 * Note: some of the following is captured here to make debugging easier
 * (the macros make more sense if you see the debug patch - not posted)
 */
#define sn2_ptctest                     0
#define local_node_uses_ptc_ga(sh1)    ((sh1) ? 1 : 0)
#define max_active_pio(sh1)            ((sh1) ? 32 : 7)
#define reset_max_active_on_deadlock() 1
#define PTC_LOCK(sh1)                  ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock)

struct ptc_stats {
        unsigned long ptc_l;
        unsigned long change_rid;
        unsigned long shub_ptc_flushes;
        unsigned long nodes_flushed;
        unsigned long deadlocks;
        unsigned long deadlocks2;
        unsigned long lock_itc_clocks;
        unsigned long shub_itc_clocks;
        unsigned long shub_itc_clocks_max;
        unsigned long shub_ptc_flushes_not_my_mm;
};
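
/*
 * wait_piowc() below spins until the local Shub reports no pending PIO
 * writes for this cpu, then returns nonzero if the WRITE_DEADLOCK status
 * bit was raised while the writes drained; callers use that to trigger
 * deadlock recovery.
 */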
static inline unsigned long wait_piowc(void)
{
        volatile unsigned long *piows;
        unsigned long zeroval, ws;

        piows = pda->pio_write_status_addr;
        zeroval = pda->pio_write_status_val;
        do {
                cpu_relax();
        } while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval);
        return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0;
}

/**
 * sn_migrate - SN-specific task migration actions
 * @task: Task being migrated to new CPU
 *
 * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order.
 * Context switching user threads which have memory-mapped MMIO may cause
 * PIOs to issue from separate CPUs, thus the PIO writes must be drained
 * from the previous CPU's Shub before execution resumes on the new CPU.
 */
void sn_migrate(struct task_struct *task)
{
        pda_t *last_pda = pdacpu(task_thread_info(task)->last_cpu);
        volatile unsigned long *adr = last_pda->pio_write_status_addr;
        unsigned long val = last_pda->pio_write_status_val;

        /* Drain PIO writes from old CPU's Shub */
        while (unlikely((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK)
                        != val))
                cpu_relax();
}

void sn_tlb_migrate_finish(struct mm_struct *mm)
{
        /* flush_tlb_mm is inefficient if the mm has more than one user */
        if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
                flush_tlb_mm(mm);
}

/**
 * sn2_global_tlb_purge - globally purge translation cache of virtual address range
 * @mm: mm_struct containing virtual address range
 * @start: start of virtual address range
 * @end: end of virtual address range
 * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
 *
 * Purges the translation caches of all processors of the given virtual address
 * range.
 *
 * Note:
 *	- cpu_vm_mask is a bit mask that indicates which cpus have loaded the context.
 *	- cpu_vm_mask is converted into a nodemask of the nodes containing the
 *	  cpus in cpu_vm_mask.
 *	- if only one bit is set in cpu_vm_mask, and it is the current cpu, and
 *	  the process is purging its own virtual address range, then only the
 *	  local TLB needs to be flushed. This flushing can be done using
 *	  ptc.l. This is the common case and avoids the global spinlock.
 *	- if multiple cpus have loaded the context, then flushing has to be
 *	  done with ptc.g/MMRs under protection of the global ptc_lock.
 */

void
sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
                     unsigned long end, unsigned long nbits)
{
        int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid;
        int mymm = (mm == current->active_mm && mm == current->mm);
        int use_cpu_ptcga;
        volatile unsigned long *ptc0, *ptc1;
        unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
        short nasids[MAX_NUMNODES], nix;
        nodemask_t nodes_flushed;
        int active, max_active, deadlock;

        nodes_clear(nodes_flushed);
        i = 0;

        for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
                cnode = cpu_to_node(cpu);
                node_set(cnode, nodes_flushed);
                lcpu = cpu;
                i++;
        }

        if (i == 0)
                return;

        preempt_disable();

        if (likely(i == 1 && lcpu == smp_processor_id() && mymm)) {
                do {
                        ia64_ptcl(start, nbits << 2);
                        start += (1UL << nbits);
                } while (start < end);
                ia64_srlz_i();
                __get_cpu_var(ptcstats).ptc_l++;
                preempt_enable();
                return;
        }

        if (atomic_read(&mm->mm_users) == 1 && mymm) {
                flush_tlb_mm(mm);
                __get_cpu_var(ptcstats).change_rid++;
                preempt_enable();
                return;
        }

        itc = ia64_get_itc();
        nix = 0;
        for_each_node_mask(cnode, nodes_flushed)
                nasids[nix++] = cnodeid_to_nasid(cnode);

        rr_value = (mm->context << 3) | REGION_NUMBER(start);

        shub1 = is_shub1();
        if (shub1) {
                data0 = (1UL << SH1_PTC_0_A_SHFT) |
                        (nbits << SH1_PTC_0_PS_SHFT) |
                        (rr_value << SH1_PTC_0_RID_SHFT) |
                        (1UL << SH1_PTC_0_START_SHFT);
                ptc0 = (volatile unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
                ptc1 = (volatile unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
        } else {
                data0 = (1UL << SH2_PTC_A_SHFT) |
                        (nbits << SH2_PTC_PS_SHFT) |
                        (1UL << SH2_PTC_START_SHFT);
                ptc0 = (volatile unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC +
                        (rr_value << SH2_PTC_RID_SHFT));
                ptc1 = NULL;
        }
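
        /*
         * Interface note: on shub1 a flush is a pair of MMR writes (PTC_0
         * carries the RID and page size, PTC_1 the start address), while
         * shub2 encodes the RID in the PTC register's offset and takes the
         * address in a single MMR write.
         */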
        mynasid = get_nasid();
        use_cpu_ptcga = local_node_uses_ptc_ga(shub1);
        max_active = max_active_pio(shub1);

        itc = ia64_get_itc();
        spin_lock_irqsave(PTC_LOCK(shub1), flags);
        itc2 = ia64_get_itc();

        __get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc;
        __get_cpu_var(ptcstats).shub_ptc_flushes++;
        __get_cpu_var(ptcstats).nodes_flushed += nix;
        if (!mymm)
                __get_cpu_var(ptcstats).shub_ptc_flushes_not_my_mm++;

        if (use_cpu_ptcga && !mymm) {
                old_rr = ia64_get_rr(start);
                ia64_set_rr(start, (old_rr & 0xff) | (rr_value << 8));
                ia64_srlz_d();
        }

        wait_piowc();
        do {
                if (shub1)
                        data1 = start | (1UL << SH1_PTC_1_START_SHFT);
                else
                        data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
                deadlock = 0;
                active = 0;
                for (ibegin = 0, i = 0; i < nix; i++) {
                        nasid = nasids[i];
                        if (use_cpu_ptcga && unlikely(nasid == mynasid)) {
                                ia64_ptcga(start, nbits << 2);
                                ia64_srlz_i();
                        } else {
                                ptc0 = CHANGE_NASID(nasid, ptc0);
                                if (ptc1)
                                        ptc1 = CHANGE_NASID(nasid, ptc1);
                                pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
                                active++;
                        }
                        if (active >= max_active || i == (nix - 1)) {
                                if ((deadlock = wait_piowc())) {
                                        sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1);
                                        if (reset_max_active_on_deadlock())
                                                max_active = 1;
                                }
                                active = 0;
                                ibegin = i + 1;
                        }
                }
                start += (1UL << nbits);
        } while (start < end);

        itc2 = ia64_get_itc() - itc2;
        __get_cpu_var(ptcstats).shub_itc_clocks += itc2;
        if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
                __get_cpu_var(ptcstats).shub_itc_clocks_max = itc2;

        if (old_rr) {
                ia64_set_rr(start, old_rr);
                ia64_srlz_d();
        }

        spin_unlock_irqrestore(PTC_LOCK(shub1), flags);

        preempt_enable();
}

/*
 * sn2_ptc_deadlock_recovery
 *
 * Recover from PTC deadlock conditions. Recovery requires stepping through
 * each TLB flush transaction. The recovery sequence is somewhat tricky and
 * is coded in assembly language.
 */

void
sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid,
                          volatile unsigned long *ptc0, unsigned long data0,
                          volatile unsigned long *ptc1, unsigned long data1)
{
        short nasid, i;
        unsigned long *piows, zeroval, n;

        __get_cpu_var(ptcstats).deadlocks++;

        piows = (unsigned long *) pda->pio_write_status_addr;
        zeroval = pda->pio_write_status_val;
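
        /*
         * Step through each outstanding flush, one node at a time. The
         * local node is skipped when it was flushed with ptc.ga rather
         * than an MMR write, since it has no PIO outstanding.
         */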
        for (i = ib; i <= ie; i++) {
                nasid = nasids[i];
                if (local_node_uses_ptc_ga(is_shub1()) && nasid == mynasid)
                        continue;
                ptc0 = CHANGE_NASID(nasid, ptc0);
                if (ptc1)
                        ptc1 = CHANGE_NASID(nasid, ptc1);

                n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
                __get_cpu_var(ptcstats).deadlocks2 += n;
        }
}

/**
 * sn_send_IPI_phys - send an IPI to a Nasid and slice
 * @nasid: nasid to receive the interrupt (may be outside partition)
 * @physid: physical cpuid to receive the interrupt.
 * @vector: command to send
 * @delivery_mode: delivery mechanism
 *
 * Sends an IPI (interprocessor interrupt) to the processor specified by
 * @physid.
 *
 * @delivery_mode can be one of the following
 *
 * %IA64_IPI_DM_INT - pend an interrupt
 * %IA64_IPI_DM_PMI - pend a PMI
 * %IA64_IPI_DM_NMI - pend an NMI
 * %IA64_IPI_DM_INIT - pend an INIT interrupt
 */
void sn_send_IPI_phys(int nasid, long physid, int vector, int delivery_mode)
{
        long val;
        unsigned long flags = 0;
        volatile long *p;

        p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
        val = (1UL << SH_IPI_INT_SEND_SHFT) |
              (physid << SH_IPI_INT_PID_SHFT) |
              ((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) |
              ((long)vector << SH_IPI_INT_IDX_SHFT) |
              (0x000feeUL << SH_IPI_INT_BASE_SHFT);

        mb();
        if (enable_shub_wars_1_1()) {
                spin_lock_irqsave(&sn2_global_ptc_lock, flags);
        }
        pio_phys_write_mmr(p, val);
        if (enable_shub_wars_1_1()) {
                wait_piowc();
                spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
        }
}

EXPORT_SYMBOL(sn_send_IPI_phys);

/**
 * sn2_send_IPI - send an IPI to a processor
 * @cpuid: target of the IPI
 * @vector: command to send
 * @delivery_mode: delivery mechanism
 * @redirect: redirect the IPI?
 *
 * Sends an IPI (InterProcessor Interrupt) to the processor specified by
 * @cpuid. @vector specifies the command to send, while @delivery_mode can
 * be one of the following
 *
 * %IA64_IPI_DM_INT - pend an interrupt
 * %IA64_IPI_DM_PMI - pend a PMI
 * %IA64_IPI_DM_NMI - pend an NMI
 * %IA64_IPI_DM_INIT - pend an INIT interrupt
 */
void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
{
        long physid;
        int nasid;

        physid = cpu_physical_id(cpuid);
        nasid = cpuid_to_nasid(cpuid);

        /* the following is used only when starting cpus at boot time */
        if (unlikely(nasid == -1))
                ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);

        sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
}

#ifdef CONFIG_PROC_FS

#define PTC_BASENAME	"sgi_sn/ptc_statistics"

static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
{
        if (*offset < NR_CPUS)
                return offset;
        return NULL;
}

static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset)
{
        (*offset)++;
        if (*offset < NR_CPUS)
                return offset;
        return NULL;
}

static void sn2_ptc_seq_stop(struct seq_file *file, void *data)
{
}
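
/*
 * Emit one statistics line per online cpu. ITC cycle counts are reported
 * in nanoseconds: nsec = 1000 * clocks / cyc_per_usec.
 */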
static int sn2_ptc_seq_show(struct seq_file *file, void *data)
{
        struct ptc_stats *stat;
        int cpu;

        cpu = *(loff_t *) data;

        if (!cpu) {
                seq_printf(file,
                           "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2\n");
                seq_printf(file, "# ptctest %d\n", sn2_ptctest);
        }

        if (cpu < NR_CPUS && cpu_online(cpu)) {
                stat = &per_cpu(ptcstats, cpu);
                seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
                           stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
                           stat->deadlocks,
                           1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
                           1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
                           1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
                           stat->shub_ptc_flushes_not_my_mm,
                           stat->deadlocks2);
        }
        return 0;
}

static struct seq_operations sn2_ptc_seq_ops = {
        .start = sn2_ptc_seq_start,
        .next = sn2_ptc_seq_next,
        .stop = sn2_ptc_seq_stop,
        .show = sn2_ptc_seq_show
};

static int sn2_ptc_proc_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &sn2_ptc_seq_ops);
}

static const struct file_operations proc_sn2_ptc_operations = {
        .open = sn2_ptc_proc_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static struct proc_dir_entry *proc_sn2_ptc;

static int __init sn2_ptc_init(void)
{
        if (!ia64_platform_is("sn2"))
                return 0;

        if (!(proc_sn2_ptc = create_proc_entry(PTC_BASENAME, 0444, NULL))) {
                printk(KERN_ERR "unable to create %s proc entry\n", PTC_BASENAME);
                return -EINVAL;
        }
        proc_sn2_ptc->proc_fops = &proc_sn2_ptc_operations;
        spin_lock_init(&sn2_global_ptc_lock);
        return 0;
}

static void __exit sn2_ptc_exit(void)
{
        remove_proc_entry(PTC_BASENAME, NULL);
}

module_init(sn2_ptc_init);
module_exit(sn2_ptc_exit);
#endif /* CONFIG_PROC_FS */
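
/*
 * Usage sketch (assuming procfs is mounted at /proc):
 *
 *	$ cat /proc/sgi_sn/ptc_statistics
 *	# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks ...
 *	# ptctest 0
 *	cpu 0 ...
 *
 * One line per online cpu, in the format emitted by sn2_ptc_seq_show().
 */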