sba_iommu.c revision d54b1fdb1d9f82e375a299e22bd366aad52d4c34
/*
** System Bus Adapter (SBA) I/O MMU manager
**
** (c) Copyright 2000-2004 Grant Grundler <grundler @ parisc-linux x org>
** (c) Copyright 2004 Naresh Kumar Inna <knaresh at india x hp x com>
** (c) Copyright 2000-2004 Hewlett-Packard Company
**
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
**
** This module initializes the IOC (I/O Controller) found on B1000/C3000/
** J5000/J7000/N-class/L-class machines and their successors.
**
** FIXME: add DMA hint support programming in both sba and lba modules.
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/dma.h>		/* for DMA_CHUNK_SIZE */

#include <asm/hardware.h>	/* for register_parisc_driver() stuff */

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/ropes.h>
#include <asm/mckinley.h>	/* for proc_mckinley_root */
#include <asm/runway.h>		/* for proc_runway_root */
#include <asm/pdc.h>		/* for PDC_MODEL_* */
#include <asm/pdcpat.h>		/* for is_pdc_pat() */
#include <asm/parisc-device.h>

#define MODULE_NAME "SBA"

/*
** The number of debug flags is a clue - this code is fragile.
** Don't even think about messing with it unless you have
** plenty of 710's to sacrifice to the computer gods. :^)
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_DMB_TRAP

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif


#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#define SBA_INLINE	__inline__

#define DEFAULT_DMA_HINT_REG	0

struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);

static unsigned long ioc_needs_fdc = 0;

/* global count of IOMMUs in the system */
static unsigned int global_ioc_cnt = 0;

/* PA8700 (Piranha 2.2) bug workaround */
static unsigned long piranha_bad_128k = 0;

/* Looks nice and keeps the compiler happy */
#define SBA_DEV(d) ((struct sba_device *) (d))

#ifdef CONFIG_AGP_PARISC
#define SBA_AGP_SUPPORT
#endif /*CONFIG_AGP_PARISC*/

#ifdef SBA_AGP_SUPPORT
static int sba_reserve_agpgart = 1;
module_param(sba_reserve_agpgart, int, 1);
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif

#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))


/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (ie follow writes which must reach HW with a read)
**
** Superdome (in particular, REO) allows only 64-bit CSR accesses.
*/
#define READ_REG32(addr)	readl(addr)
#define READ_REG64(addr)	readq(addr)
#define WRITE_REG32(val, addr)	writel((val), (addr))
#define WRITE_REG64(val, addr)	writeq((val), (addr))

#ifdef CONFIG_64BIT
#define READ_REG(addr)		READ_REG64(addr)
#define WRITE_REG(value, addr)	WRITE_REG64(value, addr)
#else
#define READ_REG(addr)		READ_REG32(addr)
#define WRITE_REG(value, addr)	WRITE_REG32(value, addr)
#endif
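
/*
** Illustrative sketch (not compiled into the driver): because register
** writes are posted, code that must know a CSR write has reached the
** chip follows it with a read of the same register, as the unmap path
** below does with the purge register:
**
**	WRITE_REG(iovp | log2_size, ioc->ioc_hpa + IOC_PCOM);
**	READ_REG(ioc->ioc_hpa + IOC_PCOM);	/- force write to HW -/
*/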

#ifdef DEBUG_SBA_INIT

/* NOTE: When CONFIG_64BIT isn't defined, READ_REG64() is two 32-bit reads */

/**
 * sba_dump_ranges - debugging only - print ranges assigned to this IOA
 * @hpa: base address of the sba
 *
 * Print the MMIO and IO Port address ranges forwarded by an Astro/Ike/RIO
 * IO Adapter (aka Bus Converter).
 */
static void
sba_dump_ranges(void __iomem *hpa)
{
	DBG_INIT("SBA at 0x%p\n", hpa);
	DBG_INIT("IOS_DIST_BASE   : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
	DBG_INIT("IOS_DIST_MASK   : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
	DBG_INIT("IOS_DIST_ROUTE  : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
	DBG_INIT("\n");
	DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
	DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
	DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
}

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
static void sba_dump_tlb(void __iomem *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", hpa);
	DBG_INIT("IOC_IBASE    : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
#else
#define sba_dump_ranges(x)
#define sba_dump_tlb(x)
#endif	/* DEBUG_SBA_INIT */


#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
	unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >> 3) & ~(sizeof(unsigned long) - 1)]);
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
		 msg,
		 rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
			(rcnt == (pide & (BITS_PER_LONG - 1)))
				? "    -->" : "       ",
			rcnt, ptr, *ptr );
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}


/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and pdir state is consistent
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
	u32 *rptr = (u32 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;	/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u32 rval = *rptr;
		int rcnt = 32;	/* number of bits we might check */

		while (rcnt) {
			/* Get last byte and highest bit from that */
			u32 pde = ((u32) (((char *)pptr)[7])) << 24;
			if ((rval ^ pde) & 0x80000000)
			{
				/*
				** BUMMER!  -- res_map != pdir --
				** Dump rval and matching pdir entries
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval <<= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}


/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
				nents,
				(unsigned long) sg_dma_address(startsg),
				sg_dma_len(startsg),
				sg_virt_addr(startsg), startsg->length);
		startsg++;
	}
}

#endif /* ASSERT_PDIR_SANITY */




/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */

#ifdef ZX1_SUPPORT
/* Pluto (aka ZX1) boxes need to set or clear the ibase bits appropriately */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
#else
/* only support Astro and ancestors. Saves a few cycles in key places */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
#define SBA_IOVP(ioc,iova) (iova)
#endif

#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)

#define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)
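
/*
** Illustrative values, assuming a 64-bit kernel (BITS_PER_LONG == 64):
**
**	RESMAP_MASK(1)  == 0x8000000000000000UL	(one page, MSB first)
**	RESMAP_MASK(4)  == 0xf000000000000000UL	(four pages)
**	RESMAP_IDX_MASK == 7
**
** sba_search_bitmap() below shifts such a mask right in steps of the
** allocation's alignment until it finds a run of clear bits.
*/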

/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
{
	unsigned long *res_ptr = ioc->res_hint;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long pide = ~0UL;

	if (bits_wanted > (BITS_PER_LONG/2)) {
		/* Search word at a time - no mask needed */
		for(; res_ptr < res_end; ++res_ptr) {
			if (*res_ptr == 0) {
				*res_ptr = RESMAP_MASK(bits_wanted);
				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
				pide <<= 3;	/* convert to bit address */
				break;
			}
		}
		/* point to the next word on next pass */
		res_ptr++;
		ioc->res_bitshift = 0;
	} else {
		/*
		** Search the resource bit map on well-aligned values.
		** "o" is the alignment.
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
		unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
		uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
		unsigned long mask;

		if (bitshiftcnt >= BITS_PER_LONG) {
			bitshiftcnt = 0;
			res_ptr++;
		}
		mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;

		DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
		while(res_ptr < res_end)
		{
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			WARN_ON(mask == 0);
			if(((*res_ptr) & mask) == 0) {
				*res_ptr |= mask;     /* mark resources busy! */
				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
				pide <<= 3;	/* convert to bit address */
				pide += bitshiftcnt;
				break;
			}
			mask >>= o;
			bitshiftcnt += o;
			if (mask == 0) {
				mask = RESMAP_MASK(bits_wanted);
				bitshiftcnt=0;
				res_ptr++;
			}
		}
		/* look in the same word on the next pass */
		ioc->res_bitshift = bitshiftcnt + bits_wanted;
	}

	/* wrapped ? */
	if (res_end <= res_ptr) {
		ioc->res_hint = (unsigned long *) ioc->res_map;
		ioc->res_bitshift = 0;
	} else {
		ioc->res_hint = res_ptr;
	}
	return (pide);
}


/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked and then mark those bits in the
 * resource bit map.
 */
static int
sba_alloc_range(struct ioc *ioc, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef SBA_COLLECT_STATS
	unsigned long cr_start = mfctl(16);
#endif
	unsigned long pide;

	pide = sba_search_bitmap(ioc, pages_needed);
	if (pide >= (ioc->res_size << 3)) {
		pide = sba_search_bitmap(ioc, pages_needed);
		if (pide >= (ioc->res_size << 3))
			panic("%s: I/O MMU @ %p is out of mapping resources\n",
			      __FILE__, ioc->ioc_hpa);
	}

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__FUNCTION__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift );

#ifdef SBA_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ?
			-(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;

	ioc->used_pages += pages_needed;
#endif

	return (pide);
}


/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);

	int bits_not_wanted = size >> IOVP_SHIFT;

	/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
	unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));

	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
		__FUNCTION__, (uint) iova, size,
		bits_not_wanted, m, pide, res_ptr, *res_ptr);

#ifdef SBA_COLLECT_STATS
	ioc->used_pages -= bits_not_wanted;
#endif

	*res_ptr &= ~m;
}


/**************************************************************
*
* "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

#ifdef SBA_HINT_SUPPORT
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
#endif

typedef unsigned long space_t;
#define KERNEL_SPACE 0

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @sid: process Space ID - currently only support KERNEL_SPACE
 * @vba: Virtual CPU address of buffer to map
 * @hint: DMA hint set to use for this mapping
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg2) and space id, (sid, arg1)
 * sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
 * pdir_ptr (arg0).
 * Using the bass-ackwards HP bit numbering, Each IO Pdir entry
 * for Astro/Ike looks like:
 *
 *
 *  0                    19                                 51   55       63
 * +-+---------------------+----------------------------------+----+--------+
 * |V|        U            |            PPN[43:12]            | U  |   VI   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 * Pluto is basically identical, supports fewer physical address bits:
 *
 *  0                       23                              51   55       63
 * +-+------------------------+-------------------------------+----+--------+
 * |V|        U               |         PPN[39:12]            | U  |   VI   |
 * +-+------------------------+-------------------------------+----+--------+
 *
 *  V  == Valid Bit  (Most Significant Bit is bit 0)
 *  U  == Unused
 * PPN == Physical Page Number
 * VI  == Virtual Index (aka Coherent Index)
 *
 * LPA instruction output is put into PPN field.
 * LCI (Load Coherence Index) instruction provides the "VI" bits.
 *
 * We pre-swap the bytes since PCX-W is Big Endian and the
 * IOMMU uses little endian for the pdir.
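 *
 * Illustrative encoding (values assumed, not taken from real hardware):
 * for a page with virt_to_phys(vba) == 0x12345000 and an LCI result
 * whose coherent index is 0xab000:
 *
 *	pa  = 0x12345000 & IOVP_MASK	->	0x12345000
 *	pa |= (0xab000 >> 12) & 0xff	->	0x123450ab
 *	pa |= SBA_PDIR_VALID_BIT	->	0x80000000123450ab
 *
 * which cpu_to_le64() byte-swaps before the store, since the IOMMU
 * reads the pdir little endian.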
 */

void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		  unsigned long hint)
{
	u64 pa; /* physical address */
	register unsigned ci; /* coherent index */

	pa = virt_to_phys(vba);
	pa &= IOVP_MASK;

	mtsp(sid,1);
	asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	pa |= (ci >> 12) & 0xff;  /* move CI (8 bits) into lowest byte */

	pa |= SBA_PDIR_VALID_BIT;	/* set "valid" bit */
	*pdir_ptr = cpu_to_le64(pa);	/* swap and store into I/O Pdir */

	/*
	 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
	 * (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	if (ioc_needs_fdc)
		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
}


/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Marking the IO PDIR entry(ies) as Invalid and invalidate
 * corresponding IO TLB entry. The Ike PCOM (Purge Command Register)
 * is to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc,iova);
	u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set.
	**
	** Even though this is a big-endian machine, the entries
	** in the iopdir are little endian. That's why we look at
	** the byte at +7 instead of at +0.
	*/
	if (0x80 != (((u8 *) pdir_ptr)[7])) {
		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt > IOVP_SIZE)
	{
#if 0
		unsigned long entries_per_cacheline = ioc_needs_fdc ?
				L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
					- (unsigned long) pdir_ptr;
				: 262144;
#endif

		/* set "size" field for PCOM */
		iovp |= get_order(byte_cnt) + PAGE_SHIFT;

		do {
			/* clear I/O Pdir entry "valid" bit first */
			((u8 *) pdir_ptr)[7] = 0;
			if (ioc_needs_fdc) {
				asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
#if 0
				entries_per_cacheline = L1_CACHE_SHIFT - 3;
#endif
			}
			pdir_ptr++;
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > IOVP_SIZE);
	} else
		iovp |= IOVP_SHIFT;	/* set "size" field for PCOM */

	/*
	** clear I/O PDIR entry "valid" bit.
	** We have to R/M/W the cacheline regardless how much of the
	** pdir entry that we clobber.
	** The rest of the entry would be useful for debugging if we
	** could dump core on HPMC.
	*/
	((u8 *) pdir_ptr)[7] = 0;
	if (ioc_needs_fdc)
		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));

	WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
}
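
/*
** Illustrative PCOM encoding (numbers assumed, 4KB pages): purging a
** 64KB range at iovp 0x40000 sets the size field to log2(64KB):
**
**	iovp |= get_order(65536) + PAGE_SHIFT;	->	0x40000 | (4 + 12)
**	WRITE_REG(..., ioc->ioc_hpa + IOC_PCOM);	purges 0x40000..0x4ffff
**
** 0x40000 is 64KB-aligned, satisfying the hardware's alignment
** requirement for the purge range.
*/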

/**
 * sba_dma_supported - PCI driver can query DMA support
 * @dev: instance of PCI owned by the driver that's asking
 * @mask:  number of address bits this PCI device can handle
 *
 * See Documentation/DMA-mapping.txt
 */
static int sba_dma_supported( struct device *dev, u64 mask)
{
	struct ioc *ioc;

	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return(0);
	}

	/* Documentation/DMA-mapping.txt tells drivers to try 64-bit first,
	 * then fall back to 32-bit if that fails.
	 * We are just "encouraging" 32-bit DMA masks here since we can
	 * never allow IOMMU bypass unless we add special support for ZX1.
	 */
	if (mask > ~0U)
		return 0;

	ioc = GET_IOC(dev);

	/*
	 * check if mask is >= than the current max IO Virt Address
	 * The max IO Virt address will *always* be < 30 bits.
	 */
	return((int)(mask >= (ioc->ibase - 1 +
			(ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
}


/**
 * sba_map_single - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr:  driver buffer to map.
 * @size:  number of bytes to map in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
static dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size,
	       enum dma_data_direction direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;

	ioc = GET_IOC(dev);

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = (size + offset + ~IOVP_MASK) & IOVP_MASK;

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check before sba_map_single()");
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
	pide = sba_alloc_range(ioc, size);
	iovp = (dma_addr_t) pide << IOVP_SHIFT;

	DBG_RUN("%s() 0x%p -> 0x%lx\n",
		__FUNCTION__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);

		DBG_RUN("	pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
			pdir_start,
			(u8) (((u8 *) pdir_start)[7]),
			(u8) (((u8 *) pdir_start)[6]),
			(u8) (((u8 *) pdir_start)[5]),
			(u8) (((u8 *) pdir_start)[4]),
			(u8) (((u8 *) pdir_start)[3]),
			(u8) (((u8 *) pdir_start)[2]),
			(u8) (((u8 *) pdir_start)[1]),
			(u8) (((u8 *) pdir_start)[0])
			);

		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
		pdir_start++;
	}

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check after sba_map_single()");
#endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
}
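
/*
** Illustrative rounding (4KB pages assumed): mapping a 0x1000 byte
** buffer whose virtual address ends in 0x678 takes two pdir entries:
**
**	offset = 0x678
**	size   = (0x1000 + 0x678 + 0xfff) & IOVP_MASK	->	0x2000
**
** and the IOVA handed back is (pide << IOVP_SHIFT) | 0x678.
*/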

/**
 * sba_unmap_single - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
static void
sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
		 enum dma_data_direction direction)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	DBG_RUN("%s() iovp 0x%lx/%x\n", __FUNCTION__, (long) iova, size);

	ioc = GET_IOC(dev);
	offset = iova & ~IOVP_MASK;
	iova ^= offset;        /* clear offset bits */
	size += offset;
	size = ROUNDUP(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef SBA_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	sba_mark_invalid(ioc, iova, size);

#if DELAYED_RESOURCE_CNT > 0
	/* Delaying when we re-use an IO Pdir entry reduces the number
	 * of MMIO reads needed to flush writes to the PCOM register.
	 */
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
		int cnt = ioc->saved_cnt;
		while (cnt--) {
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;

		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	}
#else /* DELAYED_RESOURCE_CNT == 0 */
	sba_free_range(ioc, iova, size);

	/* If fdc's were issued, force fdc's to be visible now */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
	** For Astro based systems this isn't a big deal WRT performance.
	** As long as 2.4 kernels copyin/copyout data from/to userspace,
	** we don't need the syncdma. The issue here is I/O MMU cachelines
	** are *not* coherent in all cases.  May be hwrev dependent.
	** Need to investigate more.
	asm volatile("syncdma");
	*/
}


/**
 * sba_alloc_consistent - allocate/map shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 *
 * See Documentation/DMA-mapping.txt
 */
static void *sba_alloc_consistent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	if (!hwdev) {
		/* only support PCI */
		*dma_handle = 0;
		return NULL;
	}

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = sba_map_single(hwdev, ret, size, 0);
	}

	return ret;
}


/**
 * sba_free_consistent - free/unmap shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address IOVA of "consistent" buffer.
 * @dma_handle:  IO virtual address of "consistent" buffer.
 *
 * See Documentation/DMA-mapping.txt
 */
static void
sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
		    dma_addr_t dma_handle)
{
	sba_unmap_single(hwdev, dma_handle, size, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}


/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef SBA_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif


/**
 * sba_map_sg - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;

	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);

	ioc = GET_IOC(dev);

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = sba_map_single(dev,
						(void *)sg_virt_addr(sglist),
						sglist->length, direction);
		sg_dma_len(sglist)     = sglist->length;
		return 1;
	}

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg()");
	}
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = iommu_coalesce_chunks(ioc, sglist, nents, sba_alloc_range);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o address contains the virtual address.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg()\n");
	}
#endif

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);

	return filled;
}
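
/*
** Illustrative caller (hypothetical driver code, not part of this
** file): a PCI driver of this era reaches sba_map_sg() through the
** generic DMA API, e.g.:
*/
#if 0
	int n = pci_map_sg(pdev, sglist, nents, PCI_DMA_TODEVICE);
	/* program n descriptors (n may be < nents after coalescing)
	** using sg_dma_address()/sg_dma_len(), then later: */
	pci_unmap_sg(pdev, sglist, nents, PCI_DMA_TODEVICE);
#endif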

/**
 * sba_unmap_sg - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	     enum dma_data_direction direction)
{
	struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries,  %p,%x\n",
		__FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);

	ioc = GET_IOC(dev);

#ifdef SBA_COLLECT_STATS
	ioc->usg_calls++;
#endif

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check before sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (sg_dma_len(sglist) && nents--) {

		sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
#ifdef SBA_COLLECT_STATS
		ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
		ioc->usingle_calls--;	/* kluge since call is unmap_sg() */
#endif
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__,  nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

}

static struct hppa_dma_ops sba_ops = {
	.dma_supported =	sba_dma_supported,
	.alloc_consistent =	sba_alloc_consistent,
	.alloc_noncoherent =	sba_alloc_consistent,
	.free_consistent =	sba_free_consistent,
	.map_single =		sba_map_single,
	.unmap_single =		sba_unmap_single,
	.map_sg =		sba_map_sg,
	.unmap_sg =		sba_unmap_sg,
	.dma_sync_single_for_cpu =	NULL,
	.dma_sync_single_for_device =	NULL,
	.dma_sync_sg_for_cpu =		NULL,
	.dma_sync_sg_for_device =	NULL,
};


/**************************************************************************
**
**   SBA PAT PDC support
**
**   o call pdc_pat_cell_module()
**   o store ranges in PCI "resource" structures
**
**************************************************************************/

static void
sba_get_pat_resources(struct sba_device *sba_dev)
{
#if 0
/*
** TODO/REVISIT/FIXME: support for directed ranges requires calls to
**      PAT PDC to program the SBA/LBA directed range registers...this
**      burden may fall on the LBA code since it directly supports the
**      PCI subsystem. It's not clear yet. - ggg
*/
PAT_MOD(mod)->mod_info.mod_pages   = PAT_GET_MOD_PAGES(temp);
	FIXME : ???
PAT_MOD(mod)->mod_info.dvi         = PAT_GET_DVI(temp);
	Tells where the dvi bits are located in the address.
PAT_MOD(mod)->mod_info.ioc         = PAT_GET_IOC(temp);
	FIXME : ???
#endif
}


/**************************************************************
*
*   Initialization and claim
*
***************************************************************/
#define PIRANHA_ADDR_MASK	0x00160000UL /* bit 17,18,20 */
#define PIRANHA_ADDR_VAL	0x00060000UL /* bit 17,18 on */
static void *
sba_alloc_pdir(unsigned int pdir_size)
{
	unsigned long pdir_base;
	unsigned long pdir_order = get_order(pdir_size);

	pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
	if (NULL == (void *) pdir_base)	{
		panic("%s() could not allocate I/O Page Table\n",
			__FUNCTION__);
	}

	/* If this is not PA8700 (PCX-W2)
	**	OR newer than ver 2.2
	**	OR in a system that doesn't need VINDEX bits from SBA,
	**
	** then we aren't exposed to the HW bug.
	*/
	if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
			|| (boot_cpu_data.pdc.versions > 0x202)
			|| (boot_cpu_data.pdc.capabilities & 0x08L) )
		return (void *) pdir_base;

	/*
	 * PA8700 (PCX-W2, aka piranha) silent data corruption fix
	 *
	 * An interaction between PA8700 CPU (Ver 2.2 or older) and
	 * Ike/Astro can cause silent data corruption. This is only
	 * a problem if the I/O PDIR is located in memory such that
	 * (little-endian) bits 17 and 18 are on and bit 20 is off.
	 *
	 * Since the max IO Pdir size is 2MB, by cleverly allocating the
	 * right physical address, we can either avoid (IOPDIR <= 1MB)
	 * or minimize (2MB IO Pdir) the problem if we restrict the
	 * IO Pdir to a maximum size of 2MB-128K (1920K).
	 *
	 * Because we always allocate 2^N sized IO pdirs, either of the
	 * "bad" regions will be the last 128K if at all. That's easy
	 * to test for.
	 *
	 */
	if (pdir_order <= (19-12)) {
		if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
			/* allocate a new one on 512k alignment */
			unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
			/* release original */
			free_pages(pdir_base, pdir_order);

			pdir_base = new_pdir;

			/* release excess */
			while (pdir_order < (19-12)) {
				new_pdir += pdir_size;
				free_pages(new_pdir, pdir_order);
				pdir_order +=1;
				pdir_size <<=1;
			}
		}
	} else {
		/*
		** 1MB or 2MB Pdir
		** Needs to be aligned on an "odd" 1MB boundary.
		*/
		unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1); /* 2 or 4MB */

		/* release original */
		free_pages( pdir_base, pdir_order);

		/* release first 1MB */
		free_pages(new_pdir, 20-12);

		pdir_base = new_pdir + 1024*1024;

		if (pdir_order > (20-12)) {
			/*
			** 2MB Pdir.
			**
			** Flag tells init_bitmap() to mark bad 128k as used
			** and to reduce the size by 128k.
			*/
			piranha_bad_128k = 1;

			new_pdir += 3*1024*1024;
			/* release last 1MB */
			free_pages(new_pdir, 20-12);

			/* release unusable 128KB */
			free_pages(new_pdir - 128*1024 , 17-12);

			pdir_size -= 128*1024;
		}
	}

	memset((void *) pdir_base, 0, pdir_size);
	return (void *) pdir_base;
}
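
/*
** Illustrative check (address assumed): a pdir whose last byte sits at
** physical 0x00865fff has
**
**	0x00865fff & PIRANHA_ADDR_MASK (0x00160000) == 0x00060000
**
** i.e. bits 17 and 18 set with bit 20 clear, so sba_alloc_pdir() above
** would re-allocate it at a safer alignment.
*/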

static struct device *next_device(struct klist_iter *i)
{
	struct klist_node * n = klist_next(i);
	return n ? container_of(n, struct device, knode_parent) : NULL;
}

/* setup Mercury or Elroy IBASE/IMASK registers. */
static void
setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	/* lba_set_iregs() is in drivers/parisc/lba_pci.c */
	extern void lba_set_iregs(struct parisc_device *, u32, u32);
	struct device *dev;
	struct klist_iter i;

	klist_iter_init(&sba->dev.klist_children, &i);
	while ((dev = next_device(&i))) {
		struct parisc_device *lba = to_parisc_device(dev);
		int rope_num = (lba->hpa.start >> 13) & 0xf;
		if (rope_num >> 3 == ioc_num)
			lba_set_iregs(lba, ioc->ibase, ioc->imask);
	}
	klist_iter_exit(&i);
}

static void
sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_mask;
	u32 iova_space_size;
	int iov_order, tcnfg;
#ifdef SBA_AGP_SUPPORT
	int agp_found = 0;
#endif
	/*
	** Firmware programs the base and size of a "safe IOVA space"
	** (one that doesn't overlap memory or LMMIO space) in the
	** IBASE and IMASK registers.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
	iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;

	if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
		printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
		iova_space_size /= 2;
	}

	/*
	** iov_order is always based on a 1GB IOVA space since we want to
	** turn on the other half for AGP GART.
	*/
	iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
		__FUNCTION__, ioc->ioc_hpa, iova_space_size >> 20,
		iov_order + PAGE_SHIFT);

	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic("Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
			__FUNCTION__, ioc->pdir_base, ioc->pdir_size);

#ifdef SBA_HINT_SUPPORT
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
		ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IOC and Elroy */
	iova_space_mask =  0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);
	ioc->imask = iova_space_mask;
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif
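
	/*
	** Illustrative values (assumed) for a 1GB IOVA window, i.e.
	** iov_order + PAGE_SHIFT == 30, with ibase 0x40000000 (cf. the
	** "IOVA Space" resource claimed in sba_hw_init):
	**
	**	iova_space_mask = 0xffffffff << 30	->	0xc0000000
	**	imask           = 0xc0000000
	**
	** so an address lies inside the window iff (iova & imask) == ibase.
	*/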
	sba_dump_tlb(ioc->ioc_hpa);

	setup_ibase_imask(sba, ioc, ioc_num);

	WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);

#ifdef CONFIG_64BIT
	/*
	** Setting the upper bits makes checking for bypass addresses
	** a little faster later on.
	*/
	ioc->imask |= 0xFFFFFFFF00000000UL;
#endif

	/* Set I/O PDIR Page size to system page size */
	switch (PAGE_SHIFT) {
		case 12: tcnfg = 0; break;	/*  4K */
		case 13: tcnfg = 1; break;	/*  8K */
		case 14: tcnfg = 2; break;	/* 16K */
		case 16: tcnfg = 3; break;	/* 64K */
		default:
			panic(__FILE__ ": Unsupported system page size %d",
				1 << PAGE_SHIFT);
			break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	/*
	** Program the IOC's ibase and enable IOVA translation
	** Bit zero == enable bit.
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);

	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);

#ifdef SBA_AGP_SUPPORT
{
	struct klist_iter i;
	struct device *dev = NULL;

	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA.  Unfortunately we can't know ahead of time
	** whether GART support will actually be used, for now we
	** can just key on any AGP device found in the system.
	** We program the next pdir index after we stop w/ a key for
	** the GART code to handshake on.
	*/
	klist_iter_init(&sba->dev.klist_children, &i);
	while ((dev = next_device(&i))) {
		struct parisc_device *lba = to_parisc_device(dev);
		if (IS_QUICKSILVER(lba))
			agp_found = 1;
	}
	klist_iter_exit(&i);

	if (agp_found && sba_reserve_agpgart) {
		printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
		       __FUNCTION__, (iova_space_size/2) >> 20);
		ioc->pdir_size /= 2;
		ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
	}
}
#endif /*SBA_AGP_SUPPORT*/

}

static void
sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_size, iova_space_mask;
	unsigned int pdir_size, iov_order;

	/*
	** Determine IOVA Space size from memory size.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own.  Next best thing would be to guess how much DMA
	** can be outstanding based on PCI Class/sub-class.  Both
	** methods still require some "extra" to support PCI
	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
	**
	** While we have 32-bit "IOVA" space, the top 2 bits are used
	** for DMA hints - ergo only 30 bits max.
	*/

	iova_space_size = (u32) (num_physpages/global_ioc_cnt);

	/* limit IOVA space size to 1MB-1GB */
	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
	}
	else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
	}
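
	/*
	** Illustrative sizing (assumed figures, 4KB pages): 512MB of
	** memory and a single IOC give
	**
	**	iova_space_size = 131072 pages	(512MB, inside 1MB..1GB)
	**	iov_order       = get_order(512MB)	->	17
	**	pdir_size       = (512MB / 4KB) * 8	->	1MB
	**
	** i.e. one u64 pdir entry per IOVP page.
	*/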

	/*
	** iova space must be log2() in size.
	** thus, pdir/res_map will also be log2().
	** PIRANHA BUG: Exception is when IO Pdir is 2MB (gets reduced)
	*/
	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* iova_space_size is now bytes, not pages */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);

	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
			__FUNCTION__,
			ioc->ioc_hpa,
			(unsigned long) num_physpages >> (20 - PAGE_SHIFT),
			iova_space_size>>20,
			iov_order + PAGE_SHIFT);

	ioc->pdir_base = sba_alloc_pdir(pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
			__FUNCTION__, ioc->pdir_base, pdir_size);

#ifdef SBA_HINT_SUPPORT
	/* FIXME : DMA HINTs not used */
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
			ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IOC and Elroy */
	iova_space_mask =  0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);

	/*
	** On C3000 w/512MB mem, HP-UX 10.20 reports:
	** ibase=0, imask=0xFE000000, size=0x2000000.
	*/
	ioc->ibase = 0;
	ioc->imask = iova_space_mask;	/* save it */
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif

	DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
		__FUNCTION__, ioc->ibase, ioc->imask);

	/*
	** FIXME: Hint registers are programmed with default hint
	** values during boot, so hints should be sane even if we
	** can't reprogram them the way drivers want.
	*/

	setup_ibase_imask(sba, ioc, ioc_num);

	/*
	** Program the IOC's ibase and enable IOVA translation
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
	WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);

	/* Set I/O PDIR Page size to 4K */
	WRITE_REG(0, ioc->ioc_hpa+IOC_TCNFG);

	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);

	ioc->ibase = 0; /* used by SBA_IOVA and related macros */

	DBG_INIT("%s() DONE\n", __FUNCTION__);
}



/**************************************************************************
**
**   SBA initialization code (HW and SW)
**
**   o identify SBA chip itself
**   o initialize SBA chip modes (HardFail)
**   o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/

static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
{
	return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
}

static void sba_hw_init(struct sba_device *sba_dev)
{
	int i;
	int num_ioc;
	u64 ioc_ctl;

	if (!is_pdc_pat()) {
		/* Shutdown the USB controller on Astro-based workstations.
		** Once we reprogram the IOMMU, the next DMA performed by
		** USB will HPMC the box. USB is only enabled if a
		** keyboard is present and found.
		**
		** With serial console, j6k v5.0 firmware says:
		** mem_kbd hpa 0xfee003f8 sba 0x0 pad 0x0 cl_class 0x7
		**
		** FIXME: Using GFX+USB console at power up but direct
		**	linux to serial console is still broken.
		**	USB could generate DMA so we must reset USB.
		**	The proper sequence would be:
		**	o block console output
		**	o reset USB device
		**	o reprogram serial port
		**	o unblock console output
		*/
		if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {
			pdc_io_reset_devices();
		}

	}


#if 0
printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
	PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class);

	/*
	** Need to deal with DMA from LAN.
	**	Maybe use page zero boot device as a handle to talk
	**	to PDC about which device to shutdown.
	**
	** Netbooting, j6k v5.0 firmware says:
	**	mem_boot hpa 0xf4008000 sba 0x0 pad 0x0 cl_class 0x1002
	** ARGH! invalid class.
	*/
	if ((PAGE0->mem_boot.cl_class != CL_RANDOM)
		&& (PAGE0->mem_boot.cl_class != CL_SEQU)) {
			pdc_io_reset();
	}
#endif

	if (!IS_PLUTO(sba_dev->dev)) {
		ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
			__FUNCTION__, sba_dev->sba_hpa, ioc_ctl);
		ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
		ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
			/* j6700 v1.6 firmware sets 0x294f */
			/* A500 firmware sets 0x4d */

		WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);

#ifdef DEBUG_SBA_INIT
		ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT(" 0x%Lx\n", ioc_ctl);
#endif
	} /* if !PLUTO */

	if (IS_ASTRO(sba_dev->dev)) {
		int err;
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Astro Intr Ack";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1) ;
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		BUG_ON(err < 0);

	} else if (IS_PLUTO(sba_dev->dev)) {
		int err;

		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		WARN_ON(err < 0);

		sba_dev->iommu_resv.name = "IOVA Space";
		sba_dev->iommu_resv.start = 0x40000000UL;
		sba_dev->iommu_resv.end   = 0x50000000UL - 1;
		err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
		WARN_ON(err < 0);
	} else {
		/* IKE, REO */
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
		sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
		num_ioc = 2;

		/* TODO - LOOKUP Ike/Stretch chipset mem map */
	}
	/* XXX: What about Reo Grande? */

	sba_dev->num_ioc = num_ioc;
	for (i = 0; i < num_ioc; i++) {
		void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
		unsigned int j;

		for (j=0; j < sizeof(u64) * ROPES_PER_IOC; j+=sizeof(u64)) {

			/*
			 * Clear ROPE(N)_CONFIG AO bit.
			 * Disables "NT Ordering" (~= !"Relaxed Ordering")
			 * Overrides bit 1 in DMA Hint Sets.
			 * Improves netperf UDP_STREAM by ~10% for bcm5701.
			 */
			if (IS_PLUTO(sba_dev->dev)) {
				void __iomem *rope_cfg;
				unsigned long cfg_val;

				rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
				cfg_val = READ_REG(rope_cfg);
				cfg_val &= ~IOC_ROPE_AO;
				WRITE_REG(cfg_val, rope_cfg);
			}

			/*
			** Make sure the box crashes on rope errors.
			*/
			WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
		}

		/* flush out the last writes */
		READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);

		DBG_INIT("	ioc[%d] ROPE_CFG 0x%Lx  ROPE_DBG 0x%Lx\n",
				i,
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
			);
		DBG_INIT("	STATUS_CONTROL 0x%Lx  FLUSH_CTRL 0x%Lx\n",
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
			);

		if (IS_PLUTO(sba_dev->dev)) {
			sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
		} else {
			sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
		}
	}
}

static void
sba_common_init(struct sba_device *sba_dev)
{
	int i;

	/* add this one to the head of the list (order doesn't matter)
	** This will be useful for debugging - especially if we get coredumps
	*/
	sba_dev->next = sba_list;
	sba_list = sba_dev;

	for(i=0; i< sba_dev->num_ioc; i++) {
		int res_size;
#ifdef DEBUG_DMB_TRAP
		extern void iterate_pages(unsigned long , unsigned long ,
					  void (*)(pte_t * , unsigned long),
					  unsigned long );
		void set_data_memory_break(pte_t * , unsigned long);
#endif
		/* resource map size dictated by pdir_size */
		res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */

		/* Second part of PIRANHA BUG */
		if (piranha_bad_128k) {
			res_size -= (128*1024)/sizeof(u64);
		}

		res_size >>= 3;	/* convert bit count to byte count */
		DBG_INIT("%s() res_size 0x%x\n",
			__FUNCTION__, res_size);

		sba_dev->ioc[i].res_size = res_size;
		sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
#endif

		if (NULL == sba_dev->ioc[i].res_map)
		{
			panic("%s:%s() could not allocate resource map\n",
			      __FILE__, __FUNCTION__ );
		}

		memset(sba_dev->ioc[i].res_map, 0, res_size);
		/* next available IOVP - circular search */
		sba_dev->ioc[i].res_hint = (unsigned long *)
				&(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);

#ifdef ASSERT_PDIR_SANITY
		/* Mark first bit busy - ie no IOVA 0 */
		sba_dev->ioc[i].res_map[0] = 0x80;
		sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
#endif

		/* Third (and last) part of PIRANHA BUG */
		if (piranha_bad_128k) {
			/* region from +1408K to +1536K is un-usable. */

			int idx_start = (1408*1024/sizeof(u64)) >> 3;
			int idx_end   = (1536*1024/sizeof(u64)) >> 3;
			long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
			long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]);

			/* mark that part of the io pdir busy */
			while (p_start < p_end)
				*p_start++ = -1;

		}

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
		iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
				set_data_memory_break, 0);
#endif

		DBG_INIT("%s() %d res_map %x %p\n",
			__FUNCTION__, i, res_size, sba_dev->ioc[i].res_map);
	}

	spin_lock_init(&sba_dev->sba_lock);
	ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;

#ifdef DEBUG_SBA_INIT
	/*
	 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
	 * (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	if (ioc_needs_fdc) {
		printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
	} else {
		printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
	}
#endif
}
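
/*
** Illustrative sizing, continuing the 512MB example in sba_ioc_init():
** a 1MB pdir holds 131072 u64 entries, so
**
**	res_size = 131072 / 8	->	16384 bytes (16KB of bitmap)
**
** one res_map bit per IOVP page.
*/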

#ifdef CONFIG_PROC_FS
static int sba_proc_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */
	int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
#ifdef SBA_COLLECT_STATS
	unsigned long avg = 0, min, max;
#endif
	int i, len = 0;

	len += seq_printf(m, "%s rev %d.%d\n",
		sba_dev->name,
		(sba_dev->hw_rev & 0x7) + 1,
		(sba_dev->hw_rev & 0x18) >> 3
		);
	len += seq_printf(m, "IO PDIR size    : %d bytes (%d entries)\n",
		(int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
		total_pages);

	len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
		ioc->res_size, ioc->res_size << 3);   /* 8 bits per byte */

	len += seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE)
		);

	for (i=0; i<4; i++)
		len += seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n", i,
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE  + i*0x18),
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK  + i*0x18),
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18)
		);

#ifdef SBA_COLLECT_STATS
	len += seq_printf(m, "IO PDIR entries : %ld free  %ld used (%d%%)\n",
		total_pages - ioc->used_pages, ioc->used_pages,
		(int) (ioc->used_pages * 100 / total_pages));

	min = max = ioc->avg_search[0];
	for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
		avg += ioc->avg_search[i];
		if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
		if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
	}
	avg /= SBA_SEARCH_SAMPLE;
	len += seq_printf(m, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
		min, avg, max);

	len += seq_printf(m, "pci_map_single(): %12ld calls  %12ld pages (avg %d/1000)\n",
		ioc->msingle_calls, ioc->msingle_pages,
		(int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));

	/* KLUGE - unmap_sg calls unmap_single for each mapped page */
	min = ioc->usingle_calls;
	max = ioc->usingle_pages - ioc->usg_pages;
	len += seq_printf(m, "pci_unmap_single: %12ld calls  %12ld pages (avg %d/1000)\n",
		min, max, (int) ((max * 1000)/min));

	len += seq_printf(m, "pci_map_sg()    : %12ld calls  %12ld pages (avg %d/1000)\n",
		ioc->msg_calls, ioc->msg_pages,
		(int) ((ioc->msg_pages * 1000)/ioc->msg_calls));

	len += seq_printf(m, "pci_unmap_sg()  : %12ld calls  %12ld pages (avg %d/1000)\n",
		ioc->usg_calls, ioc->usg_pages,
		(int) ((ioc->usg_pages * 1000)/ioc->usg_calls));
#endif

	return 0;
}

static int
sba_proc_open(struct inode *i, struct file *f)
{
	return single_open(f, &sba_proc_info, NULL);
}

static const struct file_operations sba_proc_fops = {
	.owner = THIS_MODULE,
	.open = sba_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int
sba_proc_bitmap_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */
	unsigned int *res_ptr = (unsigned int *)ioc->res_map;
	int i, len = 0;

	for (i = 0; i < (ioc->res_size/sizeof(unsigned int)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
			len += seq_printf(m, "\n   ");
		len += seq_printf(m, " %08x", *res_ptr);
	}
	len += seq_printf(m, "\n");

	return 0;
}

static int
sba_proc_bitmap_open(struct inode *i, struct file *f)
{
	return single_open(f, &sba_proc_bitmap_info, NULL);
}

static const struct file_operations sba_proc_bitmap_fops = {
	.owner = THIS_MODULE,
	.open = sba_proc_bitmap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif /* CONFIG_PROC_FS */

static struct parisc_device_id sba_tbl[] = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
	{ HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
	{ 0, }
};

int sba_driver_callback(struct parisc_device *);

static struct parisc_driver sba_driver = {
	.name =		MODULE_NAME,
	.id_table =	sba_tbl,
	.probe =	sba_driver_callback,
};

/*
** Determine if sba should claim this chip (return 0) or not (return 1).
** If so, initialize the chip and tell other partners in crime they
** have work to do.
static struct parisc_device_id sba_tbl[] = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
	{ HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
	{ 0, }
};

int sba_driver_callback(struct parisc_device *);

static struct parisc_driver sba_driver = {
	.name = MODULE_NAME,
	.id_table = sba_tbl,
	.probe = sba_driver_callback,
};

/*
** Determine if sba should claim this chip (return 0) or not (return a
** nonzero error code).  If so, initialize the chip and tell other
** partners in crime they have work to do.
*/
int
sba_driver_callback(struct parisc_device *dev)
{
	struct sba_device *sba_dev;
	u32 func_class;
	int i;
	char *version;
	void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
	struct proc_dir_entry *info_entry, *bitmap_entry, *root;

	sba_dump_ranges(sba_addr);

	/* Read HW Rev First */
	func_class = READ_REG(sba_addr + SBA_FCLASS);

	if (IS_ASTRO(dev)) {
		unsigned long fclass;
		static char astro_rev[]="Astro ?.?";

		/* Astro is broken...Read HW Rev First */
		fclass = READ_REG(sba_addr);

		astro_rev[6] = '1' + (char) (fclass & 0x7);
		astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
		version = astro_rev;

	} else if (IS_IKE(dev)) {
		static char ike_rev[] = "Ike rev ?";
		ike_rev[8] = '0' + (char) (func_class & 0xff);
		version = ike_rev;
	} else if (IS_PLUTO(dev)) {
		static char pluto_rev[]="Pluto ?.?";
		pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
		pluto_rev[8] = '0' + (char) (func_class & 0x0f);
		version = pluto_rev;
	} else {
		static char reo_rev[] = "REO rev ?";
		reo_rev[8] = '0' + (char) (func_class & 0xff);
		version = reo_rev;
	}

	if (!global_ioc_cnt) {
		global_ioc_cnt = count_parisc_driver(&sba_driver);

		/* Astro and Pluto have one IOC per SBA; Ike and REO have two */
		if (!IS_ASTRO(dev) && !IS_PLUTO(dev))
			global_ioc_cnt *= 2;
	}

	printk(KERN_INFO "%s found %s at 0x%lx\n",
		MODULE_NAME, version, dev->hpa.start);

	sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL);
	if (!sba_dev) {
		printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
		return -ENOMEM;
	}

	parisc_set_drvdata(dev, sba_dev);

	for(i=0; i<MAX_IOC; i++)
		spin_lock_init(&(sba_dev->ioc[i].res_lock));

	sba_dev->dev = dev;
	sba_dev->hw_rev = func_class;
	sba_dev->name = dev->name;
	sba_dev->sba_hpa = sba_addr;

	sba_get_pat_resources(sba_dev);
	sba_hw_init(sba_dev);
	sba_common_init(sba_dev);

	hppa_dma_ops = &sba_ops;

#ifdef CONFIG_PROC_FS
	switch (dev->id.hversion) {
	case PLUTO_MCKINLEY_PORT:
		root = proc_mckinley_root;
		break;
	case ASTRO_RUNWAY_PORT:
	case IKE_MERCED_PORT:
	default:
		root = proc_runway_root;
		break;
	}

	info_entry = create_proc_entry("sba_iommu", 0, root);
	bitmap_entry = create_proc_entry("sba_iommu-bitmap", 0, root);

	if (info_entry)
		info_entry->proc_fops = &sba_proc_fops;

	if (bitmap_entry)
		bitmap_entry->proc_fops = &sba_proc_bitmap_fops;
#endif

	parisc_vmerge_boundary = IOVP_SIZE;
	parisc_vmerge_max_size = IOVP_SIZE * BITS_PER_LONG;
	parisc_has_iommu();
	return 0;
}

/*
** One time initialization to let the world know the SBA was found.
** This is the only routine which is NOT static.
** Must be called exactly once before pci_init().
*/
void __init sba_init(void)
{
	register_parisc_driver(&sba_driver);
}
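/*
** Illustrative sketch only, compiled out: once sba_driver_callback()
** has installed sba_ops as hppa_dma_ops, an ordinary PCI driver maps
** DMA buffers through the generic API and the SBA IOMMU hands back a
** translated bus address.  pdev/buf/len below are hypothetical.
*/
#if 0
static dma_addr_t example_dma_map(struct pci_dev *pdev, void *buf,
				  size_t len)
{
	/* dispatched to sba_ops->map_single() via hppa_dma_ops */
	return pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
}
#endif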
/**
 * sba_get_iommu - Assign the iommu pointer for the pci bus controller.
 * @pci_hba: The parisc device.
 *
 * Returns the appropriate IOMMU data for the given parisc PCI controller.
 * This is cached and used later for PCI DMA Mapping.
 */
void * sba_get_iommu(struct parisc_device *pci_hba)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = sba_dev->dev.driver_data;
	char t = sba_dev->id.hw_type;
	int iocnum = (pci_hba->hw_path >> 3);	/* IOC #; low 3 bits are the rope */

	WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	return &(sba->ioc[iocnum]);
}


/**
 * sba_directed_lmmio - return first directed LMMIO range routed to rope
 * @pci_hba: The parisc device.
 * @r: resource PCI host controller wants start/end fields assigned.
 *
 * For the given parisc PCI controller, determine if any direct ranges
 * are routed down the corresponding rope.
 */
void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = sba_dev->dev.driver_data;
	char t = sba_dev->id.hw_type;
	int i;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));  /* rope # */

	BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));

	r->start = r->end = 0;

	/* Astro has 4 directed ranges. Not sure about Ike/Pluto/et al */
	for (i=0; i<4; i++) {
		int base, size;
		void __iomem *reg = sba->sba_hpa + i*0x18;

		base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
		if ((base & 1) == 0)
			continue;	/* not enabled */

		size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);

		if ((size & (ROPES_PER_IOC-1)) != rope)
			continue;	/* directed down different rope */

		r->start = (base & ~1UL) | PCI_F_EXTEND;
		size = ~READ_REG32(reg + LMMIO_DIRECT0_MASK);
		r->end = r->start + size;
	}
}


/**
 * sba_distributed_lmmio - return portion of distributed LMMIO range
 * @pci_hba: The parisc device.
 * @r: resource PCI host controller wants start/end fields assigned.
 *
 * For the given parisc PCI controller, return the portion of the
 * distributed LMMIO range routed to its rope. The distributed LMMIO
 * range is always present; it's just a question of the base address
 * and size of the range.
 */
void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = sba_dev->dev.driver_data;
	char t = sba_dev->id.hw_type;
	int base, size;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));  /* rope # */

	BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));

	r->start = r->end = 0;

	base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
	if ((base & 1) == 0) {
		BUG();	/* Gah! Distr Range wasn't enabled! */
		return;
	}

	r->start = (base & ~1UL) | PCI_F_EXTEND;

	size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
	r->start += rope * (size + 1);	/* adjust base for this rope */
	r->end = r->start + size;
}
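/*
** Worked example for sba_distributed_lmmio(), with hypothetical
** register values: given ROPES_PER_IOC == 8, LMMIO_DIST_BASE ==
** 0xfb000001 (enable bit set) and LMMIO_DIST_MASK == 0xff800000, the
** distributed window is ~0xff800000 + 1 == 8MB and each rope owns an
** equal 1MB slice:
**
**	size  = 0x007fffff / 8                  == 0x000fffff
**	start = (0xfb000000 | PCI_F_EXTEND) + rope * 0x00100000
**	end   = start + 0x000fffff
**
** so rope 0 sees 0xfb000000..0xfb0fffff, rope 1 sees
** 0xfb100000..0xfb1fffff, and so on.
*/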