setup.c revision 3a920accbb5f88d753ab5a6a47d0dd48b6269f84
/*
 * arch/blackfin/kernel/setup.c
 *
 * Copyright 2004-2006 Analog Devices Inc.
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/delay.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/pfn.h>

#ifdef CONFIG_MTD_UCLINUX
#include <linux/mtd/map.h>
#include <linux/ext2_fs.h>
#include <linux/cramfs_fs.h>
#include <linux/romfs_fs.h>
#endif

#include <asm/cplb.h>
#include <asm/cacheflush.h>
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/fixed_code.h>
#include <asm/early_printk.h>

/* Copy of the SWRST (software reset) register taken at boot, used to
 * report why the last reset happened (double fault, watchdog, ...). */
u16 _bfin_swrst;
EXPORT_SYMBOL(_bfin_swrst);

/* Physical memory layout markers, filled in by memory_setup(). */
unsigned long memory_start, memory_end, physical_mem_end;
unsigned long _rambase, _ramstart, _ramend;
unsigned long reserved_mem_dcache_on;
unsigned long reserved_mem_icache_on;
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
EXPORT_SYMBOL(physical_mem_end);
EXPORT_SYMBOL(_ramend);
EXPORT_SYMBOL(reserved_mem_dcache_on);

#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
/* Location/size of the RAM-resident MTD rootfs image (if one is attached
 * past the end of the kernel image). */
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
unsigned long _ebss;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
#endif

char __initdata command_line[COMMAND_LINE_SIZE];
/* Saved exception state from a previous (crashed) kernel, used to report
 * double-fault details in setup_arch(). */
void __initdata *init_retx, *init_saved_retx, *init_saved_seqstat,
	*init_saved_icplb_fault_addr, *init_saved_dcplb_fault_addr;

/* boot memmap, for parsing "memmap=" */
#define BFIN_MEMMAP_MAX		128 /* number of entries in bfin_memmap */
#define BFIN_MEMMAP_RAM		1
#define BFIN_MEMMAP_RESERVED	2
static struct bfin_memmap {
	int nr_map;
	struct bfin_memmap_entry {
		unsigned long long addr; /* start of memory segment */
		unsigned long long size;
		unsigned long type;
	} map[BFIN_MEMMAP_MAX];
} bfin_memmap __initdata;

/* for memmap sanitization */
struct change_member {
	struct bfin_memmap_entry *pentry; /* pointer to original entry */
	unsigned long long addr; /* address for this change point */
};
/* Scratch arrays for sanitize_memmap(); two change points (start/end)
 * per original map entry. */
static struct change_member change_point_list[2*BFIN_MEMMAP_MAX] __initdata;
static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;

DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);

static int early_init_clkin_hz(char *buf);

#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
/*
 * Build the global CPLB tables, then the per-CPU instruction/data
 * CPLB tables for every possible CPU.
 */
void __init generate_cplb_tables(void)
{
	unsigned int cpu;

	generate_cplb_tables_all();
	/* Generate per-CPU I&D CPLB tables */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		generate_cplb_tables_cpu(cpu);
}
#endif

/*
 * Enable the configured caches on @cpu and report (via printk) what
 * cacheability policy applies to external memory and L2 SRAM.
 */
void __cpuinit bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
	bfin_icache_init(icplb_tbl[cpu]);
#endif

#ifdef CONFIG_BFIN_DCACHE
	bfin_dcache_init(dcplb_tbl[cpu]);
#endif

	/*
	 * In cache coherence emulation mode, we need to have the
	 * D-cache enabled before running any atomic operation which
	 * might invove cache invalidation (i.e. spinlock, rwlock).
	 * So printk's are deferred until then.
	 */
#ifdef CONFIG_BFIN_ICACHE
	printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO " External memory:"
# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	       " cacheable"
# else
	       " uncacheable"
# endif
	       " in instruction cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO " L2 SRAM :"
# ifdef CONFIG_BFIN_L2_ICACHEABLE
		       " cacheable"
# else
		       " uncacheable"
# endif
		       " in instruction cache\n");

#else
	printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
#endif

#ifdef CONFIG_BFIN_DCACHE
	printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO " External memory:"
# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
	       " cacheable (write-back)"
# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
	       " cacheable (write-through)"
# else
	       " uncacheable"
# endif
	       " in data cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO " L2 SRAM :"
# if defined CONFIG_BFIN_L2_WRITEBACK
		       " cacheable (write-back)"
# elif defined CONFIG_BFIN_L2_WRITETHROUGH
		       " cacheable (write-through)"
# else
		       " uncacheable"
# endif
		       " in data cache\n");
#else
	printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}

/*
 * Record per-CPU bookkeeping (current task, BogoMIPS, cache control
 * register snapshots) into this CPU's blackfin_cpudata.
 */
void __cpuinit bfin_setup_cpudata(unsigned int cpu)
{
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);

	cpudata->idle = current;
	cpudata->loops_per_jiffy = loops_per_jiffy;
	cpudata->imemctl = bfin_read_IMEM_CONTROL();
	cpudata->dmemctl = bfin_read_DMEM_CONTROL();
}

/* Generate CPLB tables (if caching is enabled) and bring up the boot
 * CPU's caches. */
void __init bfin_cache_init(void)
{
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
	generate_cplb_tables();
#endif
	bfin_setup_caches(0);
}

/*
 * Copy the L1 (and optionally L2) text/data sections from their load
 * addresses in external memory into on-chip SRAM.
 */
void __init bfin_relocate_l1_mem(void)
{
	unsigned long l1_code_length;
	unsigned long l1_data_a_length;
	unsigned long l1_data_b_length;
	unsigned long l2_length;

	/*
	 * due to the ALIGN(4) in the arch/blackfin/kernel/vmlinux.lds.S
	 * we know that everything about l1 text/data is nice and aligned,
	 * so copy by 4 byte chunks, and don't worry about overlapping
	 * src/dest.
	 *
	 * We can't use the dma_memcpy functions, since they can call
	 * scheduler functions which might be in L1 :( and core writes
	 * into L1 instruction cause bad access errors, so we are stuck,
	 * we are required to use DMA, but can't use the common dma
	 * functions. We can't use memcpy either - since that might be
	 * going to be in the relocated L1
	 */

	blackfin_dma_early_init();

	/* if necessary, copy _stext_l1 to _etext_l1 to L1 instruction SRAM */
	l1_code_length = _etext_l1 - _stext_l1;
	if (l1_code_length)
		early_dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length);

	/* if necessary, copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */
	l1_data_a_length = _sbss_l1 - _sdata_l1;
	if (l1_data_a_length)
		early_dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);

	/* if necessary, copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */
	l1_data_b_length = _sbss_b_l1 - _sdata_b_l1;
	if (l1_data_b_length)
		early_dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
			l1_data_a_length, l1_data_b_length);

	early_dma_memcpy_done();

	/* if necessary, copy _stext_l2 to _edata_l2 to L2 SRAM */
	if (L2_LENGTH != 0) {
		l2_length = _sbss_l2 - _stext_l2;
		if (l2_length)
			memcpy(_stext_l2, _l2_lma_start, l2_length);
	}
}

/* add_memory_region to memmap */
static void __init add_memory_region(unsigned long long start,
				     unsigned long long size, int type)
{
	int i;

	i = bfin_memmap.nr_map;

	if (i == BFIN_MEMMAP_MAX) {
		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
		return;
	}

	bfin_memmap.map[i].addr = start;
	bfin_memmap.map[i].size = size;
	bfin_memmap.map[i].type = type;
	bfin_memmap.nr_map++;
}

/*
 * Sanitize the boot memmap, removing overlaps.
 * Change-point sweep: where regions overlap, the highest type wins
 * (reserved beats usable).  Returns 0 on success, -1 if the map is
 * trivial (< 2 entries) or contains a wrapping address range.
 */
static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
{
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/*
	   Visually we're performing the following (1,2,3,4 = memory types)

	   Sample memory map (w/overlaps):
		____22__________________
		______________________4_
		____1111________________
		_44_____________________
		11111111________________
		____________________33__
		___________44___________
		__________33333_________
		______________22________
		___________________2222_
		_________111111111______
		_____________________11_
		_________________4______

	   Sanitized equivalent (no overlap):
		1_______________________
		_44_____________________
		___1____________________
		____22__________________
		______11________________
		_________1______________
		__________3_____________
		___________44___________
		_____________33_________
		_______________2________
		________________1_______
		_________________4______
		___________________2____
		____________________33__
		______________________4_
	*/
	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in memmap */
	for (i = 0; i < old_nr; i++)
		if (map[i].addr + map[i].size < map[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (map[i].size != 0) {
			change_point[chgidx]->addr = map[i].addr;
			change_point[chgidx++]->pentry = &map[i];
			change_point[chgidx]->addr = map[i].addr + map[i].size;
			change_point[chgidx++]->pentry = &map[i];
		}
	}
	chg_nr = chgidx;	/* true number of change-points */

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			/* if <current_addr> > <last_addr>, swap */
			/* or, if current=<start_addr> & last=<end_addr>, swap */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
				((change_point[i]->addr == change_point[i-1]->addr) &&
				 (change_point[i]->addr == change_point[i]->pentry->addr) &&
				 (change_point[i-1]->addr != change_point[i-1]->pentry->addr))
			   ) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}

	/* create a new memmap, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_entry = 0;		/* index for creating new memmap entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */
	/* loop through change-points, determining affect on the new memmap */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping memmap entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pentry->addr) {
			/* add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++] = change_point[chgidx]->pentry;
		} else {
			/* remove entry from list (order independent, so swap with last) */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] == change_point[chgidx]->pentry)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use */
		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new memmap based on this information */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_map[new_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_map[new_entry].size != 0)
					if (++new_entry >= BFIN_MEMMAP_MAX)
						break;	/* no more space left for new entries */
			}
			if (current_type != 0) {
				new_map[new_entry].addr = change_point[chgidx]->addr;
				new_map[new_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_entry;	/* retain count for new entries */

	/* copy new mapping into original location */
	memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
	*pnr_map = new_nr;

	return 0;
}

/* Dump the boot memmap at KERN_DEBUG, tagged with @who for context. */
static void __init print_memory_map(char *who)
{
	int i;

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		printk(KERN_DEBUG " %s: %016Lx - %016Lx ", who,
			bfin_memmap.map[i].addr,
			bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
		switch (bfin_memmap.map[i].type) {
		case BFIN_MEMMAP_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BFIN_MEMMAP_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", bfin_memmap.map[i].type);
			break;
		}
	}
}

/* Parse one "memmap=size@addr" (RAM) or "memmap=size$addr" (reserved)
 * argument and append the region to the boot memmap. */
static __init int parse_memmap(char *arg)
{
	unsigned long long start_at, mem_size;

	if (!arg)
		return -EINVAL;

	mem_size = memparse(arg, &arg);
	if (*arg == '@') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RAM);
	} else if (*arg == '$') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RESERVED);
	}

	return 0;
}

/*
 * Initial parsing of the command line.  Currently, we support:
 *  - Controlling the linux memory size: mem=xxx[KMG]
 *  - Controlling the physical memory size: max_mem=xxx[KMG][$][#]
 *       $ -> reserved memory is dcacheable
 *       # -> reserved memory is icacheable
 *  - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
 *       @ from <start> to <start>+<mem>, type RAM
 *       $ from <start> to <start>+<mem>, type RESERVED
 */
static __init void parse_cmdline_early(char *cmdline_p)
{
	char c = ' ', *to = cmdline_p;
	unsigned int memsize;
	for (;;) {
		/* only recognize an option at the start of a word */
		if (c == ' ') {
			if (!memcmp(to, "mem=", 4)) {
				to += 4;
				memsize = memparse(to, &to);
				if (memsize)
					_ramend = memsize;

			} else if (!memcmp(to, "max_mem=", 8)) {
				to += 8;
				memsize = memparse(to, &to);
				if (memsize) {
					physical_mem_end = memsize;
					if (*to != ' ') {
						if (*to == '$'
						    || *(to + 1) == '$')
							reserved_mem_dcache_on = 1;
						if (*to == '#'
						    || *(to + 1) == '#')
							reserved_mem_icache_on = 1;
					}
				}
			} else if (!memcmp(to, "clkin_hz=", 9)) {
				to += 9;
				early_init_clkin_hz(to);
#ifdef CONFIG_EARLY_PRINTK
			} else if (!memcmp(to, "earlyprintk=", 12)) {
				to += 12;
				setup_early_printk(to);
#endif
			} else if (!memcmp(to, "memmap=", 7)) {
				to += 7;
				parse_memmap(to);
			}
		}
		c = *(to++);
		if (!c)
			break;
	}
}

/*
 * Setup memory defaults from user config.
499 * The physical memory layout looks like: 500 * 501 * [_rambase, _ramstart]: kernel image 502 * [memory_start, memory_end]: dynamic memory managed by kernel 503 * [memory_end, _ramend]: reserved memory 504 * [memory_mtd_start(memory_end), 505 * memory_mtd_start + mtd_size]: rootfs (if any) 506 * [_ramend - DMA_UNCACHED_REGION, 507 * _ramend]: uncached DMA region 508 * [_ramend, physical_mem_end]: memory not managed by kernel 509 */ 510static __init void memory_setup(void) 511{ 512#ifdef CONFIG_MTD_UCLINUX 513 unsigned long mtd_phys = 0; 514#endif 515 516 _rambase = (unsigned long)_stext; 517 _ramstart = (unsigned long)_end; 518 519 if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) { 520 console_init(); 521 panic("DMA region exceeds memory limit: %lu.", 522 _ramend - _ramstart); 523 } 524 memory_end = _ramend - DMA_UNCACHED_REGION; 525 526#ifdef CONFIG_MPU 527 /* Round up to multiple of 4MB */ 528 memory_start = (_ramstart + 0x3fffff) & ~0x3fffff; 529#else 530 memory_start = PAGE_ALIGN(_ramstart); 531#endif 532 533#if defined(CONFIG_MTD_UCLINUX) 534 /* generic memory mapped MTD driver */ 535 memory_mtd_end = memory_end; 536 537 mtd_phys = _ramstart; 538 mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8))); 539 540# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS) 541 if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC) 542 mtd_size = 543 PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10); 544# endif 545 546# if defined(CONFIG_CRAMFS) 547 if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC) 548 mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4))); 549# endif 550 551# if defined(CONFIG_ROMFS_FS) 552 if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0 553 && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) 554 mtd_size = 555 PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2])); 556# if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263) 557 /* Due to a Hardware Anomaly we need to limit the size of usable 558 * instruction 
memory to max 60MB, 56 if HUNT_FOR_ZERO is on 559 * 05000263 - Hardware loop corrupted when taking an ICPLB exception 560 */ 561# if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO)) 562 if (memory_end >= 56 * 1024 * 1024) 563 memory_end = 56 * 1024 * 1024; 564# else 565 if (memory_end >= 60 * 1024 * 1024) 566 memory_end = 60 * 1024 * 1024; 567# endif /* CONFIG_DEBUG_HUNT_FOR_ZERO */ 568# endif /* ANOMALY_05000263 */ 569# endif /* CONFIG_ROMFS_FS */ 570 571 /* Since the default MTD_UCLINUX has no magic number, we just blindly 572 * read 8 past the end of the kernel's image, and look at it. 573 * When no image is attached, mtd_size is set to a random number 574 * Do some basic sanity checks before operating on things 575 */ 576 if (mtd_size == 0 || memory_end <= mtd_size) { 577 pr_emerg("Could not find valid ram mtd attached.\n"); 578 } else { 579 memory_end -= mtd_size; 580 581 /* Relocate MTD image to the top of memory after the uncached memory area */ 582 uclinux_ram_map.phys = memory_mtd_start = memory_end; 583 uclinux_ram_map.size = mtd_size; 584 pr_info("Found mtd parition at 0x%p, (len=0x%lx), moving to 0x%p\n", 585 _end, mtd_size, (void *)memory_mtd_start); 586 dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size); 587 } 588#endif /* CONFIG_MTD_UCLINUX */ 589 590#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263) 591 /* Due to a Hardware Anomaly we need to limit the size of usable 592 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on 593 * 05000263 - Hardware loop corrupted when taking an ICPLB exception 594 */ 595#if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO)) 596 if (memory_end >= 56 * 1024 * 1024) 597 memory_end = 56 * 1024 * 1024; 598#else 599 if (memory_end >= 60 * 1024 * 1024) 600 memory_end = 60 * 1024 * 1024; 601#endif /* CONFIG_DEBUG_HUNT_FOR_ZERO */ 602 printk(KERN_NOTICE "Warning: limiting memory to %liMB due to hardware anomaly 05000263\n", memory_end >> 20); 603#endif /* ANOMALY_05000263 */ 604 605#ifdef CONFIG_MPU 606 
page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32; 607 page_mask_order = get_order(3 * page_mask_nelts * sizeof(long)); 608#endif 609 610#if !defined(CONFIG_MTD_UCLINUX) 611 /*In case there is no valid CPLB behind memory_end make sure we don't get to close*/ 612 memory_end -= SIZE_4K; 613#endif 614 615 init_mm.start_code = (unsigned long)_stext; 616 init_mm.end_code = (unsigned long)_etext; 617 init_mm.end_data = (unsigned long)_edata; 618 init_mm.brk = (unsigned long)0; 619 620 printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20); 621 printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20); 622 623 printk(KERN_INFO "Memory map:\n" 624 " fixedcode = 0x%p-0x%p\n" 625 " text = 0x%p-0x%p\n" 626 " rodata = 0x%p-0x%p\n" 627 " bss = 0x%p-0x%p\n" 628 " data = 0x%p-0x%p\n" 629 " stack = 0x%p-0x%p\n" 630 " init = 0x%p-0x%p\n" 631 " available = 0x%p-0x%p\n" 632#ifdef CONFIG_MTD_UCLINUX 633 " rootfs = 0x%p-0x%p\n" 634#endif 635#if DMA_UNCACHED_REGION > 0 636 " DMA Zone = 0x%p-0x%p\n" 637#endif 638 , (void *)FIXED_CODE_START, (void *)FIXED_CODE_END, 639 _stext, _etext, 640 __start_rodata, __end_rodata, 641 __bss_start, __bss_stop, 642 _sdata, _edata, 643 (void *)&init_thread_union, 644 (void *)((int)(&init_thread_union) + 0x2000), 645 __init_begin, __init_end, 646 (void *)_ramstart, (void *)memory_end 647#ifdef CONFIG_MTD_UCLINUX 648 , (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size) 649#endif 650#if DMA_UNCACHED_REGION > 0 651 , (void *)(_ramend - DMA_UNCACHED_REGION), (void *)(_ramend) 652#endif 653 ); 654} 655 656/* 657 * Find the lowest, highest page frame number we have available 658 */ 659void __init find_min_max_pfn(void) 660{ 661 int i; 662 663 max_pfn = 0; 664 min_low_pfn = memory_end; 665 666 for (i = 0; i < bfin_memmap.nr_map; i++) { 667 unsigned long start, end; 668 /* RAM? 
*/ 669 if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM) 670 continue; 671 start = PFN_UP(bfin_memmap.map[i].addr); 672 end = PFN_DOWN(bfin_memmap.map[i].addr + 673 bfin_memmap.map[i].size); 674 if (start >= end) 675 continue; 676 if (end > max_pfn) 677 max_pfn = end; 678 if (start < min_low_pfn) 679 min_low_pfn = start; 680 } 681} 682 683static __init void setup_bootmem_allocator(void) 684{ 685 int bootmap_size; 686 int i; 687 unsigned long start_pfn, end_pfn; 688 unsigned long curr_pfn, last_pfn, size; 689 690 /* mark memory between memory_start and memory_end usable */ 691 add_memory_region(memory_start, 692 memory_end - memory_start, BFIN_MEMMAP_RAM); 693 /* sanity check for overlap */ 694 sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map); 695 print_memory_map("boot memmap"); 696 697 /* intialize globals in linux/bootmem.h */ 698 find_min_max_pfn(); 699 /* pfn of the last usable page frame */ 700 if (max_pfn > memory_end >> PAGE_SHIFT) 701 max_pfn = memory_end >> PAGE_SHIFT; 702 /* pfn of last page frame directly mapped by kernel */ 703 max_low_pfn = max_pfn; 704 /* pfn of the first usable page frame after kernel image*/ 705 if (min_low_pfn < memory_start >> PAGE_SHIFT) 706 min_low_pfn = memory_start >> PAGE_SHIFT; 707 708 start_pfn = PAGE_OFFSET >> PAGE_SHIFT; 709 end_pfn = memory_end >> PAGE_SHIFT; 710 711 /* 712 * give all the memory to the bootmap allocator, tell it to put the 713 * boot mem_map at the start of memory. 714 */ 715 bootmap_size = init_bootmem_node(NODE_DATA(0), 716 memory_start >> PAGE_SHIFT, /* map goes here */ 717 start_pfn, end_pfn); 718 719 /* register the memmap regions with the bootmem allocator */ 720 for (i = 0; i < bfin_memmap.nr_map; i++) { 721 /* 722 * Reserve usable memory 723 */ 724 if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM) 725 continue; 726 /* 727 * We are rounding up the start address of usable memory: 728 */ 729 curr_pfn = PFN_UP(bfin_memmap.map[i].addr); 730 if (curr_pfn >= end_pfn) 731 continue; 732 /* 733 * ... 
and at the end of the usable range downwards: 734 */ 735 last_pfn = PFN_DOWN(bfin_memmap.map[i].addr + 736 bfin_memmap.map[i].size); 737 738 if (last_pfn > end_pfn) 739 last_pfn = end_pfn; 740 741 /* 742 * .. finally, did all the rounding and playing 743 * around just make the area go away? 744 */ 745 if (last_pfn <= curr_pfn) 746 continue; 747 748 size = last_pfn - curr_pfn; 749 free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size)); 750 } 751 752 /* reserve memory before memory_start, including bootmap */ 753 reserve_bootmem(PAGE_OFFSET, 754 memory_start + bootmap_size + PAGE_SIZE - 1 - PAGE_OFFSET, 755 BOOTMEM_DEFAULT); 756} 757 758#define EBSZ_TO_MEG(ebsz) \ 759({ \ 760 int meg = 0; \ 761 switch (ebsz & 0xf) { \ 762 case 0x1: meg = 16; break; \ 763 case 0x3: meg = 32; break; \ 764 case 0x5: meg = 64; break; \ 765 case 0x7: meg = 128; break; \ 766 case 0x9: meg = 256; break; \ 767 case 0xb: meg = 512; break; \ 768 } \ 769 meg; \ 770}) 771static inline int __init get_mem_size(void) 772{ 773#if defined(EBIU_SDBCTL) 774# if defined(BF561_FAMILY) 775 int ret = 0; 776 u32 sdbctl = bfin_read_EBIU_SDBCTL(); 777 ret += EBSZ_TO_MEG(sdbctl >> 0); 778 ret += EBSZ_TO_MEG(sdbctl >> 8); 779 ret += EBSZ_TO_MEG(sdbctl >> 16); 780 ret += EBSZ_TO_MEG(sdbctl >> 24); 781 return ret; 782# else 783 return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL()); 784# endif 785#elif defined(EBIU_DDRCTL1) 786 u32 ddrctl = bfin_read_EBIU_DDRCTL1(); 787 int ret = 0; 788 switch (ddrctl & 0xc0000) { 789 case DEVSZ_64: ret = 64 / 8; 790 case DEVSZ_128: ret = 128 / 8; 791 case DEVSZ_256: ret = 256 / 8; 792 case DEVSZ_512: ret = 512 / 8; 793 } 794 switch (ddrctl & 0x30000) { 795 case DEVWD_4: ret *= 2; 796 case DEVWD_8: ret *= 2; 797 case DEVWD_16: break; 798 } 799 if ((ddrctl & 0xc000) == 0x4000) 800 ret *= 2; 801 return ret; 802#endif 803 BUG(); 804} 805 806void __init setup_arch(char **cmdline_p) 807{ 808 unsigned long sclk, cclk; 809 810 /* Check to make sure we are running on the right processor */ 811 if 
(unlikely(CPUID != bfin_cpuid())) 812 printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n", 813 CPU, bfin_cpuid(), bfin_revid()); 814 815#ifdef CONFIG_DUMMY_CONSOLE 816 conswitchp = &dummy_con; 817#endif 818 819#if defined(CONFIG_CMDLINE_BOOL) 820 strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line)); 821 command_line[sizeof(command_line) - 1] = 0; 822#endif 823 824 /* Keep a copy of command line */ 825 *cmdline_p = &command_line[0]; 826 memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); 827 boot_command_line[COMMAND_LINE_SIZE - 1] = '\0'; 828 829 memset(&bfin_memmap, 0, sizeof(bfin_memmap)); 830 831 /* If the user does not specify things on the command line, use 832 * what the bootloader set things up as 833 */ 834 physical_mem_end = 0; 835 parse_cmdline_early(&command_line[0]); 836 837 if (_ramend == 0) 838 _ramend = get_mem_size() * 1024 * 1024; 839 840 if (physical_mem_end == 0) 841 physical_mem_end = _ramend; 842 843 memory_setup(); 844 845 /* Initialize Async memory banks */ 846 bfin_write_EBIU_AMBCTL0(AMBCTL0VAL); 847 bfin_write_EBIU_AMBCTL1(AMBCTL1VAL); 848 bfin_write_EBIU_AMGCTL(AMGCTLVAL); 849#ifdef CONFIG_EBIU_MBSCTLVAL 850 bfin_write_EBIU_MBSCTL(CONFIG_EBIU_MBSCTLVAL); 851 bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL); 852 bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL); 853#endif 854 855 cclk = get_cclk(); 856 sclk = get_sclk(); 857 858 if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk) 859 panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK"); 860 861#ifdef BF561_FAMILY 862 if (ANOMALY_05000266) { 863 bfin_read_IMDMA_D0_IRQ_STATUS(); 864 bfin_read_IMDMA_D1_IRQ_STATUS(); 865 } 866#endif 867 printk(KERN_INFO "Hardware Trace "); 868 if (bfin_read_TBUFCTL() & 0x1) 869 printk(KERN_CONT "Active "); 870 else 871 printk(KERN_CONT "Off "); 872 if (bfin_read_TBUFCTL() & 0x2) 873 printk(KERN_CONT "and Enabled\n"); 874 else 875 printk(KERN_CONT "and Disabled\n"); 876 877 printk(KERN_INFO "Boot Mode: 
%i\n", bfin_read_SYSCR() & 0xF); 878 879 /* Newer parts mirror SWRST bits in SYSCR */ 880#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \ 881 defined(CONFIG_BF538) || defined(CONFIG_BF539) 882 _bfin_swrst = bfin_read_SWRST(); 883#else 884 /* Clear boot mode field */ 885 _bfin_swrst = bfin_read_SYSCR() & ~0xf; 886#endif 887 888#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT 889 bfin_write_SWRST(_bfin_swrst & ~DOUBLE_FAULT); 890#endif 891#ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET 892 bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT); 893#endif 894 895#ifdef CONFIG_SMP 896 if (_bfin_swrst & SWRST_DBL_FAULT_A) { 897#else 898 if (_bfin_swrst & RESET_DOUBLE) { 899#endif 900 printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n"); 901#ifdef CONFIG_DEBUG_DOUBLEFAULT 902 /* We assume the crashing kernel, and the current symbol table match */ 903 printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n", 904 (int)init_saved_seqstat & SEQSTAT_EXCAUSE, init_saved_retx); 905 printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr); 906 printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr); 907#endif 908 printk(KERN_NOTICE " The instruction at %pF caused a double exception\n", 909 init_retx); 910 } else if (_bfin_swrst & RESET_WDOG) 911 printk(KERN_INFO "Recovering from Watchdog event\n"); 912 else if (_bfin_swrst & RESET_SOFTWARE) 913 printk(KERN_NOTICE "Reset caused by Software reset\n"); 914 915 printk(KERN_INFO "Blackfin support (C) 2004-2009 Analog Devices, Inc.\n"); 916 if (bfin_compiled_revid() == 0xffff) 917 printk(KERN_INFO "Compiled for ADSP-%s Rev any\n", CPU); 918 else if (bfin_compiled_revid() == -1) 919 printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU); 920 else 921 printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid()); 922 923 if (likely(CPUID == bfin_cpuid())) { 924 if (bfin_revid() != bfin_compiled_revid()) { 925 if (bfin_compiled_revid() == -1) 926 printk(KERN_ERR "Warning: Compiled 
for Rev none, but running on Rev %d\n", 927 bfin_revid()); 928 else if (bfin_compiled_revid() != 0xffff) { 929 printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n", 930 bfin_compiled_revid(), bfin_revid()); 931 if (bfin_compiled_revid() > bfin_revid()) 932 panic("Error: you are missing anomaly workarounds for this rev"); 933 } 934 } 935 if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX) 936 printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n", 937 CPU, bfin_revid()); 938 } 939 940 printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n"); 941 942 printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n", 943 cclk / 1000000, sclk / 1000000); 944 945 setup_bootmem_allocator(); 946 947 paging_init(); 948 949 /* Copy atomic sequences to their fixed location, and sanity check that 950 these locations are the ones that we advertise to userspace. */ 951 memcpy((void *)FIXED_CODE_START, &fixed_code_start, 952 FIXED_CODE_END - FIXED_CODE_START); 953 BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start 954 != SIGRETURN_STUB - FIXED_CODE_START); 955 BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start 956 != ATOMIC_XCHG32 - FIXED_CODE_START); 957 BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start 958 != ATOMIC_CAS32 - FIXED_CODE_START); 959 BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start 960 != ATOMIC_ADD32 - FIXED_CODE_START); 961 BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start 962 != ATOMIC_SUB32 - FIXED_CODE_START); 963 BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start 964 != ATOMIC_IOR32 - FIXED_CODE_START); 965 BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start 966 != ATOMIC_AND32 - FIXED_CODE_START); 967 BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start 968 != ATOMIC_XOR32 - FIXED_CODE_START); 969 BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start 970 != SAFE_USER_INSTRUCTION - 
	       FIXED_CODE_START);

#ifdef CONFIG_SMP
	platform_init_cpus();
#endif
	init_exception_vectors();
	bfin_cache_init();	/* Initialize caches for the boot CPU */
}

/* Register each possible CPU with the sysfs topology layer. */
static int __init topology_init(void)
{
	unsigned int cpu;
	/* Record CPU-private information for the boot processor. */
	bfin_setup_cpudata(0);

	for_each_possible_cpu(cpu) {
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
	}

	return 0;
}

subsys_initcall(topology_init);

/* Get the input clock frequency */
static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
static u_long get_clkin_hz(void)
{
	return cached_clkin_hz;
}
/* Boot-time override of the CLKIN frequency via the "clkin_hz" parameter. */
static int __init early_init_clkin_hz(char *buf)
{
	cached_clkin_hz = simple_strtoul(buf, NULL, 0);
#ifdef BFIN_KERNEL_CLOCK
	/* When the kernel reprograms the PLL itself, the clocks were computed
	 * at build time from CONFIG_CLKIN_HZ, so a different CLKIN is fatal.
	 */
	if (cached_clkin_hz != CONFIG_CLKIN_HZ)
		panic("cannot change clkin_hz when reprogramming clocks");
#endif
	return 1;
}
/* NOTE(review): the key here includes a trailing '=' — verify this matches
 * how early_param keys are compared during early boot parsing.
 */
early_param("clkin_hz=", early_init_clkin_hz);

/* Get the voltage input multiplier */
static u_long get_vco(void)
{
	static u_long cached_vco;
	u_long msel, pll_ctl;

	/* The assumption here is that VCO never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_vco)
		return cached_vco;

	pll_ctl = bfin_read_PLL_CTL();
	/* MSEL field: bits [14:9]; a value of 0 encodes a multiplier of 64. */
	msel = (pll_ctl >> 9) & 0x3F;
	if (0 == msel)
		msel = 64;

	cached_vco = get_clkin_hz();
	cached_vco >>= (1 & pll_ctl);	/* DF bit */
	cached_vco *= msel;
	return cached_vco;
}

/* Get the Core clock */
u_long get_cclk(void)
{
	static u_long cached_cclk_pll_div, cached_cclk;
	u_long csel, ssel;

	/* PLL bypassed: the core runs directly off CLKIN. */
	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	/* NOTE(review): a cold cache with PLL_DIV == 0 would return the
	 * uninitialized (zero) cached_cclk — confirm PLL_DIV is never 0 here.
	 */
	ssel = bfin_read_PLL_DIV();
	if (ssel == cached_cclk_pll_div)
		return cached_cclk;
	else
		cached_cclk_pll_div = ssel;

	csel = ((ssel >> 4) & 0x03);
	ssel &= 0xf;
	if (ssel && ssel < (1 << csel))	/* SCLK > CCLK */
		cached_cclk = get_vco() / ssel;
	else
		cached_cclk = get_vco() >> csel;
	return cached_cclk;
}
EXPORT_SYMBOL(get_cclk);

/* Get the System clock */
u_long get_sclk(void)
{
	static u_long cached_sclk;
	u_long ssel;

	/* The assumption here is that SCLK never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_sclk)
		return cached_sclk;

	/* PLL bypassed: the system clock is CLKIN itself. */
	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV() & 0xf;
	if (0 == ssel) {
		/* SSEL of 0 is invalid hardware state; fall back to divide-by-1. */
		printk(KERN_WARNING "Invalid System Clock\n");
		ssel = 1;
	}

	cached_sclk = get_vco() / ssel;
	return cached_sclk;
}
EXPORT_SYMBOL(get_sclk);

/* Convert a count of SCLK cycles to microseconds (64-bit intermediate). */
unsigned long sclk_to_usecs(unsigned long sclk)
{
	u64 tmp = USEC_PER_SEC * (u64)sclk;
	do_div(tmp, get_sclk());
	return tmp;
}
EXPORT_SYMBOL(sclk_to_usecs);

/* Convert microseconds to the equivalent count of SCLK cycles. */
unsigned long usecs_to_sclk(unsigned long usecs)
{
	u64 tmp = get_sclk() * (u64)usecs;
	do_div(tmp, USEC_PER_SEC);
	return tmp;
}
EXPORT_SYMBOL(usecs_to_sclk);

/*
 * Get CPU information for use by the procfs.
1103 */ 1104static int show_cpuinfo(struct seq_file *m, void *v) 1105{ 1106 char *cpu, *mmu, *fpu, *vendor, *cache; 1107 uint32_t revid; 1108 int cpu_num = *(unsigned int *)v; 1109 u_long sclk, cclk; 1110 u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0; 1111 struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num); 1112 1113 cpu = CPU; 1114 mmu = "none"; 1115 fpu = "none"; 1116 revid = bfin_revid(); 1117 1118 sclk = get_sclk(); 1119 cclk = get_cclk(); 1120 1121 switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) { 1122 case 0xca: 1123 vendor = "Analog Devices"; 1124 break; 1125 default: 1126 vendor = "unknown"; 1127 break; 1128 } 1129 1130 seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor); 1131 1132 if (CPUID == bfin_cpuid()) 1133 seq_printf(m, "cpu family\t: 0x%04x\n", CPUID); 1134 else 1135 seq_printf(m, "cpu family\t: Compiled for:0x%04x, running on:0x%04x\n", 1136 CPUID, bfin_cpuid()); 1137 1138 seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n" 1139 "stepping\t: %d ", 1140 cpu, cclk/1000000, sclk/1000000, 1141#ifdef CONFIG_MPU 1142 "mpu on", 1143#else 1144 "mpu off", 1145#endif 1146 revid); 1147 1148 if (bfin_revid() != bfin_compiled_revid()) { 1149 if (bfin_compiled_revid() == -1) 1150 seq_printf(m, "(Compiled for Rev none)"); 1151 else if (bfin_compiled_revid() == 0xffff) 1152 seq_printf(m, "(Compiled for Rev any)"); 1153 else 1154 seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid()); 1155 } 1156 1157 seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n", 1158 cclk/1000000, cclk%1000000, 1159 sclk/1000000, sclk%1000000); 1160 seq_printf(m, "bogomips\t: %lu.%02lu\n" 1161 "Calibration\t: %lu loops\n", 1162 (cpudata->loops_per_jiffy * HZ) / 500000, 1163 ((cpudata->loops_per_jiffy * HZ) / 5000) % 100, 1164 (cpudata->loops_per_jiffy * HZ)); 1165 1166 /* Check Cache configutation */ 1167 switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) { 1168 case ACACHE_BSRAM: 1169 cache = 
"dbank-A/B\t: cache/sram"; 1170 dcache_size = 16; 1171 dsup_banks = 1; 1172 break; 1173 case ACACHE_BCACHE: 1174 cache = "dbank-A/B\t: cache/cache"; 1175 dcache_size = 32; 1176 dsup_banks = 2; 1177 break; 1178 case ASRAM_BSRAM: 1179 cache = "dbank-A/B\t: sram/sram"; 1180 dcache_size = 0; 1181 dsup_banks = 0; 1182 break; 1183 default: 1184 cache = "unknown"; 1185 dcache_size = 0; 1186 dsup_banks = 0; 1187 break; 1188 } 1189 1190 /* Is it turned on? */ 1191 if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE)) 1192 dcache_size = 0; 1193 1194 if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB)) 1195 icache_size = 0; 1196 1197 seq_printf(m, "cache size\t: %d KB(L1 icache) " 1198 "%d KB(L1 dcache) %d KB(L2 cache)\n", 1199 icache_size, dcache_size, 0); 1200 seq_printf(m, "%s\n", cache); 1201 seq_printf(m, "external memory\t: " 1202#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) 1203 "cacheable" 1204#else 1205 "uncacheable" 1206#endif 1207 " in instruction cache\n"); 1208 seq_printf(m, "external memory\t: " 1209#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) 1210 "cacheable (write-back)" 1211#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH) 1212 "cacheable (write-through)" 1213#else 1214 "uncacheable" 1215#endif 1216 " in data cache\n"); 1217 1218 if (icache_size) 1219 seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n", 1220 BFIN_ISUBBANKS, BFIN_IWAYS, BFIN_ILINES); 1221 else 1222 seq_printf(m, "icache setup\t: off\n"); 1223 1224 seq_printf(m, 1225 "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n", 1226 dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS, 1227 BFIN_DLINES); 1228#ifdef __ARCH_SYNC_CORE_DCACHE 1229 seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count); 1230#endif 1231#ifdef __ARCH_SYNC_CORE_ICACHE 1232 seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count); 1233#endif 1234#ifdef CONFIG_BFIN_ICACHE_LOCK 1235 switch ((cpudata->imemctl >> 3) & WAYALL_L) { 1236 case WAY0_L: 1237 
seq_printf(m, "Way0 Locked-Down\n"); 1238 break; 1239 case WAY1_L: 1240 seq_printf(m, "Way1 Locked-Down\n"); 1241 break; 1242 case WAY01_L: 1243 seq_printf(m, "Way0,Way1 Locked-Down\n"); 1244 break; 1245 case WAY2_L: 1246 seq_printf(m, "Way2 Locked-Down\n"); 1247 break; 1248 case WAY02_L: 1249 seq_printf(m, "Way0,Way2 Locked-Down\n"); 1250 break; 1251 case WAY12_L: 1252 seq_printf(m, "Way1,Way2 Locked-Down\n"); 1253 break; 1254 case WAY012_L: 1255 seq_printf(m, "Way0,Way1 & Way2 Locked-Down\n"); 1256 break; 1257 case WAY3_L: 1258 seq_printf(m, "Way3 Locked-Down\n"); 1259 break; 1260 case WAY03_L: 1261 seq_printf(m, "Way0,Way3 Locked-Down\n"); 1262 break; 1263 case WAY13_L: 1264 seq_printf(m, "Way1,Way3 Locked-Down\n"); 1265 break; 1266 case WAY013_L: 1267 seq_printf(m, "Way 0,Way1,Way3 Locked-Down\n"); 1268 break; 1269 case WAY32_L: 1270 seq_printf(m, "Way3,Way2 Locked-Down\n"); 1271 break; 1272 case WAY320_L: 1273 seq_printf(m, "Way3,Way2,Way0 Locked-Down\n"); 1274 break; 1275 case WAY321_L: 1276 seq_printf(m, "Way3,Way2,Way1 Locked-Down\n"); 1277 break; 1278 case WAYALL_L: 1279 seq_printf(m, "All Ways are locked\n"); 1280 break; 1281 default: 1282 seq_printf(m, "No Ways are locked\n"); 1283 } 1284#endif 1285 1286 if (cpu_num != num_possible_cpus() - 1) 1287 return 0; 1288 1289 if (L2_LENGTH) { 1290 seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400); 1291 seq_printf(m, "L2 SRAM\t\t: " 1292#if defined(CONFIG_BFIN_L2_ICACHEABLE) 1293 "cacheable" 1294#else 1295 "uncacheable" 1296#endif 1297 " in instruction cache\n"); 1298 seq_printf(m, "L2 SRAM\t\t: " 1299#if defined(CONFIG_BFIN_L2_WRITEBACK) 1300 "cacheable (write-back)" 1301#elif defined(CONFIG_BFIN_L2_WRITETHROUGH) 1302 "cacheable (write-through)" 1303#else 1304 "uncacheable" 1305#endif 1306 " in data cache\n"); 1307 } 1308 seq_printf(m, "board name\t: %s\n", bfin_board_name); 1309 seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n", 1310 physical_mem_end >> 10, (void *)0, (void *)physical_mem_end); 1311 
seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n", 1312 ((int)memory_end - (int)_stext) >> 10, 1313 _stext, 1314 (void *)memory_end); 1315 seq_printf(m, "\n"); 1316 1317 return 0; 1318} 1319 1320static void *c_start(struct seq_file *m, loff_t *pos) 1321{ 1322 if (*pos == 0) 1323 *pos = first_cpu(cpu_online_map); 1324 if (*pos >= num_online_cpus()) 1325 return NULL; 1326 1327 return pos; 1328} 1329 1330static void *c_next(struct seq_file *m, void *v, loff_t *pos) 1331{ 1332 *pos = next_cpu(*pos, cpu_online_map); 1333 1334 return c_start(m, pos); 1335} 1336 1337static void c_stop(struct seq_file *m, void *v) 1338{ 1339} 1340 1341const struct seq_operations cpuinfo_op = { 1342 .start = c_start, 1343 .next = c_next, 1344 .stop = c_stop, 1345 .show = show_cpuinfo, 1346}; 1347 1348void __init cmdline_init(const char *r0) 1349{ 1350 if (r0) 1351 strncpy(command_line, r0, COMMAND_LINE_SIZE); 1352} 1353