setup.c revision c70c754ff916cedd969a73549799d2167ffefcd6
/*
 * arch/blackfin/kernel/setup.c
 *
 * Copyright 2004-2006 Analog Devices Inc.
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/delay.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/pfn.h>

#ifdef CONFIG_MTD_UCLINUX
#include <linux/mtd/map.h>
#include <linux/ext2_fs.h>
#include <linux/cramfs_fs.h>
#include <linux/romfs_fs.h>
#endif

#include <asm/cplb.h>
#include <asm/cacheflush.h>
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/fixed_code.h>
#include <asm/early_printk.h>

u16 _bfin_swrst;
EXPORT_SYMBOL(_bfin_swrst);

unsigned long memory_start, memory_end, physical_mem_end;
unsigned long _rambase, _ramstart, _ramend;
unsigned long reserved_mem_dcache_on;
unsigned long reserved_mem_icache_on;
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
EXPORT_SYMBOL(physical_mem_end);
EXPORT_SYMBOL(_ramend);
EXPORT_SYMBOL(reserved_mem_dcache_on);

#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
unsigned long _ebss;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
#endif

char __initdata command_line[COMMAND_LINE_SIZE];
void __initdata *init_retx, *init_saved_retx, *init_saved_seqstat,
	*init_saved_icplb_fault_addr, *init_saved_dcplb_fault_addr;

/* boot memmap, for parsing "memmap=" */
#define BFIN_MEMMAP_MAX		128 /* number of entries in bfin_memmap */
#define BFIN_MEMMAP_RAM		1
#define BFIN_MEMMAP_RESERVED	2
static struct bfin_memmap {
	int nr_map;
	struct bfin_memmap_entry {
		unsigned long long addr; /* start of memory segment */
		unsigned long long size;
		unsigned long type;
	} map[BFIN_MEMMAP_MAX];
} bfin_memmap __initdata;

/* for memmap sanitization */
struct change_member {
	struct bfin_memmap_entry *pentry; /* pointer to original entry */
	unsigned long long addr; /* address for this change point */
};
static struct change_member change_point_list[2*BFIN_MEMMAP_MAX] __initdata;
static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;

DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);

static int early_init_clkin_hz(char *buf);

#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
void __init generate_cplb_tables(void)
{
	unsigned int cpu;

	generate_cplb_tables_all();
	/* Generate per-CPU I&D CPLB tables */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		generate_cplb_tables_cpu(cpu);
}
#endif

void __cpuinit bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
	bfin_icache_init(icplb_tbl[cpu]);
#endif

#ifdef CONFIG_BFIN_DCACHE
	bfin_dcache_init(dcplb_tbl[cpu]);
#endif

	/*
	 * In cache coherence emulation mode, we need to have the
	 * D-cache enabled before running any atomic operation which
	 * might involve cache invalidation (i.e. spinlock, rwlock).
	 * So printk's are deferred until then.
	 */
#ifdef CONFIG_BFIN_ICACHE
	printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO " External memory:"
# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	       " cacheable"
# else
	       " uncacheable"
# endif
	       " in instruction cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO " L2 SRAM :"
# ifdef CONFIG_BFIN_L2_ICACHEABLE
		       " cacheable"
# else
		       " uncacheable"
# endif
		       " in instruction cache\n");

#else
	printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
#endif

#ifdef CONFIG_BFIN_DCACHE
	printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO " External memory:"
# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
	       " cacheable (write-back)"
# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
	       " cacheable (write-through)"
# else
	       " uncacheable"
# endif
	       " in data cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO " L2 SRAM :"
# if defined CONFIG_BFIN_L2_WRITEBACK
		       " cacheable (write-back)"
# elif defined CONFIG_BFIN_L2_WRITETHROUGH
		       " cacheable (write-through)"
# else
		       " uncacheable"
# endif
		       " in data cache\n");
#else
	printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}

void __cpuinit bfin_setup_cpudata(unsigned int cpu)
{
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);

	cpudata->idle = current;
	cpudata->imemctl = bfin_read_IMEM_CONTROL();
	cpudata->dmemctl = bfin_read_DMEM_CONTROL();
}

void __init bfin_cache_init(void)
{
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
	generate_cplb_tables();
#endif
	bfin_setup_caches(0);
}

void __init bfin_relocate_l1_mem(void)
{
	unsigned long l1_code_length;
	unsigned long l1_data_a_length;
	unsigned long l1_data_b_length;
	unsigned long l2_length;

	/*
	 * due to the ALIGN(4) in arch/blackfin/kernel/vmlinux.lds.S
	 * we know that everything about l1 text/data is nice and aligned,
	 * so copy by 4 byte chunks, and don't worry about overlapping
	 * src/dest.
	 *
	 * We can't use the dma_memcpy functions, since they can call
	 * scheduler functions which might be in L1 :( and core writes
	 * into L1 instruction memory cause bad access errors, so we are
	 * stuck, we are required to use DMA, but can't use the common dma
	 * functions.  We can't use memcpy either - since that might be
	 * going to be in the relocated L1
	 */

	blackfin_dma_early_init();

	/* if necessary, copy _stext_l1 to _etext_l1 to L1 instruction SRAM */
	l1_code_length = _etext_l1 - _stext_l1;
	if (l1_code_length)
		early_dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length);

	/* if necessary, copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */
	l1_data_a_length = _sbss_l1 - _sdata_l1;
	if (l1_data_a_length)
		early_dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);

	/* if necessary, copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */
	l1_data_b_length = _sbss_b_l1 - _sdata_b_l1;
	if (l1_data_b_length)
		early_dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
			l1_data_a_length, l1_data_b_length);

	early_dma_memcpy_done();

	/* if necessary, copy _stext_l2 to _edata_l2 to L2 SRAM */
	if (L2_LENGTH != 0) {
		l2_length = _sbss_l2 - _stext_l2;
		if (l2_length)
			memcpy(_stext_l2, _l2_lma_start, l2_length);
	}
}

/* add_memory_region to memmap */
static void __init add_memory_region(unsigned long long start,
				     unsigned long long size, int type)
{
	int i;

	i = bfin_memmap.nr_map;

	if (i == BFIN_MEMMAP_MAX) {
		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
		return;
	}

	bfin_memmap.map[i].addr = start;
	bfin_memmap.map[i].size = size;
	bfin_memmap.map[i].type = type;
	bfin_memmap.nr_map++;
}

/*
 * Sanitize the boot memmap, removing overlaps.
 */
static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
{
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/*
		Visually we're performing the following (1,2,3,4 = memory types)

		Sample memory map (w/overlaps):
		   ____22__________________
		   ______________________4_
		   ____1111________________
		   _44_____________________
		   11111111________________
		   ____________________33__
		   ___________44___________
		   __________33333_________
		   ______________22________
		   ___________________2222_
		   _________111111111______
		   _____________________11_
		   _________________4______

		Sanitized equivalent (no overlap):
		   1_______________________
		   _44_____________________
		   ___1____________________
		   ____22__________________
		   ______11________________
		   _________1______________
		   __________3_____________
		   ___________44___________
		   _____________33_________
		   _______________2________
		   ________________1_______
		   _________________4______
		   ___________________2____
		   ____________________33__
		   ______________________4_
	*/
	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in memmap */
	for (i = 0; i < old_nr; i++)
		if (map[i].addr + map[i].size < map[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (map[i].size != 0) {
			change_point[chgidx]->addr = map[i].addr;
			change_point[chgidx++]->pentry = &map[i];
			change_point[chgidx]->addr = map[i].addr + map[i].size;
			change_point[chgidx++]->pentry = &map[i];
		}
	}
	chg_nr = chgidx;	/* true number of change-points */

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			/* if <current_addr> > <last_addr>, swap */
			/* or, if current=<start_addr> & last=<end_addr>, swap */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
				((change_point[i]->addr == change_point[i-1]->addr) &&
				 (change_point[i]->addr == change_point[i]->pentry->addr) &&
				 (change_point[i-1]->addr != change_point[i-1]->pentry->addr))
			   ) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}

	/* create a new memmap, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_entry = 0;		/* index for creating new memmap entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */
	/* loop through change-points, determining effect on the new memmap */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping memmap entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pentry->addr) {
			/* add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++] = change_point[chgidx]->pentry;
		} else {
			/* remove entry from list (order independent, so swap with last) */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] == change_point[chgidx]->pentry)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use */
		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new memmap based on this information */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_map[new_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_map[new_entry].size != 0)
					if (++new_entry >= BFIN_MEMMAP_MAX)
						break;	/* no more space left for new entries */
			}
			if (current_type != 0) {
				new_map[new_entry].addr = change_point[chgidx]->addr;
				new_map[new_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_entry;	/* retain count for new entries */

	/* copy new mapping into original location */
	memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
	*pnr_map = new_nr;

	return 0;
}

static void __init print_memory_map(char *who)
{
	int i;

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		printk(KERN_DEBUG " %s: %016Lx - %016Lx ", who,
			bfin_memmap.map[i].addr,
			bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
		switch (bfin_memmap.map[i].type) {
		case BFIN_MEMMAP_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BFIN_MEMMAP_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", bfin_memmap.map[i].type);
			break;
		}
	}
}

static __init int parse_memmap(char *arg)
{
	unsigned long long start_at, mem_size;

	if (!arg)
		return -EINVAL;

	mem_size = memparse(arg, &arg);
	if (*arg == '@') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RAM);
	} else if (*arg == '$') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RESERVED);
	}

	return 0;
}

/*
 * Initial parsing of the command line.  Currently, we support:
 *  - Controlling the linux memory size: mem=xxx[KMG]
 *  - Controlling the physical memory size: max_mem=xxx[KMG][$][#]
 *       $ -> reserved memory is dcacheable
 *       # -> reserved memory is icacheable
 *  - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
 *       @ from <start> to <start>+<mem>, type RAM
 *       $ from <start> to <start>+<mem>, type RESERVED
 */
static __init void parse_cmdline_early(char *cmdline_p)
{
	char c = ' ', *to = cmdline_p;
	unsigned int memsize;
	for (;;) {
		if (c == ' ') {
			if (!memcmp(to, "mem=", 4)) {
				to += 4;
				memsize = memparse(to, &to);
				if (memsize)
					_ramend = memsize;

			} else if (!memcmp(to, "max_mem=", 8)) {
				to += 8;
				memsize = memparse(to, &to);
				if (memsize) {
					physical_mem_end = memsize;
					if (*to != ' ') {
						if (*to == '$'
						    || *(to + 1) == '$')
							reserved_mem_dcache_on = 1;
						if (*to == '#'
						    || *(to + 1) == '#')
							reserved_mem_icache_on = 1;
					}
				}
			} else if (!memcmp(to, "clkin_hz=", 9)) {
				to += 9;
				early_init_clkin_hz(to);
#ifdef CONFIG_EARLY_PRINTK
			} else if (!memcmp(to, "earlyprintk=", 12)) {
				to += 12;
				setup_early_printk(to);
#endif
			} else if (!memcmp(to, "memmap=", 7)) {
				to += 7;
				parse_memmap(to);
			}
		}
		c = *(to++);
		if (!c)
			break;
	}
}
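
/*
 * Illustrative examples of the boot options handled by parse_cmdline_early()
 * above (the sizes and addresses below are arbitrary sample values, not
 * recommendations for any particular board):
 *
 *   mem=32M                 limit kernel-managed memory to 32 MiB
 *   max_mem=64M$            physical memory is 64 MiB; reserved part dcacheable
 *   clkin_hz=25000000       board feeds a 25 MHz CLKIN into the PLL
 *   memmap=4M@0x1000000     add a 4 MiB region of type RAM at 0x1000000
 *   memmap=1M$0x3f00000     mark 1 MiB at 0x3f00000 as RESERVED
 */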

/*
 * Setup memory defaults from user config.
 * The physical memory layout looks like:
 *
 *  [_rambase, _ramstart]:		kernel image
 *  [memory_start, memory_end]:		dynamic memory managed by kernel
 *  [memory_end, _ramend]:		reserved memory
 *	[memory_mtd_start(memory_end),
 *		memory_mtd_start + mtd_size]:	rootfs (if any)
 *  [_ramend - DMA_UNCACHED_REGION,
 *	_ramend]:			uncached DMA region
 *  [_ramend, physical_mem_end]:	memory not managed by kernel
 */
static __init void memory_setup(void)
{
#ifdef CONFIG_MTD_UCLINUX
	unsigned long mtd_phys = 0;
#endif

	_rambase = (unsigned long)_stext;
	_ramstart = (unsigned long)_end;

	if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
		console_init();
		panic("DMA region exceeds memory limit: %lu.",
			_ramend - _ramstart);
	}
	memory_end = _ramend - DMA_UNCACHED_REGION;

#ifdef CONFIG_MPU
	/* Round up to multiple of 4MB */
	memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
#else
	memory_start = PAGE_ALIGN(_ramstart);
#endif

#if defined(CONFIG_MTD_UCLINUX)
	/* generic memory mapped MTD driver */
	memory_mtd_end = memory_end;

	mtd_phys = _ramstart;
	mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));

# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
	if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
		mtd_size =
			PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
# endif

# if defined(CONFIG_CRAMFS)
	if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC)
		mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4)));
# endif

# if defined(CONFIG_ROMFS_FS)
	if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0
	    && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1)
		mtd_size =
			PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));
#  if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
	/* Due to a Hardware Anomaly we need to limit the size of usable
	 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
	 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
	 */
#   if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
	if (memory_end >= 56 * 1024 * 1024)
		memory_end = 56 * 1024 * 1024;
#   else
	if (memory_end >= 60 * 1024 * 1024)
		memory_end = 60 * 1024 * 1024;
#   endif	/* CONFIG_DEBUG_HUNT_FOR_ZERO */
#  endif	/* ANOMALY_05000263 */
# endif		/* CONFIG_ROMFS_FS */

	/* Since the default MTD_UCLINUX has no magic number, we just blindly
	 * read 8 past the end of the kernel's image, and look at it.
	 * When no image is attached, mtd_size is set to a random number.
	 * Do some basic sanity checks before operating on things.
	 */
	if (mtd_size == 0 || memory_end <= mtd_size) {
		pr_emerg("Could not find valid ram mtd attached.\n");
	} else {
		memory_end -= mtd_size;

		/* Relocate MTD image to the top of memory after the uncached memory area */
		uclinux_ram_map.phys = memory_mtd_start = memory_end;
		uclinux_ram_map.size = mtd_size;
		pr_info("Found mtd partition at 0x%p, (len=0x%lx), moving to 0x%p\n",
			_end, mtd_size, (void *)memory_mtd_start);
		dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
	}
#endif	/* CONFIG_MTD_UCLINUX */

#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
	/* Due to a Hardware Anomaly we need to limit the size of usable
	 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
	 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
	 */
#if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
	if (memory_end >= 56 * 1024 * 1024)
		memory_end = 56 * 1024 * 1024;
#else
	if (memory_end >= 60 * 1024 * 1024)
		memory_end = 60 * 1024 * 1024;
#endif	/* CONFIG_DEBUG_HUNT_FOR_ZERO */
	printk(KERN_NOTICE "Warning: limiting memory to %liMB due to hardware anomaly 05000263\n", memory_end >> 20);
#endif	/* ANOMALY_05000263 */

#ifdef CONFIG_MPU
	page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
	page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
#endif

#if !defined(CONFIG_MTD_UCLINUX)
	/* In case there is no valid CPLB behind memory_end, make sure we don't get too close */
	memory_end -= SIZE_4K;
#endif

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)0;

	printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20);
	printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20);

	printk(KERN_INFO "Memory map:\n"
	       "  fixedcode = 0x%p-0x%p\n"
	       "  text      = 0x%p-0x%p\n"
	       "  rodata    = 0x%p-0x%p\n"
	       "  bss       = 0x%p-0x%p\n"
	       "  data      = 0x%p-0x%p\n"
	       "    stack   = 0x%p-0x%p\n"
	       "  init      = 0x%p-0x%p\n"
	       "  available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX
	       "  rootfs    = 0x%p-0x%p\n"
#endif
#if DMA_UNCACHED_REGION > 0
	       "  DMA Zone  = 0x%p-0x%p\n"
#endif
	       , (void *)FIXED_CODE_START, (void *)FIXED_CODE_END,
	       _stext, _etext,
	       __start_rodata, __end_rodata,
	       __bss_start, __bss_stop,
	       _sdata, _edata,
	       (void *)&init_thread_union,
	       (void *)((int)(&init_thread_union) + 0x2000),
	       __init_begin, __init_end,
	       (void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX
	       , (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
#endif
#if DMA_UNCACHED_REGION > 0
	       , (void *)(_ramend - DMA_UNCACHED_REGION), (void *)(_ramend)
#endif
	       );
}

/*
 * Find the lowest, highest page frame number we have available
 */
void __init find_min_max_pfn(void)
{
	int i;

	max_pfn = 0;
	min_low_pfn = memory_end;

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		unsigned long start, end;
		/* RAM? */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		start = PFN_UP(bfin_memmap.map[i].addr);
		end = PFN_DOWN(bfin_memmap.map[i].addr +
				bfin_memmap.map[i].size);
		if (start >= end)
			continue;
		if (end > max_pfn)
			max_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
	}
}

static __init void setup_bootmem_allocator(void)
{
	int bootmap_size;
	int i;
	unsigned long start_pfn, end_pfn;
	unsigned long curr_pfn, last_pfn, size;

	/* mark memory between memory_start and memory_end usable */
	add_memory_region(memory_start,
		memory_end - memory_start, BFIN_MEMMAP_RAM);
	/* sanity check for overlap */
	sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
	print_memory_map("boot memmap");

	/* initialize globals in linux/bootmem.h */
	find_min_max_pfn();
	/* pfn of the last usable page frame */
	if (max_pfn > memory_end >> PAGE_SHIFT)
		max_pfn = memory_end >> PAGE_SHIFT;
	/* pfn of last page frame directly mapped by kernel */
	max_low_pfn = max_pfn;
	/* pfn of the first usable page frame after kernel image */
	if (min_low_pfn < memory_start >> PAGE_SHIFT)
		min_low_pfn = memory_start >> PAGE_SHIFT;

	start_pfn = PAGE_OFFSET >> PAGE_SHIFT;
	end_pfn = memory_end >> PAGE_SHIFT;

	/*
	 * give all the memory to the bootmap allocator, tell it to put the
	 * boot mem_map at the start of memory.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0),
			memory_start >> PAGE_SHIFT,	/* map goes here */
			start_pfn, end_pfn);

	/* register the memmap regions with the bootmem allocator */
	for (i = 0; i < bfin_memmap.nr_map; i++) {
		/*
		 * Reserve usable memory
		 */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(bfin_memmap.map[i].addr);
		if (curr_pfn >= end_pfn)
			continue;
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(bfin_memmap.map[i].addr +
				bfin_memmap.map[i].size);

		if (last_pfn > end_pfn)
			last_pfn = end_pfn;

		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
	}

	/* reserve memory before memory_start, including bootmap */
	reserve_bootmem(PAGE_OFFSET,
		memory_start + bootmap_size + PAGE_SIZE - 1 - PAGE_OFFSET,
		BOOTMEM_DEFAULT);
}

#define EBSZ_TO_MEG(ebsz) \
({ \
	int meg = 0; \
	switch (ebsz & 0xf) { \
		case 0x1: meg =  16; break; \
		case 0x3: meg =  32; break; \
		case 0x5: meg =  64; break; \
		case 0x7: meg = 128; break; \
		case 0x9: meg = 256; break; \
		case 0xb: meg = 512; break; \
	} \
	meg; \
})
static inline int __init get_mem_size(void)
{
#if defined(EBIU_SDBCTL)
# if defined(BF561_FAMILY)
	int ret = 0;
	u32 sdbctl = bfin_read_EBIU_SDBCTL();
	ret += EBSZ_TO_MEG(sdbctl >>  0);
	ret += EBSZ_TO_MEG(sdbctl >>  8);
	ret += EBSZ_TO_MEG(sdbctl >> 16);
	ret += EBSZ_TO_MEG(sdbctl >> 24);
	return ret;
# else
	return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL());
# endif
#elif defined(EBIU_DDRCTL1)
	u32 ddrctl = bfin_read_EBIU_DDRCTL1();
	int ret = 0;
	switch (ddrctl & 0xc0000) {
	case DEVSZ_64:  ret =  64 / 8; break;
	case DEVSZ_128: ret = 128 / 8; break;
	case DEVSZ_256: ret = 256 / 8; break;
	case DEVSZ_512: ret = 512 / 8; break;
	}
	/* note: DEVWD_4 and DEVWD_8 fall through, doubling the size once or twice */
	switch (ddrctl & 0x30000) {
	case DEVWD_4:  ret *= 2;
	case DEVWD_8:  ret *= 2;
	case DEVWD_16: break;
	}
	if ((ddrctl & 0xc000) == 0x4000)
		ret *= 2;
	return ret;
#endif
	BUG();
}

void __init setup_arch(char **cmdline_p)
{
	unsigned long sclk, cclk;

	/* Check to make sure we are running on the right processor */
	if (unlikely(CPUID != bfin_cpuid()))
		printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
			CPU, bfin_cpuid(), bfin_revid());

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

#if defined(CONFIG_CMDLINE_BOOL)
	strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line));
	command_line[sizeof(command_line) - 1] = 0;
#endif

	/* Keep a copy of command line */
	*cmdline_p = &command_line[0];
	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';

	memset(&bfin_memmap, 0, sizeof(bfin_memmap));

	/* If the user does not specify things on the command line, use
	 * what the bootloader set things up as
	 */
	physical_mem_end = 0;
	parse_cmdline_early(&command_line[0]);

	if (_ramend == 0)
		_ramend = get_mem_size() * 1024 * 1024;

	if (physical_mem_end == 0)
		physical_mem_end = _ramend;

	memory_setup();

	/* Initialize Async memory banks */
	bfin_write_EBIU_AMBCTL0(AMBCTL0VAL);
	bfin_write_EBIU_AMBCTL1(AMBCTL1VAL);
	bfin_write_EBIU_AMGCTL(AMGCTLVAL);
#ifdef CONFIG_EBIU_MBSCTLVAL
	bfin_write_EBIU_MBSCTL(CONFIG_EBIU_MBSCTLVAL);
	bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL);
	bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
#endif

	cclk = get_cclk();
	sclk = get_sclk();

	if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk)
		panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");

#ifdef BF561_FAMILY
	if (ANOMALY_05000266) {
		bfin_read_IMDMA_D0_IRQ_STATUS();
		bfin_read_IMDMA_D1_IRQ_STATUS();
	}
#endif
	printk(KERN_INFO "Hardware Trace ");
	if (bfin_read_TBUFCTL() & 0x1)
		printk(KERN_CONT "Active ");
	else
		printk(KERN_CONT "Off ");
	if (bfin_read_TBUFCTL() & 0x2)
		printk(KERN_CONT "and Enabled\n");
	else
		printk(KERN_CONT "and Disabled\n");

	printk(KERN_INFO "Boot Mode: %i\n", bfin_read_SYSCR() & 0xF);

	/* Newer parts mirror SWRST bits in SYSCR */
#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
    defined(CONFIG_BF538) || defined(CONFIG_BF539)
	_bfin_swrst = bfin_read_SWRST();
#else
	/* Clear boot mode field */
	_bfin_swrst = bfin_read_SYSCR() & ~0xf;
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
	bfin_write_SWRST(_bfin_swrst & ~DOUBLE_FAULT);
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET
	bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
#endif

#ifdef CONFIG_SMP
	if (_bfin_swrst & SWRST_DBL_FAULT_A) {
#else
	if (_bfin_swrst & RESET_DOUBLE) {
#endif
		printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		/* We assume the crashing kernel and the current symbol table match */
		printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
			(int)init_saved_seqstat & SEQSTAT_EXCAUSE, init_saved_retx);
		printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr);
		printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			init_retx);
	} else if (_bfin_swrst & RESET_WDOG)
		printk(KERN_INFO "Recovering from Watchdog event\n");
	else if (_bfin_swrst & RESET_SOFTWARE)
		printk(KERN_NOTICE "Reset caused by Software reset\n");

	printk(KERN_INFO "Blackfin support (C) 2004-2009 Analog Devices, Inc.\n");
	if (bfin_compiled_revid() == 0xffff)
		printk(KERN_INFO "Compiled for ADSP-%s Rev any\n", CPU);
	else if (bfin_compiled_revid() == -1)
		printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
	else
		printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());

	if (likely(CPUID == bfin_cpuid())) {
		if (bfin_revid() != bfin_compiled_revid()) {
			if (bfin_compiled_revid() == -1)
				printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
					bfin_revid());
			else if (bfin_compiled_revid() != 0xffff) {
				printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
					bfin_compiled_revid(), bfin_revid());
				if (bfin_compiled_revid() > bfin_revid())
					panic("Error: you are missing anomaly workarounds for this rev");
			}
		}
		if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
			printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
				CPU, bfin_revid());
	}

	printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");

	printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
		cclk / 1000000, sclk / 1000000);

	setup_bootmem_allocator();

	paging_init();

	/* Copy atomic sequences to their fixed location, and sanity check that
	   these locations are the ones that we advertise to userspace.  */
	memcpy((void *)FIXED_CODE_START, &fixed_code_start,
		FIXED_CODE_END - FIXED_CODE_START);
	BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start
		!= SIGRETURN_STUB - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start
		!= ATOMIC_XCHG32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start
		!= ATOMIC_CAS32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start
		!= ATOMIC_ADD32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start
		!= ATOMIC_SUB32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start
		!= ATOMIC_IOR32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start
		!= ATOMIC_AND32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
		!= ATOMIC_XOR32 - FIXED_CODE_START);
	BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
		!= SAFE_USER_INSTRUCTION - FIXED_CODE_START);

#ifdef CONFIG_SMP
	platform_init_cpus();
#endif
	init_exception_vectors();
	bfin_cache_init();	/* Initialize caches for the boot CPU */
}

static int __init topology_init(void)
{
	unsigned int cpu;
	/* Record CPU-private information for the boot processor. */
	bfin_setup_cpudata(0);

	for_each_possible_cpu(cpu) {
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
	}

	return 0;
}

subsys_initcall(topology_init);

/* Get the input clock frequency */
static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
static u_long get_clkin_hz(void)
{
	return cached_clkin_hz;
}
static int __init early_init_clkin_hz(char *buf)
{
	cached_clkin_hz = simple_strtoul(buf, NULL, 0);
#ifdef BFIN_KERNEL_CLOCK
	if (cached_clkin_hz != CONFIG_CLKIN_HZ)
		panic("cannot change clkin_hz when reprogramming clocks");
#endif
	return 1;
}
early_param("clkin_hz=", early_init_clkin_hz);

/* Get the voltage input multiplier */
static u_long get_vco(void)
{
	static u_long cached_vco;
	u_long msel, pll_ctl;

	/* The assumption here is that VCO never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_vco)
		return cached_vco;

	pll_ctl = bfin_read_PLL_CTL();
	msel = (pll_ctl >> 9) & 0x3F;
	if (0 == msel)
		msel = 64;

	cached_vco = get_clkin_hz();
	cached_vco >>= (1 & pll_ctl);	/* DF bit */
	cached_vco *= msel;
	return cached_vco;
}

/* Get the Core clock */
u_long get_cclk(void)
{
	static u_long cached_cclk_pll_div, cached_cclk;
	u_long csel, ssel;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV();
	if (ssel == cached_cclk_pll_div)
		return cached_cclk;
	else
		cached_cclk_pll_div = ssel;

	csel = ((ssel >> 4) & 0x03);
	ssel &= 0xf;
	if (ssel && ssel < (1 << csel))	/* SCLK > CCLK */
		cached_cclk = get_vco() / ssel;
	else
		cached_cclk = get_vco() >> csel;
	return cached_cclk;
}
EXPORT_SYMBOL(get_cclk);

/* Get the System clock */
u_long get_sclk(void)
{
	static u_long cached_sclk;
	u_long ssel;

	/* The assumption here is that SCLK never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_sclk)
		return cached_sclk;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV() & 0xf;
	if (0 == ssel) {
		printk(KERN_WARNING "Invalid System Clock\n");
		ssel = 1;
	}

	cached_sclk = get_vco() / ssel;
	return cached_sclk;
}
EXPORT_SYMBOL(get_sclk);

unsigned long sclk_to_usecs(unsigned long sclk)
{
	u64 tmp = USEC_PER_SEC * (u64)sclk;
	do_div(tmp, get_sclk());
	return tmp;
}
EXPORT_SYMBOL(sclk_to_usecs);

unsigned long usecs_to_sclk(unsigned long usecs)
{
	u64 tmp = get_sclk() * (u64)usecs;
	do_div(tmp, USEC_PER_SEC);
	return tmp;
}
EXPORT_SYMBOL(usecs_to_sclk);

/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	char *cpu, *mmu, *fpu, *vendor, *cache;
	uint32_t revid;
	int cpu_num = *(unsigned int *)v;
	u_long sclk, cclk;
	u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);

	cpu = CPU;
	mmu = "none";
	fpu = "none";
	revid = bfin_revid();

	sclk = get_sclk();
	cclk = get_cclk();

	switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
	case 0xca:
		vendor = "Analog Devices";
		break;
	default:
		vendor = "unknown";
		break;
	}

	seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);

	if (CPUID == bfin_cpuid())
		seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
	else
		seq_printf(m, "cpu family\t: Compiled for:0x%04x, running on:0x%04x\n",
			CPUID, bfin_cpuid());

	seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
		"stepping\t: %d ",
		cpu, cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
		"mpu on",
#else
		"mpu off",
#endif
		revid);

	if (bfin_revid() != bfin_compiled_revid()) {
		if (bfin_compiled_revid() == -1)
			seq_printf(m, "(Compiled for Rev none)");
		else if (bfin_compiled_revid() == 0xffff)
			seq_printf(m, "(Compiled for Rev any)");
		else
			seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
	}

	seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
		cclk/1000000, cclk%1000000,
		sclk/1000000, sclk%1000000);
	seq_printf(m, "bogomips\t: %lu.%02lu\n"
		"Calibration\t: %lu loops\n",
		(loops_per_jiffy * HZ) / 500000,
		((loops_per_jiffy * HZ) / 5000) % 100,
		(loops_per_jiffy * HZ));

	/* Check Cache configuration */
	switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
	case ACACHE_BSRAM:
		cache = "dbank-A/B\t: cache/sram";
		dcache_size = 16;
		dsup_banks = 1;
		break;
	case ACACHE_BCACHE:
		cache = "dbank-A/B\t: cache/cache";
		dcache_size = 32;
		dsup_banks = 2;
		break;
	case ASRAM_BSRAM:
		cache = "dbank-A/B\t: sram/sram";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	default:
		cache = "unknown";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	}

	/* Is it turned on? */
	if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
		dcache_size = 0;

	if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
		icache_size = 0;

	seq_printf(m, "cache size\t: %d KB(L1 icache) "
		"%d KB(L1 dcache) %d KB(L2 cache)\n",
		icache_size, dcache_size, 0);
	seq_printf(m, "%s\n", cache);
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
		   "cacheable"
#else
		   "uncacheable"
#endif
		   " in instruction cache\n");
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
		   "cacheable (write-back)"
#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
		   "cacheable (write-through)"
#else
		   "uncacheable"
#endif
		   " in data cache\n");

	if (icache_size)
		seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
			   BFIN_ISUBBANKS, BFIN_IWAYS, BFIN_ILINES);
	else
		seq_printf(m, "icache setup\t: off\n");

	seq_printf(m,
		   "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
		   dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
		   BFIN_DLINES);
#ifdef __ARCH_SYNC_CORE_DCACHE
	seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count);
#endif
#ifdef __ARCH_SYNC_CORE_ICACHE
	seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count);
#endif
#ifdef CONFIG_BFIN_ICACHE_LOCK
	switch ((cpudata->imemctl >> 3) & WAYALL_L) {
	case WAY0_L:
		seq_printf(m, "Way0 Locked-Down\n");
		break;
	case WAY1_L:
		seq_printf(m, "Way1 Locked-Down\n");
		break;
	case WAY01_L:
		seq_printf(m, "Way0,Way1 Locked-Down\n");
		break;
	case WAY2_L:
		seq_printf(m, "Way2 Locked-Down\n");
		break;
	case WAY02_L:
		seq_printf(m, "Way0,Way2 Locked-Down\n");
		break;
	case WAY12_L:
		seq_printf(m, "Way1,Way2 Locked-Down\n");
		break;
	case WAY012_L:
		seq_printf(m, "Way0,Way1 & Way2 Locked-Down\n");
		break;
	case WAY3_L:
		seq_printf(m, "Way3 Locked-Down\n");
		break;
	case WAY03_L:
		seq_printf(m, "Way0,Way3 Locked-Down\n");
		break;
	case WAY13_L:
		seq_printf(m, "Way1,Way3 Locked-Down\n");
		break;
	case WAY013_L:
		seq_printf(m, "Way 0,Way1,Way3 Locked-Down\n");
		break;
	case WAY32_L:
		seq_printf(m, "Way3,Way2 Locked-Down\n");
		break;
	case WAY320_L:
		seq_printf(m, "Way3,Way2,Way0 Locked-Down\n");
		break;
	case WAY321_L:
		seq_printf(m, "Way3,Way2,Way1 Locked-Down\n");
		break;
	case WAYALL_L:
		seq_printf(m, "All Ways are locked\n");
		break;
	default:
		seq_printf(m, "No Ways are locked\n");
	}
#endif

	if (cpu_num != num_possible_cpus() - 1)
		return 0;

	if (L2_LENGTH) {
		seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_ICACHEABLE)
			   "cacheable"
#else
			   "uncacheable"
#endif
			   " in instruction cache\n");
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_WRITEBACK)
			   "cacheable (write-back)"
#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
			   "cacheable (write-through)"
#else
			   "uncacheable"
#endif
			   " in data cache\n");
	}
	seq_printf(m, "board name\t: %s\n", bfin_board_name);
	seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
		physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
	seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n",
		((int)memory_end - (int)_stext) >> 10,
		_stext,
		(void *)memory_end);
	seq_printf(m, "\n");

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		*pos = first_cpu(cpu_online_map);
	if (*pos >= num_online_cpus())
		return NULL;

	return pos;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = next_cpu(*pos, cpu_online_map);

	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};

void __init cmdline_init(const char *r0)
{
	if (r0)
		strncpy(command_line, r0, COMMAND_LINE_SIZE);
}