setup.c revision 7f3aee3c187641ec7c7e260d9cabb71ac4ac9f7c
/*
 * arch/blackfin/kernel/setup.c
 *
 * Copyright 2004-2006 Analog Devices Inc.
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/delay.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/pfn.h>

#include <linux/ext2_fs.h>
#include <linux/cramfs_fs.h>
#include <linux/romfs_fs.h>

#include <asm/cplb.h>
#include <asm/cacheflush.h>
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/fixed_code.h>
#include <asm/early_printk.h>

u16 _bfin_swrst;
EXPORT_SYMBOL(_bfin_swrst);

unsigned long memory_start, memory_end, physical_mem_end;
unsigned long _rambase, _ramstart, _ramend;
unsigned long reserved_mem_dcache_on;
unsigned long reserved_mem_icache_on;
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
EXPORT_SYMBOL(physical_mem_end);
EXPORT_SYMBOL(_ramend);
EXPORT_SYMBOL(reserved_mem_dcache_on);

#ifdef CONFIG_MTD_UCLINUX
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
unsigned long _ebss;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
#endif

char __initdata command_line[COMMAND_LINE_SIZE];
void __initdata *init_retx, *init_saved_retx, *init_saved_seqstat,
	*init_saved_icplb_fault_addr, *init_saved_dcplb_fault_addr;

/* boot memmap, for parsing "memmap=" */
#define BFIN_MEMMAP_MAX		128 /* number of entries in bfin_memmap */
#define BFIN_MEMMAP_RAM		1
#define BFIN_MEMMAP_RESERVED	2
static struct bfin_memmap {
	int nr_map;
	struct bfin_memmap_entry {
		unsigned long long addr; /* start of memory segment */
		unsigned long long size;
		unsigned long type;
	} map[BFIN_MEMMAP_MAX];
} bfin_memmap __initdata;

/* for memmap sanitization */
struct change_member {
	struct bfin_memmap_entry *pentry; /* pointer to original entry */
	unsigned long long addr; /* address for this change point */
};
static struct change_member change_point_list[2*BFIN_MEMMAP_MAX] __initdata;
static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;

DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);

static int early_init_clkin_hz(char *buf);

#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
void __init generate_cplb_tables(void)
{
	unsigned int cpu;

	generate_cplb_tables_all();
	/* Generate per-CPU I&D CPLB tables */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		generate_cplb_tables_cpu(cpu);
}
#endif

void __cpuinit bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
	bfin_icache_init(icplb_tbl[cpu]);
#endif

#ifdef CONFIG_BFIN_DCACHE
	bfin_dcache_init(dcplb_tbl[cpu]);
#endif

	/*
	 * In cache coherence emulation mode, we need to have the
	 * D-cache enabled before running any atomic operation which
	 * might involve cache invalidation (i.e. spinlock, rwlock).
	 * So printk's are deferred until then.
	 */
#ifdef CONFIG_BFIN_ICACHE
	printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
#endif
#ifdef CONFIG_BFIN_DCACHE
	printk(KERN_INFO "Data Cache Enabled for CPU%u"
# if defined CONFIG_BFIN_WB
		" (write-back)"
# elif defined CONFIG_BFIN_WT
		" (write-through)"
# endif
		"\n", cpu);
#endif
}

void __cpuinit bfin_setup_cpudata(unsigned int cpu)
{
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);

	cpudata->idle = current;
	cpudata->loops_per_jiffy = loops_per_jiffy;
	cpudata->imemctl = bfin_read_IMEM_CONTROL();
	cpudata->dmemctl = bfin_read_DMEM_CONTROL();
}

void __init bfin_cache_init(void)
{
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
	generate_cplb_tables();
#endif
	bfin_setup_caches(0);
}

void __init bfin_relocate_l1_mem(void)
{
	unsigned long l1_code_length;
	unsigned long l1_data_a_length;
	unsigned long l1_data_b_length;
	unsigned long l2_length;

	/*
	 * due to the ALIGN(4) in the arch/blackfin/kernel/vmlinux.lds.S
	 * we know that everything about l1 text/data is nice and aligned,
	 * so copy by 4 byte chunks, and don't worry about overlapping
	 * src/dest.
	 *
	 * We can't use the dma_memcpy functions, since they can call
	 * scheduler functions which might be in L1 :( and core writes
	 * into L1 instruction cause bad access errors, so we are stuck,
	 * we are required to use DMA, but can't use the common dma
	 * functions. We can't use memcpy either - since that might be
	 * going to be in the relocated L1
	 */

	blackfin_dma_early_init();

	/* if necessary, copy _stext_l1 to _etext_l1 to L1 instruction SRAM */
	l1_code_length = _etext_l1 - _stext_l1;
	if (l1_code_length)
		early_dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length);

	/* if necessary, copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */
	l1_data_a_length = _sbss_l1 - _sdata_l1;
	if (l1_data_a_length)
		early_dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);

	/* if necessary, copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */
	l1_data_b_length = _sbss_b_l1 - _sdata_b_l1;
	if (l1_data_b_length)
		early_dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
			l1_data_a_length, l1_data_b_length);

	early_dma_memcpy_done();

	/* if necessary, copy _stext_l2 to _edata_l2 to L2 SRAM */
	if (L2_LENGTH != 0) {
		l2_length = _sbss_l2 - _stext_l2;
		if (l2_length)
			memcpy(_stext_l2, _l2_lma_start, l2_length);
	}
}

/* add_memory_region to memmap */
static void __init add_memory_region(unsigned long long start,
				     unsigned long long size, int type)
{
	int i;

	i = bfin_memmap.nr_map;

	if (i == BFIN_MEMMAP_MAX) {
		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
		return;
	}

	bfin_memmap.map[i].addr = start;
	bfin_memmap.map[i].size = size;
	bfin_memmap.map[i].type = type;
	bfin_memmap.nr_map++;
}

/*
 * Sanitize the boot memmap, removing overlaps.
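 *
 * Every non-empty region contributes two change points (its start and its
 * end); the points are sorted by address and swept once while tracking
 * which regions are currently "open".  At each point the highest open type
 * wins, so reserved entries always take precedence over usable RAM.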
 */
static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
{
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/*
	   Visually we're performing the following (1,2,3,4 = memory types)

	   Sample memory map (w/overlaps):
		____22__________________
		______________________4_
		____1111________________
		_44_____________________
		11111111________________
		____________________33__
		___________44___________
		__________33333_________
		______________22________
		___________________2222_
		_________111111111______
		_____________________11_
		_________________4______

	   Sanitized equivalent (no overlap):
		1_______________________
		_44_____________________
		___1____________________
		____22__________________
		______11________________
		_________1______________
		__________3_____________
		___________44___________
		_____________33_________
		_______________2________
		________________1_______
		_________________4______
		___________________2____
		____________________33__
		______________________4_
	*/
	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in memmap */
	for (i = 0; i < old_nr; i++)
		if (map[i].addr + map[i].size < map[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (map[i].size != 0) {
			change_point[chgidx]->addr = map[i].addr;
			change_point[chgidx++]->pentry = &map[i];
			change_point[chgidx]->addr = map[i].addr + map[i].size;
			change_point[chgidx++]->pentry = &map[i];
		}
	}
	chg_nr = chgidx;	/* true number of change-points */

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			/* if <current_addr> > <last_addr>, swap */
			/* or, if current=<start_addr> & last=<end_addr>, swap */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
				((change_point[i]->addr == change_point[i-1]->addr) &&
				 (change_point[i]->addr == change_point[i]->pentry->addr) &&
				 (change_point[i-1]->addr != change_point[i-1]->pentry->addr))
			   ) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}

	/* create a new memmap, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_entry = 0;		/* index for creating new memmap entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */
	/* loop through change-points, determining effect on the new memmap */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping memmap entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pentry->addr) {
			/* add map entry to overlap list (> 1 entry
			   implies an overlap) */
			overlap_list[overlap_entries++] = change_point[chgidx]->pentry;
		} else {
			/* remove entry from list (order independent, so swap with last) */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] == change_point[chgidx]->pentry)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use */
		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new memmap based on this information */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_map[new_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_map[new_entry].size != 0)
					if (++new_entry >= BFIN_MEMMAP_MAX)
						break;	/* no more space left for new entries */
			}
			if (current_type != 0) {
				new_map[new_entry].addr = change_point[chgidx]->addr;
				new_map[new_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_entry;	/* retain count for new entries */

	/* copy new mapping into original location */
	memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
	*pnr_map = new_nr;

	return 0;
}

static void __init print_memory_map(char *who)
{
	int i;

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		printk(KERN_DEBUG " %s: %016Lx - %016Lx ", who,
			bfin_memmap.map[i].addr,
			bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
		switch (bfin_memmap.map[i].type) {
		case BFIN_MEMMAP_RAM:
			printk("(usable)\n");
			break;
		case BFIN_MEMMAP_RESERVED:
			printk("(reserved)\n");
			break;
		default:
			printk("type %lu\n", bfin_memmap.map[i].type);
			break;
		}
	}
}

static __init int parse_memmap(char *arg)
{
	unsigned long long start_at, mem_size;

	if (!arg)
		return -EINVAL;

	mem_size = memparse(arg, &arg);
	if (*arg == '@') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RAM);
	} else if (*arg == '$') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RESERVED);
	}

	return 0;
}
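
/*
 * For example, "memmap=4M@0x1000000" adds a 4 MiB usable RAM region at
 * 16 MiB, while "memmap=4M$0x1000000" marks that same range as reserved.
 */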

/*
 * Initial parsing of the command line.  Currently, we support:
 *  - Controlling the linux memory size: mem=xxx[KMG]
 *  - Controlling the physical memory size: max_mem=xxx[KMG][$][#]
 *       $ -> reserved memory is dcacheable
 *       # -> reserved memory is icacheable
 *  - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
 *       @ from <start> to <start>+<mem>, type RAM
 *       $ from <start> to <start>+<mem>, type RESERVED
 */
static __init void parse_cmdline_early(char *cmdline_p)
{
	char c = ' ', *to = cmdline_p;
	unsigned int memsize;
	for (;;) {
		if (c == ' ') {
			if (!memcmp(to, "mem=", 4)) {
				to += 4;
				memsize = memparse(to, &to);
				if (memsize)
					_ramend = memsize;

			} else if (!memcmp(to, "max_mem=", 8)) {
				to += 8;
				memsize = memparse(to, &to);
				if (memsize) {
					physical_mem_end = memsize;
					if (*to != ' ') {
						if (*to == '$'
						    || *(to + 1) == '$')
							reserved_mem_dcache_on = 1;
						if (*to == '#'
						    || *(to + 1) == '#')
							reserved_mem_icache_on = 1;
					}
				}
			} else if (!memcmp(to, "clkin_hz=", 9)) {
				to += 9;
				early_init_clkin_hz(to);
			} else if (!memcmp(to, "earlyprintk=", 12)) {
				to += 12;
				setup_early_printk(to);
			} else if (!memcmp(to, "memmap=", 7)) {
				to += 7;
				parse_memmap(to);
			}
		}
		c = *(to++);
		if (!c)
			break;
	}
}

/*
 * Setup memory defaults from user config.
 * The physical memory layout looks like:
 *
 *  [_rambase, _ramstart]:	kernel image
 *  [memory_start, memory_end]:	dynamic memory managed by kernel
 *  [memory_end, _ramend]:	reserved memory
 *	[memory_mtd_start(memory_end),
 *	 memory_mtd_start + mtd_size]:	rootfs (if any)
 *  [_ramend - DMA_UNCACHED_REGION,
 *	 _ramend]:	uncached DMA region
 *  [_ramend, physical_mem_end]:	memory not managed by kernel
 */
static __init void memory_setup(void)
{
#ifdef CONFIG_MTD_UCLINUX
	unsigned long mtd_phys = 0;
#endif

	_rambase = (unsigned long)_stext;
	_ramstart = (unsigned long)_end;

	if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
		console_init();
		panic("DMA region exceeds memory limit: %lu.",
			_ramend - _ramstart);
	}
	memory_end = _ramend - DMA_UNCACHED_REGION;

#ifdef CONFIG_MPU
	/* Round up to multiple of 4MB */
	memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
#else
	memory_start = PAGE_ALIGN(_ramstart);
#endif

#if defined(CONFIG_MTD_UCLINUX)
	/* generic memory mapped MTD driver */
	memory_mtd_end = memory_end;

	mtd_phys = _ramstart;
	mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));

# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
	if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
		mtd_size =
			PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
# endif

# if defined(CONFIG_CRAMFS)
	if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC)
		mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4)));
# endif

# if defined(CONFIG_ROMFS_FS)
	if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0
	    && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1)
		mtd_size =
			PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));
# if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
	/* Due to a Hardware Anomaly we need to limit the size of usable
	 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
	 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
	 */
# if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
	if (memory_end >= 56 * 1024 * 1024)
		memory_end = 56 * 1024 * 1024;
# else
	if (memory_end >= 60 * 1024 * 1024)
		memory_end = 60 * 1024 * 1024;
# endif				/* CONFIG_DEBUG_HUNT_FOR_ZERO */
# endif				/* ANOMALY_05000263 */
# endif				/* CONFIG_ROMFS_FS */

	memory_end -= mtd_size;

	if (mtd_size == 0) {
		console_init();
		panic("Don't boot kernel without rootfs attached.");
	}

	/* Relocate MTD image to the top of memory after the uncached memory area */
	dma_memcpy((char *)memory_end, _end, mtd_size);

	memory_mtd_start = memory_end;
	_ebss = memory_mtd_start;	/* define _ebss for compatibility */
#endif				/* CONFIG_MTD_UCLINUX */

#if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
	/* Due to a Hardware Anomaly we need to limit the size of usable
	 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
	 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
	 */
#if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
	if (memory_end >= 56 * 1024 * 1024)
		memory_end = 56 * 1024 * 1024;
#else
	if (memory_end >= 60 * 1024 * 1024)
		memory_end = 60 * 1024 * 1024;
#endif				/* CONFIG_DEBUG_HUNT_FOR_ZERO */
	printk(KERN_NOTICE "Warning: limiting memory to %liMB due to hardware anomaly 05000263\n", memory_end >> 20);
#endif				/* ANOMALY_05000263 */

#ifdef CONFIG_MPU
	page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
	page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
#endif

#if !defined(CONFIG_MTD_UCLINUX)
	/* In case there is no valid CPLB behind memory_end make sure we don't get too close */
	memory_end -= SIZE_4K;
#endif

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)0;

	printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20);
	printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20);

	printk(KERN_INFO "Memory map:\n"
		KERN_INFO " fixedcode = 0x%p-0x%p\n"
		KERN_INFO " text      = 0x%p-0x%p\n"
		KERN_INFO " rodata    = 0x%p-0x%p\n"
		KERN_INFO " bss       = 0x%p-0x%p\n"
		KERN_INFO " data      = 0x%p-0x%p\n"
		KERN_INFO " stack     = 0x%p-0x%p\n"
		KERN_INFO " init      = 0x%p-0x%p\n"
		KERN_INFO " available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX
		KERN_INFO " rootfs    = 0x%p-0x%p\n"
#endif
#if DMA_UNCACHED_REGION > 0
		KERN_INFO " DMA Zone  = 0x%p-0x%p\n"
#endif
		, (void *)FIXED_CODE_START, (void *)FIXED_CODE_END,
		_stext, _etext,
		__start_rodata, __end_rodata,
		__bss_start, __bss_stop,
		_sdata, _edata,
		(void *)&init_thread_union,
		(void *)((int)(&init_thread_union) + 0x2000),
		__init_begin, __init_end,
		(void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX
		, (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
#endif
#if DMA_UNCACHED_REGION > 0
		, (void *)(_ramend - DMA_UNCACHED_REGION), (void *)(_ramend)
#endif
		);
}

/*
 * Find the lowest, highest page frame number we have available
 */
void __init find_min_max_pfn(void)
{
	int i;

	max_pfn = 0;
	min_low_pfn = memory_end;

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		unsigned long start, end;
		/* RAM? */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		start = PFN_UP(bfin_memmap.map[i].addr);
		end = PFN_DOWN(bfin_memmap.map[i].addr +
			bfin_memmap.map[i].size);
		if (start >= end)
			continue;
		if (end > max_pfn)
			max_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
	}
}

static __init void setup_bootmem_allocator(void)
{
	int bootmap_size;
	int i;
	unsigned long start_pfn, end_pfn;
	unsigned long curr_pfn, last_pfn, size;

	/* mark memory between memory_start and memory_end usable */
	add_memory_region(memory_start,
		memory_end - memory_start, BFIN_MEMMAP_RAM);
	/* sanity check for overlap */
	sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
	print_memory_map("boot memmap");

	/* initialize globals in linux/bootmem.h */
	find_min_max_pfn();
	/* pfn of the last usable page frame */
	if (max_pfn > memory_end >> PAGE_SHIFT)
		max_pfn = memory_end >> PAGE_SHIFT;
	/* pfn of last page frame directly mapped by kernel */
	max_low_pfn = max_pfn;
	/* pfn of the first usable page frame after kernel image */
	if (min_low_pfn < memory_start >> PAGE_SHIFT)
		min_low_pfn = memory_start >> PAGE_SHIFT;

	start_pfn = PAGE_OFFSET >> PAGE_SHIFT;
	end_pfn = memory_end >> PAGE_SHIFT;

	/*
	 * give all the memory to the bootmap allocator, tell it to put the
	 * boot mem_map at the start of memory.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0),
			memory_start >> PAGE_SHIFT,	/* map goes here */
			start_pfn, end_pfn);

	/* register the memmap regions with the bootmem allocator */
	for (i = 0; i < bfin_memmap.nr_map; i++) {
		/*
		 * Reserve usable memory
		 */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(bfin_memmap.map[i].addr);
		if (curr_pfn >= end_pfn)
			continue;
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(bfin_memmap.map[i].addr +
				bfin_memmap.map[i].size);

		if (last_pfn > end_pfn)
			last_pfn = end_pfn;

		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
	}

	/* reserve memory before memory_start, including bootmap */
	reserve_bootmem(PAGE_OFFSET,
		memory_start + bootmap_size + PAGE_SIZE - 1 - PAGE_OFFSET,
		BOOTMEM_DEFAULT);
}

#define EBSZ_TO_MEG(ebsz) \
({ \
	int meg = 0; \
	switch (ebsz & 0xf) { \
		case 0x1: meg =  16; break; \
		case 0x3: meg =  32; break; \
		case 0x5: meg =  64; break; \
		case 0x7: meg = 128; break; \
		case 0x9: meg = 256; break; \
		case 0xb: meg = 512; break; \
	} \
	meg; \
})
static inline int __init get_mem_size(void)
{
#if defined(EBIU_SDBCTL)
# if defined(BF561_FAMILY)
	int ret = 0;
	u32 sdbctl = bfin_read_EBIU_SDBCTL();
	ret += EBSZ_TO_MEG(sdbctl >>  0);
	ret += EBSZ_TO_MEG(sdbctl >>  8);
	ret += EBSZ_TO_MEG(sdbctl >> 16);
	ret += EBSZ_TO_MEG(sdbctl >> 24);
	return ret;
# else
	return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL());
# endif
#elif defined(EBIU_DDRCTL1)
	u32 ddrctl = bfin_read_EBIU_DDRCTL1();
	int ret = 0;
	switch (ddrctl & 0xc0000) {
		case DEVSZ_64:  ret = 64 / 8;  break;
		case DEVSZ_128: ret = 128 / 8; break;
		case DEVSZ_256: ret = 256 / 8; break;
		case DEVSZ_512: ret = 512 / 8; break;
	}
	switch (ddrctl & 0x30000) {
		case DEVWD_4:  ret *= 2;
		case DEVWD_8:  ret *= 2;
		case DEVWD_16: break;
	}
	if ((ddrctl & 0xc000) == 0x4000)
		ret *= 2;
	return ret;
#endif
	BUG();
}

void __init setup_arch(char **cmdline_p)
{
	unsigned long sclk, cclk;

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

#if defined(CONFIG_CMDLINE_BOOL)
	strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line));
	command_line[sizeof(command_line) - 1] = 0;
#endif

	/* Keep a copy of command line */
	*cmdline_p = &command_line[0];
	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';

	/* setup memory defaults from the user config */
	physical_mem_end = 0;
	_ramend = get_mem_size() * 1024 * 1024;

	memset(&bfin_memmap, 0, sizeof(bfin_memmap));

	parse_cmdline_early(&command_line[0]);

	if (physical_mem_end == 0)
		physical_mem_end = _ramend;

	memory_setup();

	/* Initialize Async memory banks */
	bfin_write_EBIU_AMBCTL0(AMBCTL0VAL);
	bfin_write_EBIU_AMBCTL1(AMBCTL1VAL);
	bfin_write_EBIU_AMGCTL(AMGCTLVAL);
#ifdef CONFIG_EBIU_MBSCTLVAL
	bfin_write_EBIU_MBSCTL(CONFIG_EBIU_MBSCTLVAL);
	bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL);
	bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
#endif

	cclk = get_cclk();
	sclk = get_sclk();

	if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk)
		panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");

#ifdef BF561_FAMILY
	if (ANOMALY_05000266) {
		bfin_read_IMDMA_D0_IRQ_STATUS();
		bfin_read_IMDMA_D1_IRQ_STATUS();
	}
#endif
	printk(KERN_INFO "Hardware Trace ");
	if (bfin_read_TBUFCTL() & 0x1)
		printk("Active ");
	else
		printk("Off ");
	if (bfin_read_TBUFCTL() & 0x2)
		printk("and Enabled\n");
	else
		printk("and Disabled\n");

#if defined(CONFIG_CHR_DEV_FLASH) || defined(CONFIG_BLK_DEV_FLASH)
	/* we need to initialize the Flashrom device here since we might
	 * do things with flash early on in the boot
	 */
	flash_probe();
#endif

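	/* the low bits of SYSCR reflect the BMODE boot-mode selection */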
	printk(KERN_INFO "Boot Mode: %i\n", bfin_read_SYSCR() & 0xF);

	/* Newer parts mirror SWRST bits in SYSCR */
#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
    defined(CONFIG_BF538) || defined(CONFIG_BF539)
	_bfin_swrst = bfin_read_SWRST();
#else
	_bfin_swrst = bfin_read_SYSCR();
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
	bfin_write_SWRST(_bfin_swrst & ~DOUBLE_FAULT);
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET
	bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
#endif

#ifdef CONFIG_SMP
	if (_bfin_swrst & SWRST_DBL_FAULT_A) {
#else
	if (_bfin_swrst & RESET_DOUBLE) {
#endif
		printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		/* We assume the crashing kernel, and the current symbol table match */
		printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
			(int)init_saved_seqstat & SEQSTAT_EXCAUSE, init_saved_retx);
		printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr);
		printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			init_retx);
	} else if (_bfin_swrst & RESET_WDOG)
		printk(KERN_INFO "Recovering from Watchdog event\n");
	else if (_bfin_swrst & RESET_SOFTWARE)
		printk(KERN_NOTICE "Reset caused by Software reset\n");

	printk(KERN_INFO "Blackfin support (C) 2004-2009 Analog Devices, Inc.\n");
	if (bfin_compiled_revid() == 0xffff)
		printk(KERN_INFO "Compiled for ADSP-%s Rev any\n", CPU);
	else if (bfin_compiled_revid() == -1)
		printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
	else
		printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());

	if (unlikely(CPUID != bfin_cpuid()))
		printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
			CPU, bfin_cpuid(), bfin_revid());
	else {
		if (bfin_revid() != bfin_compiled_revid()) {
			if (bfin_compiled_revid() == -1)
				printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
					bfin_revid());
			else if (bfin_compiled_revid() != 0xffff) {
				printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
					bfin_compiled_revid(), bfin_revid());
				if (bfin_compiled_revid() > bfin_revid())
					panic("Error: you are missing anomaly workarounds for this rev");
			}
		}
		if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
			printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
				CPU, bfin_revid());
	}

	/* We can't run on BF548-0.1 due to ANOMALY 05000448 */
	if (bfin_cpuid() == 0x27de && bfin_revid() == 1)
		panic("You can't run on this processor due to 05000448");

	printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");

	printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
		cclk / 1000000, sclk / 1000000);

	setup_bootmem_allocator();

	paging_init();

	/* Copy atomic sequences to their fixed location, and sanity check that
	   these locations are the ones that we advertise to userspace.  */
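	/* The fixed-code region holds the sigreturn stub, the user-visible
	 * atomic sequences (xchg, cas, add, sub, ior, and, xor) and the safe
	 * user instruction slot; the BUG_ON()s below check that each copied
	 * symbol lands at exactly the offset exported to userspace.
	 */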
	memcpy((void *)FIXED_CODE_START, &fixed_code_start,
		FIXED_CODE_END - FIXED_CODE_START);
	BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start
		!= SIGRETURN_STUB - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start
		!= ATOMIC_XCHG32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start
		!= ATOMIC_CAS32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start
		!= ATOMIC_ADD32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start
		!= ATOMIC_SUB32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start
		!= ATOMIC_IOR32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start
		!= ATOMIC_AND32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
		!= ATOMIC_XOR32 - FIXED_CODE_START);
	BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
		!= SAFE_USER_INSTRUCTION - FIXED_CODE_START);

#ifdef CONFIG_SMP
	platform_init_cpus();
#endif
	init_exception_vectors();
	bfin_cache_init();	/* Initialize caches for the boot CPU */
}

static int __init topology_init(void)
{
	unsigned int cpu;
	/* Record CPU-private information for the boot processor. */
	bfin_setup_cpudata(0);

	for_each_possible_cpu(cpu) {
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
	}

	return 0;
}

subsys_initcall(topology_init);

/* Get the input clock frequency */
static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
static u_long get_clkin_hz(void)
{
	return cached_clkin_hz;
}
static int __init early_init_clkin_hz(char *buf)
{
	cached_clkin_hz = simple_strtoul(buf, NULL, 0);
#ifdef BFIN_KERNEL_CLOCK
	if (cached_clkin_hz != CONFIG_CLKIN_HZ)
		panic("cannot change clkin_hz when reprogramming clocks");
#endif
	return 1;
}
early_param("clkin_hz=", early_init_clkin_hz);

/* Get the voltage input multiplier */
static u_long get_vco(void)
{
	static u_long cached_vco;
	u_long msel, pll_ctl;

	/* The assumption here is that VCO never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_vco)
		return cached_vco;

	pll_ctl = bfin_read_PLL_CTL();
	msel = (pll_ctl >> 9) & 0x3F;
	if (0 == msel)
		msel = 64;

	cached_vco = get_clkin_hz();
	cached_vco >>= (1 & pll_ctl);	/* DF bit */
	cached_vco *= msel;
	return cached_vco;
}

/* Get the Core clock */
u_long get_cclk(void)
{
	static u_long cached_cclk_pll_div, cached_cclk;
	u_long csel, ssel;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV();
	if (ssel == cached_cclk_pll_div)
		return cached_cclk;
	else
		cached_cclk_pll_div = ssel;

	csel = ((ssel >> 4) & 0x03);
	ssel &= 0xf;
	if (ssel && ssel < (1 << csel))	/* SCLK > CCLK */
		cached_cclk = get_vco() / ssel;
	else
		cached_cclk = get_vco() >> csel;
	return cached_cclk;
}
EXPORT_SYMBOL(get_cclk);

/* Get the System clock */
u_long get_sclk(void)
{
	static u_long cached_sclk;
	u_long ssel;

	/* The assumption here is that SCLK never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
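	 *
	 * With the PLL in use, SCLK = VCO / SSEL (SSEL = PLL_DIV[3:0]); when
	 * the part runs straight from CLKIN (PLL bypassed), SCLK is simply
	 * the input clock.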
	 */
	if (cached_sclk)
		return cached_sclk;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV() & 0xf;
	if (0 == ssel) {
		printk(KERN_WARNING "Invalid System Clock\n");
		ssel = 1;
	}

	cached_sclk = get_vco() / ssel;
	return cached_sclk;
}
EXPORT_SYMBOL(get_sclk);

unsigned long sclk_to_usecs(unsigned long sclk)
{
	u64 tmp = USEC_PER_SEC * (u64)sclk;
	do_div(tmp, get_sclk());
	return tmp;
}
EXPORT_SYMBOL(sclk_to_usecs);

unsigned long usecs_to_sclk(unsigned long usecs)
{
	u64 tmp = get_sclk() * (u64)usecs;
	do_div(tmp, USEC_PER_SEC);
	return tmp;
}
EXPORT_SYMBOL(usecs_to_sclk);

/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	char *cpu, *mmu, *fpu, *vendor, *cache;
	uint32_t revid;
	int cpu_num = *(unsigned int *)v;
	u_long sclk, cclk;
	u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);

	cpu = CPU;
	mmu = "none";
	fpu = "none";
	revid = bfin_revid();

	sclk = get_sclk();
	cclk = get_cclk();

	switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
	case 0xca:
		vendor = "Analog Devices";
		break;
	default:
		vendor = "unknown";
		break;
	}

	seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);

	if (CPUID == bfin_cpuid())
		seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
	else
		seq_printf(m, "cpu family\t: Compiled for:0x%04x, running on:0x%04x\n",
			CPUID, bfin_cpuid());

	seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
		"stepping\t: %d\n",
		cpu, cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
		"mpu on",
#else
		"mpu off",
#endif
		revid);

	seq_printf(m, "cpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
		cclk/1000000, cclk%1000000,
		sclk/1000000, sclk%1000000);
	seq_printf(m, "bogomips\t: %lu.%02lu\n"
		"Calibration\t: %lu loops\n",
		(cpudata->loops_per_jiffy * HZ) / 500000,
		((cpudata->loops_per_jiffy * HZ) / 5000) % 100,
		(cpudata->loops_per_jiffy * HZ));

	/* Check Cache configuration */
	switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
	case ACACHE_BSRAM:
		cache = "dbank-A/B\t: cache/sram";
		dcache_size = 16;
		dsup_banks = 1;
		break;
	case ACACHE_BCACHE:
		cache = "dbank-A/B\t: cache/cache";
		dcache_size = 32;
		dsup_banks = 2;
		break;
	case ASRAM_BSRAM:
		cache = "dbank-A/B\t: sram/sram";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	default:
		cache = "unknown";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	}

	/* Is it turned on? */
	if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
		dcache_size = 0;

	if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
		icache_size = 0;

	seq_printf(m, "cache size\t: %d KB(L1 icache) "
		"%d KB(L1 dcache%s) %d KB(L2 cache)\n",
		icache_size, dcache_size,
#if defined CONFIG_BFIN_WB
		"-wb"
#elif defined CONFIG_BFIN_WT
		"-wt"
#endif
		"", 0);

	seq_printf(m, "%s\n", cache);

	if (icache_size)
		seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
			   BFIN_ISUBBANKS, BFIN_IWAYS, BFIN_ILINES);
	else
		seq_printf(m, "icache setup\t: off\n");

	seq_printf(m,
		   "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
		   dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
		   BFIN_DLINES);
#ifdef __ARCH_SYNC_CORE_DCACHE
	seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count);
#endif
#ifdef CONFIG_BFIN_ICACHE_LOCK
	switch ((cpudata->imemctl >> 3) & WAYALL_L) {
	case WAY0_L:
		seq_printf(m, "Way0 Locked-Down\n");
		break;
	case WAY1_L:
		seq_printf(m, "Way1 Locked-Down\n");
		break;
	case WAY01_L:
		seq_printf(m, "Way0,Way1 Locked-Down\n");
		break;
	case WAY2_L:
		seq_printf(m, "Way2 Locked-Down\n");
		break;
	case WAY02_L:
		seq_printf(m, "Way0,Way2 Locked-Down\n");
		break;
	case WAY12_L:
		seq_printf(m, "Way1,Way2 Locked-Down\n");
		break;
	case WAY012_L:
		seq_printf(m, "Way0,Way1 & Way2 Locked-Down\n");
		break;
	case WAY3_L:
		seq_printf(m, "Way3 Locked-Down\n");
		break;
	case WAY03_L:
		seq_printf(m, "Way0,Way3 Locked-Down\n");
		break;
	case WAY13_L:
		seq_printf(m, "Way1,Way3 Locked-Down\n");
		break;
	case WAY013_L:
		seq_printf(m, "Way 0,Way1,Way3 Locked-Down\n");
		break;
	case WAY32_L:
		seq_printf(m, "Way3,Way2 Locked-Down\n");
		break;
	case WAY320_L:
		seq_printf(m, "Way3,Way2,Way0 Locked-Down\n");
		break;
	case WAY321_L:
		seq_printf(m, "Way3,Way2,Way1 Locked-Down\n");
		break;
	case WAYALL_L:
		seq_printf(m, "All Ways are locked\n");
		break;
	default:
		seq_printf(m, "No Ways are locked\n");
	}
#endif

	if (cpu_num != num_possible_cpus() - 1)
		return 0;

	if (L2_LENGTH)
		seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
	seq_printf(m, "board name\t: %s\n", bfin_board_name);
	seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
		physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
	seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n",
		((int)memory_end - (int)_stext) >> 10,
		_stext,
		(void *)memory_end);
	seq_printf(m, "\n");

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		*pos = first_cpu(cpu_online_map);
	if (*pos >= num_online_cpus())
		return NULL;

	return pos;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = next_cpu(*pos, cpu_online_map);

	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};

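/* Save the command line handed to the kernel at boot (the r0 argument)
 * into command_line; setup_arch() copies it into boot_command_line later.
 */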
void __init cmdline_init(const char *r0)
{
	if (r0)
		strncpy(command_line, r0, COMMAND_LINE_SIZE);
}