/*
 * setup.c (revision 672552adb3197c5db3acc8800c7917bcff180461)
 *
 * Copyright 2004-2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/delay.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/pfn.h>

#ifdef CONFIG_MTD_UCLINUX
#include <linux/mtd/map.h>
#include <linux/ext2_fs.h>
#include <linux/cramfs_fs.h>
#include <linux/romfs_fs.h>
#endif

#include <asm/cplb.h>
#include <asm/cacheflush.h>
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
#include <asm/clocks.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/fixed_code.h>
#include <asm/early_printk.h>
#include <asm/irq_handler.h>
#include <asm/pda.h>
#ifdef CONFIG_BF60x
#include <mach/pm.h>
#endif

u16 _bfin_swrst;
EXPORT_SYMBOL(_bfin_swrst);

unsigned long memory_start, memory_end, physical_mem_end;
unsigned long _rambase, _ramstart, _ramend;
unsigned long reserved_mem_dcache_on;
unsigned long reserved_mem_icache_on;
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
EXPORT_SYMBOL(physical_mem_end);
EXPORT_SYMBOL(_ramend);
EXPORT_SYMBOL(reserved_mem_dcache_on);

#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
unsigned long _ebss;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
#endif

char __initdata command_line[COMMAND_LINE_SIZE];
struct blackfin_initial_pda __initdata initial_pda;

/* boot memmap, for parsing "memmap=" */
#define BFIN_MEMMAP_MAX		128 /* number of entries in bfin_memmap */
#define BFIN_MEMMAP_RAM		1
#define BFIN_MEMMAP_RESERVED	2
static struct bfin_memmap {
	int nr_map;
	struct bfin_memmap_entry {
		unsigned long long addr; /* start of memory segment */
		unsigned long long size;
		unsigned long type;
	} map[BFIN_MEMMAP_MAX];
} bfin_memmap __initdata;

/* for memmap sanitization */
struct change_member {
	struct bfin_memmap_entry *pentry; /* pointer to original entry */
	unsigned long long addr; /* address for this change point */
};
static struct change_member change_point_list[2*BFIN_MEMMAP_MAX] __initdata;
static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;

DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);

static int early_init_clkin_hz(char *buf);

#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
void __init generate_cplb_tables(void)
{
	unsigned int cpu;

	generate_cplb_tables_all();
	/* Generate per-CPU I&D CPLB tables */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		generate_cplb_tables_cpu(cpu);
}
#endif

void __cpuinit bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
	bfin_icache_init(icplb_tbl[cpu]);
#endif

#ifdef CONFIG_BFIN_DCACHE
	bfin_dcache_init(dcplb_tbl[cpu]);
#endif

	bfin_setup_cpudata(cpu);

	/*
	 * In cache coherence emulation mode, we need to have the
	 * D-cache enabled before running any atomic operation which
	 * might involve cache invalidation (e.g. spinlock, rwlock).
	 * So printk's are deferred until then.
	 */
#ifdef CONFIG_BFIN_ICACHE
	printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO "  External memory:"
# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	       " cacheable"
# else
	       " uncacheable"
# endif
	       " in instruction cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO "  L2 SRAM        :"
# ifdef CONFIG_BFIN_L2_ICACHEABLE
		       " cacheable"
# else
		       " uncacheable"
# endif
		       " in instruction cache\n");

#else
	printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
#endif

#ifdef CONFIG_BFIN_DCACHE
	printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO "  External memory:"
# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
	       " cacheable (write-back)"
# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
	       " cacheable (write-through)"
# else
	       " uncacheable"
# endif
	       " in data cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO "  L2 SRAM        :"
# if defined CONFIG_BFIN_L2_WRITEBACK
		       " cacheable (write-back)"
# elif defined CONFIG_BFIN_L2_WRITETHROUGH
		       " cacheable (write-through)"
# else
		       " uncacheable"
# endif
		       " in data cache\n");
#else
	printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}

void __cpuinit bfin_setup_cpudata(unsigned int cpu)
{
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);

	cpudata->imemctl = bfin_read_IMEM_CONTROL();
	cpudata->dmemctl = bfin_read_DMEM_CONTROL();
}

void __init bfin_cache_init(void)
{
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
	generate_cplb_tables();
#endif
	bfin_setup_caches(0);
}

void __init bfin_relocate_l1_mem(void)
{
	unsigned long text_l1_len = (unsigned long)_text_l1_len;
	unsigned long data_l1_len = (unsigned long)_data_l1_len;
	unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
	unsigned long l2_len = (unsigned long)_l2_len;

	early_shadow_stamp();

	/*
	 * Due to the ALIGN(4) in arch/blackfin/kernel/vmlinux.lds.S,
	 * we know that everything about the L1 text/data is nicely
	 * aligned, so copy in 4 byte chunks and don't worry about
	 * overlapping src/dest.
	 *
	 * We can't use the dma_memcpy functions, since they can call
	 * scheduler functions which might be in L1 :( and core writes
	 * into L1 instruction memory cause bad access errors, so we are
	 * stuck: we are required to use DMA, but can't use the common
	 * DMA functions. We can't use memcpy either, since it might be
	 * in the L1 memory being relocated.
	 */

	blackfin_dma_early_init();

	/* if necessary, copy L1 text to L1 instruction SRAM */
	if (L1_CODE_LENGTH && text_l1_len)
		early_dma_memcpy(_stext_l1, _text_l1_lma, text_l1_len);

	/* if necessary, copy L1 data to L1 data bank A SRAM */
	if (L1_DATA_A_LENGTH && data_l1_len)
		early_dma_memcpy(_sdata_l1, _data_l1_lma, data_l1_len);

	/* if necessary, copy L1 data B to L1 data bank B SRAM */
	if (L1_DATA_B_LENGTH && data_b_l1_len)
		early_dma_memcpy(_sdata_b_l1, _data_b_l1_lma, data_b_l1_len);

	early_dma_memcpy_done();

#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
	blackfin_iflush_l1_entry[0] = (unsigned long)blackfin_icache_flush_range_l1;
#endif

	/* if necessary, copy L2 text/data to L2 SRAM */
	if (L2_LENGTH && l2_len)
		memcpy(_stext_l2, _l2_lma, l2_len);
}

#ifdef CONFIG_SMP
void __init bfin_relocate_coreb_l1_mem(void)
{
	unsigned long text_l1_len = (unsigned long)_text_l1_len;
	unsigned long data_l1_len = (unsigned long)_data_l1_len;
	unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;

	blackfin_dma_early_init();

	/* if necessary, copy L1 text to L1 instruction SRAM */
	if (L1_CODE_LENGTH && text_l1_len)
		early_dma_memcpy((void *)COREB_L1_CODE_START, _text_l1_lma,
				text_l1_len);

	/* if necessary, copy L1 data to L1 data bank A SRAM */
	if (L1_DATA_A_LENGTH && data_l1_len)
		early_dma_memcpy((void *)COREB_L1_DATA_A_START, _data_l1_lma,
				data_l1_len);

	/* if necessary, copy L1 data B to L1 data bank B SRAM */
	if (L1_DATA_B_LENGTH && data_b_l1_len)
		early_dma_memcpy((void *)COREB_L1_DATA_B_START, _data_b_l1_lma,
				data_b_l1_len);

	early_dma_memcpy_done();

#ifdef CONFIG_ICACHE_FLUSH_L1
	blackfin_iflush_l1_entry[1] = (unsigned long)blackfin_icache_flush_range_l1 -
			(unsigned long)_stext_l1 + COREB_L1_CODE_START;
#endif
}
#endif

#ifdef CONFIG_ROMKERNEL
void __init bfin_relocate_xip_data(void)
{
	early_shadow_stamp();

	memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info));
	memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len);
}
#endif

/* add_memory_region to memmap */
static void __init add_memory_region(unsigned long long start,
			      unsigned long long size, int type)
{
	int i;

	i = bfin_memmap.nr_map;

	if (i == BFIN_MEMMAP_MAX) {
		printk(KERN_ERR "Oops! Too many entries in the memory map!\n");
		return;
	}

	bfin_memmap.map[i].addr = start;
	bfin_memmap.map[i].size = size;
	bfin_memmap.map[i].type = type;
	bfin_memmap.nr_map++;
}

/*
 * Sanitize the boot memmap, removing overlaps.
 */
static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
{
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/*
		Visually we're performing the following (1,2,3,4 = memory types)

		Sample memory map (w/overlaps):
		   ____22__________________
		   ______________________4_
		   ____1111________________
		   _44_____________________
		   11111111________________
		   ____________________33__
		   ___________44___________
		   __________33333_________
		   ______________22________
		   ___________________2222_
		   _________111111111______
		   _____________________11_
		   _________________4______

		Sanitized equivalent (no overlap):
		   1_______________________
		   _44_____________________
		   ___1____________________
		   ____22__________________
		   ______11________________
		   _________1______________
		   __________3_____________
		   ___________44___________
		   _____________33_________
		   _______________2________
		   ________________1_______
		   _________________4______
		   ___________________2____
		   ____________________33__
		   ______________________4_
	*/
	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in memmap */
	for (i = 0; i < old_nr; i++)
		if (map[i].addr + map[i].size < map[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (map[i].size != 0) {
			change_point[chgidx]->addr = map[i].addr;
			change_point[chgidx++]->pentry = &map[i];
			change_point[chgidx]->addr = map[i].addr + map[i].size;
			change_point[chgidx++]->pentry = &map[i];
		}
	}
	chg_nr = chgidx;	/* true number of change-points */

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			/* if <current_addr> > <last_addr>, swap */
			/* or, if current=<start_addr> & last=<end_addr>, swap */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
				((change_point[i]->addr == change_point[i-1]->addr) &&
				 (change_point[i]->addr == change_point[i]->pentry->addr) &&
				 (change_point[i-1]->addr != change_point[i-1]->pentry->addr))
			   ) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}

	/* create a new memmap, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_entry = 0;		/* index for creating new memmap entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */
	/* loop through change-points, determining effect on the new memmap */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping memmap entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pentry->addr) {
			/* add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++] = change_point[chgidx]->pentry;
		} else {
			/* remove entry from list (order independent, so swap with last) */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] == change_point[chgidx]->pentry)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use */
		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new memmap based on this information */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_map[new_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_map[new_entry].size != 0)
					if (++new_entry >= BFIN_MEMMAP_MAX)
						break;	/* no more space left for new entries */
			}
			if (current_type != 0) {
				new_map[new_entry].addr = change_point[chgidx]->addr;
				new_map[new_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_entry;	/* retain count for new entries */

	/* copy new mapping into original location */
	memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
	*pnr_map = new_nr;

	return 0;
}

static void __init print_memory_map(char *who)
{
	int i;

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		printk(KERN_DEBUG " %s: %016Lx - %016Lx ", who,
			bfin_memmap.map[i].addr,
			bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
		switch (bfin_memmap.map[i].type) {
		case BFIN_MEMMAP_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BFIN_MEMMAP_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", bfin_memmap.map[i].type);
			break;
		}
	}
}

static __init int parse_memmap(char *arg)
{
	unsigned long long start_at, mem_size;

	if (!arg)
		return -EINVAL;

	mem_size = memparse(arg, &arg);
	if (*arg == '@') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RAM);
	} else if (*arg == '$') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RESERVED);
	}

	return 0;
}
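
/*
 * Worked example for parse_memmap() (illustrative values, not from any
 * particular board): "memmap=4M@0x1000000" parses to mem_size = 0x400000
 * and start_at = 0x1000000, adding a BFIN_MEMMAP_RAM region covering that
 * range, while "memmap=4M$0x1000000" would add the same range as
 * BFIN_MEMMAP_RESERVED.
 */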

/*
 * Initial parsing of the command line.  Currently, we support:
 *  - Controlling the linux memory size: mem=xxx[KMG]
 *  - Controlling the physical memory size: max_mem=xxx[KMG][$][#]
 *       $ -> reserved memory is dcacheable
 *       # -> reserved memory is icacheable
 *  - Defining a memory region: memmap=<size>[KkmM]{@|$}<start>[KkmM]
 *       @ -> from <start> to <start>+<size>, type RAM
 *       $ -> from <start> to <start>+<size>, type RESERVED
 */
static __init void parse_cmdline_early(char *cmdline_p)
{
	char c = ' ', *to = cmdline_p;
	unsigned int memsize;
	for (;;) {
		if (c == ' ') {
			if (!memcmp(to, "mem=", 4)) {
				to += 4;
				memsize = memparse(to, &to);
				if (memsize)
					_ramend = memsize;

			} else if (!memcmp(to, "max_mem=", 8)) {
				to += 8;
				memsize = memparse(to, &to);
				if (memsize) {
					physical_mem_end = memsize;
					if (*to != ' ') {
						if (*to == '$'
						    || *(to + 1) == '$')
							reserved_mem_dcache_on = 1;
						if (*to == '#'
						    || *(to + 1) == '#')
							reserved_mem_icache_on = 1;
					}
				}
			} else if (!memcmp(to, "clkin_hz=", 9)) {
				to += 9;
				early_init_clkin_hz(to);
#ifdef CONFIG_EARLY_PRINTK
			} else if (!memcmp(to, "earlyprintk=", 12)) {
				to += 12;
				setup_early_printk(to);
#endif
			} else if (!memcmp(to, "memmap=", 7)) {
				to += 7;
				parse_memmap(to);
			}
		}
		c = *(to++);
		if (!c)
			break;
	}
}
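
/*
 * Example (hypothetical boot arguments): a command line such as
 * "mem=32M max_mem=64M$ clkin_hz=25000000" would cap kernel-managed
 * memory at 32MB, declare 64MB of physical memory with the reserved
 * region marked dcacheable, and report a 25 MHz input clock.
 */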

/*
 * Setup memory defaults from user config.
 * The physical memory layout looks like:
 *
 *  [_rambase, _ramstart]:		kernel image
 *  [memory_start, memory_end]:		dynamic memory managed by kernel
 *  [memory_end, _ramend]:		reserved memory
 *  	[memory_mtd_start(memory_end),
 *  		memory_mtd_start + mtd_size]:	rootfs (if any)
 *	[_ramend - DMA_UNCACHED_REGION,
 *		_ramend]:			uncached DMA region
 *  [_ramend, physical_mem_end]:	memory not managed by kernel
 */
static __init void memory_setup(void)
{
#ifdef CONFIG_MTD_UCLINUX
	unsigned long mtd_phys = 0;
#endif
	unsigned long max_mem;

	_rambase = CONFIG_BOOT_LOAD;
	_ramstart = (unsigned long)_end;

	if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
		console_init();
		panic("DMA region exceeds memory limit: %lu.",
			_ramend - _ramstart);
	}
	max_mem = memory_end = _ramend - DMA_UNCACHED_REGION;

#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
	/* Due to a Hardware Anomaly we need to limit the size of usable
	 * instruction memory to at most 60MB (56MB if HUNT_FOR_ZERO is on):
	 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
	 */
# if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
	if (max_mem >= 56 * 1024 * 1024)
		max_mem = 56 * 1024 * 1024;
# else
	if (max_mem >= 60 * 1024 * 1024)
		max_mem = 60 * 1024 * 1024;
# endif				/* CONFIG_DEBUG_HUNT_FOR_ZERO */
#endif				/* ANOMALY_05000263 */


#ifdef CONFIG_MPU
	/* Round up to multiple of 4MB */
	memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
#else
	memory_start = PAGE_ALIGN(_ramstart);
#endif

#if defined(CONFIG_MTD_UCLINUX)
	/* generic memory mapped MTD driver */
	memory_mtd_end = memory_end;

	mtd_phys = _ramstart;
	mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));

# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
	if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
		mtd_size =
		    PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
# endif

# if defined(CONFIG_CRAMFS)
	if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC)
		mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4)));
# endif

# if defined(CONFIG_ROMFS_FS)
	if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0
	    && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) {
		mtd_size =
		    PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));

		/* ROM_FS is XIP, so if we found it, we need to limit memory */
		if (memory_end > max_mem) {
			pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n",
				(max_mem - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
			memory_end = max_mem;
		}
	}
# endif				/* CONFIG_ROMFS_FS */

	/* Since the default MTD_UCLINUX has no magic number, we just blindly
	 * read 8 bytes past the end of the kernel image and look at that.
	 * When no image is attached, mtd_size ends up as a random number,
	 * so do some basic sanity checks before operating on it.
	 */
	if (mtd_size == 0 || memory_end <= mtd_size) {
		pr_emerg("Could not find a valid RAM MTD attached.\n");
	} else {
		memory_end -= mtd_size;

		/* Relocate MTD image to the top of memory after the uncached memory area */
		uclinux_ram_map.phys = memory_mtd_start = memory_end;
		uclinux_ram_map.size = mtd_size;
		pr_info("Found mtd partition at 0x%p, (len=0x%lx), moving to 0x%p\n",
			_end, mtd_size, (void *)memory_mtd_start);
		dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
	}
#endif				/* CONFIG_MTD_UCLINUX */

	/* We need to limit memory, since everything could have a text section
	 * of userspace in it, and expose anomaly 05000263. If the anomaly
	 * doesn't exist, or we don't need to - then don't.
	 */
	if (memory_end > max_mem) {
		pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n",
				(max_mem - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
		memory_end = max_mem;
	}

#ifdef CONFIG_MPU
#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
	page_mask_nelts = (((_ramend + ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE -
					ASYNC_BANK0_BASE) >> PAGE_SHIFT) + 31) / 32;
#else
	page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
#endif
	page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
#endif

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)0;

	printk(KERN_INFO "Board Memory: %ldMB\n", (physical_mem_end - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
	printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", (_ramend - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);

	printk(KERN_INFO "Memory map:\n"
	       "  fixedcode = 0x%p-0x%p\n"
	       "  text      = 0x%p-0x%p\n"
	       "  rodata    = 0x%p-0x%p\n"
	       "  bss       = 0x%p-0x%p\n"
	       "  data      = 0x%p-0x%p\n"
	       "    stack   = 0x%p-0x%p\n"
	       "  init      = 0x%p-0x%p\n"
	       "  available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX
	       "  rootfs    = 0x%p-0x%p\n"
#endif
#if DMA_UNCACHED_REGION > 0
	       "  DMA Zone  = 0x%p-0x%p\n"
#endif
		, (void *)FIXED_CODE_START, (void *)FIXED_CODE_END,
		_stext, _etext,
		__start_rodata, __end_rodata,
		__bss_start, __bss_stop,
		_sdata, _edata,
		(void *)&init_thread_union,
		(void *)((int)(&init_thread_union) + THREAD_SIZE),
		__init_begin, __init_end,
		(void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX
		, (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
#endif
#if DMA_UNCACHED_REGION > 0
		, (void *)(_ramend - DMA_UNCACHED_REGION), (void *)(_ramend)
#endif
		);
}

/*
 * Find the lowest, highest page frame number we have available
 */
void __init find_min_max_pfn(void)
{
	int i;

	max_pfn = 0;
	min_low_pfn = PFN_DOWN(memory_end);

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		unsigned long start, end;
		/* RAM? */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		start = PFN_UP(bfin_memmap.map[i].addr);
		end = PFN_DOWN(bfin_memmap.map[i].addr +
				bfin_memmap.map[i].size);
		if (start >= end)
			continue;
		if (end > max_pfn)
			max_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
	}
}

static __init void setup_bootmem_allocator(void)
{
	int bootmap_size;
	int i;
	unsigned long start_pfn, end_pfn;
	unsigned long curr_pfn, last_pfn, size;

	/* mark memory between memory_start and memory_end usable */
	add_memory_region(memory_start,
		memory_end - memory_start, BFIN_MEMMAP_RAM);
	/* sanity check for overlap */
	sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
	print_memory_map("boot memmap");

	/* initialize globals in linux/bootmem.h */
	find_min_max_pfn();
	/* pfn of the last usable page frame */
	if (max_pfn > memory_end >> PAGE_SHIFT)
		max_pfn = memory_end >> PAGE_SHIFT;
	/* pfn of last page frame directly mapped by kernel */
	max_low_pfn = max_pfn;
	/* pfn of the first usable page frame after kernel image */
	if (min_low_pfn < memory_start >> PAGE_SHIFT)
		min_low_pfn = memory_start >> PAGE_SHIFT;
	start_pfn = CONFIG_PHY_RAM_BASE_ADDRESS >> PAGE_SHIFT;
	end_pfn = memory_end >> PAGE_SHIFT;

	/*
	 * Give all the memory to the bootmem allocator; tell it to put the
	 * boot mem_map at the start of memory.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0),
			memory_start >> PAGE_SHIFT,	/* map goes here */
			start_pfn, end_pfn);

	/* register the memmap regions with the bootmem allocator */
	for (i = 0; i < bfin_memmap.nr_map; i++) {
		/*
		 * Only free usable (RAM) regions into the allocator
		 */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(bfin_memmap.map[i].addr);
		if (curr_pfn >= end_pfn)
			continue;
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(bfin_memmap.map[i].addr +
					 bfin_memmap.map[i].size);

		if (last_pfn > end_pfn)
			last_pfn = end_pfn;

		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
	}

	/* reserve memory before memory_start, including bootmap */
	reserve_bootmem(CONFIG_PHY_RAM_BASE_ADDRESS,
		memory_start + bootmap_size + PAGE_SIZE - 1 - CONFIG_PHY_RAM_BASE_ADDRESS,
		BOOTMEM_DEFAULT);
}

#define EBSZ_TO_MEG(ebsz) \
({ \
	int meg = 0; \
	switch (ebsz & 0xf) { \
		case 0x1: meg =  16; break; \
		case 0x3: meg =  32; break; \
		case 0x5: meg =  64; break; \
		case 0x7: meg = 128; break; \
		case 0x9: meg = 256; break; \
		case 0xb: meg = 512; break; \
	} \
	meg; \
})
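
/*
 * Worked example (hypothetical register value): on a BF561 with
 * EBIU_SDBCTL reading 0x00000505, get_mem_size() below would decode the
 * two populated bank-size nibbles as 64MB each (0x5 -> 64), giving
 * 128MB total; actual register values are board-specific.
 */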
static inline int __init get_mem_size(void)
{
#if defined(EBIU_SDBCTL)
# if defined(BF561_FAMILY)
	int ret = 0;
	u32 sdbctl = bfin_read_EBIU_SDBCTL();
	ret += EBSZ_TO_MEG(sdbctl >>  0);
	ret += EBSZ_TO_MEG(sdbctl >>  8);
	ret += EBSZ_TO_MEG(sdbctl >> 16);
	ret += EBSZ_TO_MEG(sdbctl >> 24);
	return ret;
# else
	return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL());
# endif
#elif defined(EBIU_DDRCTL1)
	u32 ddrctl = bfin_read_EBIU_DDRCTL1();
	int ret = 0;
	switch (ddrctl & 0xc0000) {
	case DEVSZ_64:
		ret = 64 / 8;
		break;
	case DEVSZ_128:
		ret = 128 / 8;
		break;
	case DEVSZ_256:
		ret = 256 / 8;
		break;
	case DEVSZ_512:
		ret = 512 / 8;
		break;
	}
	switch (ddrctl & 0x30000) {
	case DEVWD_4:
		ret *= 2;
		/* fallthrough: a 4-bit-wide device doubles capacity twice */
	case DEVWD_8:
		ret *= 2;
		/* fallthrough */
	case DEVWD_16:
		break;
	}
	if ((ddrctl & 0xc000) == 0x4000)
		ret *= 2;
	return ret;
#elif defined(CONFIG_BF60x)
	u32 ddrctl = bfin_read_DMC0_CFG();
	int ret = 0;
	switch (ddrctl & 0xf00) {
	case DEVSZ_64:
		ret = 64 / 8;
		break;
	case DEVSZ_128:
		ret = 128 / 8;
		break;
	case DEVSZ_256:
		ret = 256 / 8;
		break;
	case DEVSZ_512:
		ret = 512 / 8;
		break;
	case DEVSZ_1G:
		ret = 1024 / 8;
		break;
	case DEVSZ_2G:
		ret = 2048 / 8;
		break;
	}
	return ret;
#endif
	BUG();
}

__attribute__((weak))
void __init native_machine_early_platform_add_devices(void)
{
}

#ifdef CONFIG_BF60x
static inline u_long bfin_get_clk(char *name)
{
	struct clk *clk;
	u_long clk_rate;

	clk = clk_get(NULL, name);
	if (IS_ERR(clk))
		return 0;

	clk_rate = clk_get_rate(clk);
	clk_put(clk);
	return clk_rate;
}
#endif

void __init setup_arch(char **cmdline_p)
{
	u32 mmr;
	unsigned long sclk, cclk;

	native_machine_early_platform_add_devices();

	enable_shadow_console();

	/* Check to make sure we are running on the right processor */
	mmr = bfin_cpuid();
	if (unlikely(CPUID != bfin_cpuid()))
		printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
			CPU, bfin_cpuid(), bfin_revid());

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

#if defined(CONFIG_CMDLINE_BOOL)
	strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line));
	command_line[sizeof(command_line) - 1] = 0;
#endif

	/* Keep a copy of command line */
	*cmdline_p = &command_line[0];
	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';

	memset(&bfin_memmap, 0, sizeof(bfin_memmap));

#ifdef CONFIG_BF60x
	/* The clock framework must be initialized before early command-line parsing */
	clk_init();
#endif
	/* If the user does not specify things on the command line, use
	 * whatever the bootloader set up.
	 */
	physical_mem_end = 0;
	parse_cmdline_early(&command_line[0]);

	if (_ramend == 0)
		_ramend = get_mem_size() * 1024 * 1024;

	if (physical_mem_end == 0)
		physical_mem_end = _ramend;

	memory_setup();

#ifndef CONFIG_BF60x
	/* Initialize Async memory banks */
	bfin_write_EBIU_AMBCTL0(AMBCTL0VAL);
	bfin_write_EBIU_AMBCTL1(AMBCTL1VAL);
	bfin_write_EBIU_AMGCTL(AMGCTLVAL);
#ifdef CONFIG_EBIU_MBSCTLVAL
	bfin_write_EBIU_MBSCTL(CONFIG_EBIU_MBSCTLVAL);
	bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL);
	bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
#endif
#endif
#ifdef CONFIG_BFIN_HYSTERESIS_CONTROL
	bfin_write_PORTF_HYSTERESIS(HYST_PORTF_0_15);
	bfin_write_PORTG_HYSTERESIS(HYST_PORTG_0_15);
	bfin_write_PORTH_HYSTERESIS(HYST_PORTH_0_15);
	bfin_write_MISCPORT_HYSTERESIS((bfin_read_MISCPORT_HYSTERESIS() &
					~HYST_NONEGPIO_MASK) | HYST_NONEGPIO);
#endif

	cclk = get_cclk();
	sclk = get_sclk();

	if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk)
		panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");

#ifdef BF561_FAMILY
	if (ANOMALY_05000266) {
		bfin_read_IMDMA_D0_IRQ_STATUS();
		bfin_read_IMDMA_D1_IRQ_STATUS();
	}
#endif

	mmr = bfin_read_TBUFCTL();
	printk(KERN_INFO "Hardware Trace %s and %sabled\n",
		(mmr & 0x1) ? "active" : "off",
		(mmr & 0x2) ? "en" : "dis");
#ifndef CONFIG_BF60x
	mmr = bfin_read_SYSCR();
	printk(KERN_INFO "Boot Mode: %i\n", mmr & 0xF);

	/* Newer parts mirror SWRST bits in SYSCR */
#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
    defined(CONFIG_BF538) || defined(CONFIG_BF539)
	_bfin_swrst = bfin_read_SWRST();
#else
	/* Clear boot mode field */
	_bfin_swrst = mmr & ~0xf;
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
	bfin_write_SWRST(_bfin_swrst & ~DOUBLE_FAULT);
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET
	bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
#endif

#ifdef CONFIG_SMP
	if (_bfin_swrst & SWRST_DBL_FAULT_A) {
#else
	if (_bfin_swrst & RESET_DOUBLE) {
#endif
		printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		/* We assume the crashing kernel and the current symbol table match */
		printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
			initial_pda.seqstat_doublefault & SEQSTAT_EXCAUSE,
			initial_pda.retx_doublefault);
		printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n",
			initial_pda.dcplb_doublefault_addr);
		printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n",
			initial_pda.icplb_doublefault_addr);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			initial_pda.retx);
	} else if (_bfin_swrst & RESET_WDOG)
		printk(KERN_INFO "Recovering from Watchdog event\n");
	else if (_bfin_swrst & RESET_SOFTWARE)
		printk(KERN_NOTICE "Reset caused by Software reset\n");
#endif
	printk(KERN_INFO "Blackfin support (C) 2004-2010 Analog Devices, Inc.\n");
	if (bfin_compiled_revid() == 0xffff)
		printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n", CPU, bfin_revid());
	else if (bfin_compiled_revid() == -1)
		printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
	else
		printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());

	if (likely(CPUID == bfin_cpuid())) {
		if (bfin_revid() != bfin_compiled_revid()) {
			if (bfin_compiled_revid() == -1)
				printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
				       bfin_revid());
			else if (bfin_compiled_revid() != 0xffff) {
				printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
				       bfin_compiled_revid(), bfin_revid());
				if (bfin_compiled_revid() > bfin_revid())
					panic("Error: you are missing anomaly workarounds for this rev");
			}
		}
		if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
			printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
			       CPU, bfin_revid());
	}

	printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");

#ifdef CONFIG_BF60x
	printk(KERN_INFO "Processor Speed: %lu MHz core clock, %lu MHz SCLK, %lu MHz SCLK0, %lu MHz SCLK1 and %lu MHz DCLK\n",
		cclk / 1000000, bfin_get_clk("SYSCLK") / 1000000, get_sclk0() / 1000000, get_sclk1() / 1000000, get_dclk() / 1000000);
#else
	printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
	       cclk / 1000000, sclk / 1000000);
#endif

	setup_bootmem_allocator();

	paging_init();

	/* Copy atomic sequences to their fixed location, and sanity check that
	   these locations are the ones that we advertise to userspace.  */
	memcpy((void *)FIXED_CODE_START, &fixed_code_start,
	       FIXED_CODE_END - FIXED_CODE_START);
	BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start
	       != SIGRETURN_STUB - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start
	       != ATOMIC_XCHG32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start
	       != ATOMIC_CAS32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start
	       != ATOMIC_ADD32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start
	       != ATOMIC_SUB32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start
	       != ATOMIC_IOR32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start
	       != ATOMIC_AND32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
	       != ATOMIC_XOR32 - FIXED_CODE_START);
	BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
		!= SAFE_USER_INSTRUCTION - FIXED_CODE_START);

#ifdef CONFIG_SMP
	platform_init_cpus();
#endif
	init_exception_vectors();
	bfin_cache_init();	/* Initialize caches for the boot CPU */
}

static int __init topology_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
	}

	return 0;
}

subsys_initcall(topology_init);

/* Get the input clock frequency */
static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
#ifndef CONFIG_BF60x
static u_long get_clkin_hz(void)
{
	return cached_clkin_hz;
}
#endif
static int __init early_init_clkin_hz(char *buf)
{
	cached_clkin_hz = simple_strtoul(buf, NULL, 0);
#ifdef BFIN_KERNEL_CLOCK
	if (cached_clkin_hz != CONFIG_CLKIN_HZ)
		panic("cannot change clkin_hz when reprogramming clocks");
#endif
	return 1;
}
early_param("clkin_hz=", early_init_clkin_hz);

#ifndef CONFIG_BF60x
/* Get the voltage input multiplier */
static u_long get_vco(void)
{
	static u_long cached_vco;
	u_long msel, pll_ctl;

	/* The assumption here is that VCO never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_vco)
		return cached_vco;

	pll_ctl = bfin_read_PLL_CTL();
	msel = (pll_ctl >> 9) & 0x3F;
	if (0 == msel)
		msel = 64;

	cached_vco = get_clkin_hz();
	cached_vco >>= (1 & pll_ctl);	/* DF bit */
	cached_vco *= msel;
	return cached_vco;
}
#endif
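
/*
 * Worked example for the VCO math above (hypothetical values): with a
 * 25 MHz CLKIN, DF = 0 and MSEL = 20, VCO = (25 MHz >> 0) * 20 = 500 MHz.
 */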

/* Get the Core clock */
u_long get_cclk(void)
{
#ifdef CONFIG_BF60x
	return bfin_get_clk("CCLK");
#else
	static u_long cached_cclk_pll_div, cached_cclk;
	u_long csel, ssel;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV();
	if (ssel == cached_cclk_pll_div)
		return cached_cclk;
	else
		cached_cclk_pll_div = ssel;

	csel = ((ssel >> 4) & 0x03);
	ssel &= 0xf;
	if (ssel && ssel < (1 << csel))	/* SCLK > CCLK */
		cached_cclk = get_vco() / ssel;
	else
		cached_cclk = get_vco() >> csel;
	return cached_cclk;
#endif
}
EXPORT_SYMBOL(get_cclk);
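
/*
 * Continuing the hypothetical 500 MHz VCO example: with CSEL = 1 and
 * SSEL = 5, SSEL (5) is not less than 1 << CSEL (2), so
 * CCLK = VCO >> CSEL = 250 MHz, while get_sclk() below would yield
 * SCLK = VCO / SSEL = 100 MHz.
 */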

#ifdef CONFIG_BF60x
/* Get the bf60x clock of SCLK0 domain */
u_long get_sclk0(void)
{
	return bfin_get_clk("SCLK0");
}
EXPORT_SYMBOL(get_sclk0);

/* Get the bf60x clock of SCLK1 domain */
u_long get_sclk1(void)
{
	return bfin_get_clk("SCLK1");
}
EXPORT_SYMBOL(get_sclk1);

/* Get the bf60x DRAM clock */
u_long get_dclk(void)
{
	return bfin_get_clk("DCLK");
}
EXPORT_SYMBOL(get_dclk);
#endif

/* Get the default system clock */
u_long get_sclk(void)
{
#ifdef CONFIG_BF60x
	return get_sclk0();
#else
	static u_long cached_sclk;
	u_long ssel;

	/* The assumption here is that SCLK never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_sclk)
		return cached_sclk;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV() & 0xf;
	if (0 == ssel) {
		printk(KERN_WARNING "Invalid System Clock\n");
		ssel = 1;
	}

	cached_sclk = get_vco() / ssel;
	return cached_sclk;
#endif
}
EXPORT_SYMBOL(get_sclk);

unsigned long sclk_to_usecs(unsigned long sclk)
{
	u64 tmp = USEC_PER_SEC * (u64)sclk;
	do_div(tmp, get_sclk());
	return tmp;
}
EXPORT_SYMBOL(sclk_to_usecs);

unsigned long usecs_to_sclk(unsigned long usecs)
{
	u64 tmp = get_sclk() * (u64)usecs;
	do_div(tmp, USEC_PER_SEC);
	return tmp;
}
EXPORT_SYMBOL(usecs_to_sclk);
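
/*
 * Conversion example (assuming the hypothetical 100 MHz SCLK from above):
 * sclk_to_usecs(500) = 500 * 1000000 / 100000000 = 5 us, and
 * usecs_to_sclk(5) = 5 * 100000000 / 1000000 = 500 SCLK cycles.
 */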

/*
 *	Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	char *cpu, *mmu, *fpu, *vendor, *cache;
	uint32_t revid;
	int cpu_num = *(unsigned int *)v;
	u_long sclk, cclk;
	u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);

	cpu = CPU;
	mmu = "none";
	fpu = "none";
	revid = bfin_revid();

	sclk = get_sclk();
	cclk = get_cclk();

	switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
	case 0xca:
		vendor = "Analog Devices";
		break;
	default:
		vendor = "unknown";
		break;
	}

	seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);

	if (CPUID == bfin_cpuid())
		seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
	else
		seq_printf(m, "cpu family\t: Compiled for:0x%04x, running on:0x%04x\n",
			CPUID, bfin_cpuid());

	seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
		"stepping\t: %d ",
		cpu, cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
		"mpu on",
#else
		"mpu off",
#endif
		revid);

	if (bfin_revid() != bfin_compiled_revid()) {
		if (bfin_compiled_revid() == -1)
			seq_printf(m, "(Compiled for Rev none)");
		else if (bfin_compiled_revid() == 0xffff)
			seq_printf(m, "(Compiled for Rev any)");
		else
			seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
	}

	seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
		cclk/1000000, cclk%1000000,
		sclk/1000000, sclk%1000000);
	seq_printf(m, "bogomips\t: %lu.%02lu\n"
		"Calibration\t: %lu loops\n",
		(loops_per_jiffy * HZ) / 500000,
		((loops_per_jiffy * HZ) / 5000) % 100,
		(loops_per_jiffy * HZ));

	/* Check cache configuration */
	switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
	case ACACHE_BSRAM:
		cache = "dbank-A/B\t: cache/sram";
		dcache_size = 16;
		dsup_banks = 1;
		break;
	case ACACHE_BCACHE:
		cache = "dbank-A/B\t: cache/cache";
		dcache_size = 32;
		dsup_banks = 2;
		break;
	case ASRAM_BSRAM:
		cache = "dbank-A/B\t: sram/sram";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	default:
		cache = "unknown";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	}

	/* Is it turned on? */
	if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
		dcache_size = 0;

	if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
		icache_size = 0;

	seq_printf(m, "cache size\t: %d KB(L1 icache) "
		"%d KB(L1 dcache) %d KB(L2 cache)\n",
		icache_size, dcache_size, 0);
	seq_printf(m, "%s\n", cache);
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
		   "cacheable"
#else
		   "uncacheable"
#endif
		   " in instruction cache\n");
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
		      "cacheable (write-back)"
#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
		      "cacheable (write-through)"
#else
		      "uncacheable"
#endif
		      " in data cache\n");

	if (icache_size)
		seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
			   BFIN_ISUBBANKS, BFIN_IWAYS, BFIN_ILINES);
	else
		seq_printf(m, "icache setup\t: off\n");

	seq_printf(m,
		   "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
		   dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
		   BFIN_DLINES);
#ifdef __ARCH_SYNC_CORE_DCACHE
	seq_printf(m, "dcache flushes\t: %lu\n", dcache_invld_count[cpu_num]);
#endif
#ifdef __ARCH_SYNC_CORE_ICACHE
	seq_printf(m, "icache flushes\t: %lu\n", icache_invld_count[cpu_num]);
#endif

	seq_printf(m, "\n");

	if (cpu_num != num_possible_cpus() - 1)
		return 0;

	if (L2_LENGTH) {
		seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_ICACHEABLE)
			      "cacheable"
#else
			      "uncacheable"
#endif
			      " in instruction cache\n");
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_WRITEBACK)
			      "cacheable (write-back)"
#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
			      "cacheable (write-through)"
#else
			      "uncacheable"
#endif
			      " in data cache\n");
	}
	seq_printf(m, "board name\t: %s\n", bfin_board_name);
	seq_printf(m, "board memory\t: %ld kB (0x%08lx -> 0x%08lx)\n",
		physical_mem_end >> 10, 0ul, physical_mem_end);
	seq_printf(m, "kernel memory\t: %d kB (0x%08lx -> 0x%08lx)\n",
		((int)memory_end - (int)_rambase) >> 10,
		_rambase, memory_end);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		*pos = cpumask_first(cpu_online_mask);
	if (*pos >= num_online_cpus())
		return NULL;

	return pos;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = cpumask_next(*pos, cpu_online_mask);

	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};

void __init cmdline_init(const char *r0)
{
	early_shadow_stamp();
	if (r0) {
		strncpy(command_line, r0, COMMAND_LINE_SIZE);
		/* strncpy() does not guarantee NUL termination; force it */
		command_line[COMMAND_LINE_SIZE - 1] = 0;
	}
}
1464