cache.c revision 2b4315185a06414c4ab40fb0db50dce1b534a1d9
/*
 * arch/sh/mm/pg-mmu.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

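/*
 * Cache maintenance operations are dispatched through these function
 * pointers so that one kernel image can cover the different SH CPU
 * families; cpu_cache_init() points each of them either at the no-op
 * stubs below or at a family-specific implementation at boot time.
 */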
void (*flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_dup_mm)(struct mm_struct *mm);
void (*flush_cache_page)(struct vm_area_struct *vma,
				unsigned long addr, unsigned long pfn);
void (*flush_cache_range)(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end);
void (*flush_dcache_page)(struct page *page);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*flush_icache_page)(struct vm_area_struct *vma,
				 struct page *page);
void (*flush_cache_sigtramp)(unsigned long address);
void (*__flush_wback_region)(void *start, int size);
void (*__flush_purge_region)(void *start, int size);
void (*__flush_invalidate_region)(void *start, int size);

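/*
 * Default no-op implementations, used for any operation that the
 * CPU-family specific cache setup code does not override.
 */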
static inline void noop_flush_cache_all(void)
{
}

static inline void noop_flush_cache_mm(struct mm_struct *mm)
{
}

static inline void noop_flush_cache_page(struct vm_area_struct *vma,
				unsigned long addr, unsigned long pfn)
{
}

static inline void noop_flush_cache_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
}

static inline void noop_flush_dcache_page(struct page *page)
{
}

static inline void noop_flush_icache_range(unsigned long start,
					   unsigned long end)
{
}

static inline void noop_flush_icache_page(struct vm_area_struct *vma,
					  struct page *page)
{
}

static inline void noop_flush_cache_sigtramp(unsigned long address)
{
}

static inline void noop__flush_region(void *start, int size)
{
}

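/*
 * Write into a user page from a kernel buffer.  When the D-cache has
 * aliases and the page is mapped with a clean cache state, write
 * through a coherent kernel mapping of matching colour; otherwise
 * write through the regular kernel mapping and mark the page
 * PG_dcache_dirty so the aliasing lines get written back later.
 * Executable mappings are additionally flushed via flush_cache_page()
 * so stale instructions are not executed.
 */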
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

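/*
 * Read from a user page into a kernel buffer.  As above, a coherent
 * mapping is used when D-cache aliasing could otherwise return stale
 * data; no dirty marking is needed since the page itself is only read.
 */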
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		/*
		 * Reading through the kernel mapping does not dirty the
		 * page, so there is no need to set PG_dcache_dirty here.
		 */
		memcpy(dst, src, len);
	}
}

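/*
 * Copy a user page, reading the source through a coherent mapping when
 * it may still have live user-space cache lines, and writing back the
 * destination if its kernel address aliases the user address.
 */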
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure the copied data is visible to other CPUs before the page is used */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_wback_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

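/*
 * Called on PTE update: if the page was written through its kernel
 * mapping (PG_dcache_dirty) and that mapping aliases the new user
 * address, write the kernel-side cache lines back so the user mapping
 * sees the up-to-date data.
 */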
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn) && page_mapping(page)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			unsigned long addr = (unsigned long)page_address(page);

			if (pages_do_alias(addr, address & PAGE_MASK))
				__flush_wback_region((void *)addr, PAGE_SIZE);
		}
	}
}

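/*
 * Flush an anonymous page whose kernel mapping aliases the given user
 * address, going through a coherent mapping while the page still has
 * clean, user-visible cache lines.
 */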
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			__flush_wback_region((void *)kaddr, PAGE_SIZE);
			kunmap_coherent();
		} else
			__flush_wback_region((void *)addr, PAGE_SIZE);
	}
}

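/*
 * Derive the aliasing parameters from the probed cache geometry:
 * alias_mask covers the index bits of a cache way that lie above
 * PAGE_SHIFT, and n_aliases is the number of page colours.  For
 * example (illustrative figures, not a specific part): an 8KB way with
 * 4KB pages gives alias_mask = 0x1000 and n_aliases = 2, while a way
 * that fits within a single page gives no aliases at all.
 */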
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}

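/*
 * Boot-time cache setup: compute the aliasing info for each cache,
 * install the no-op defaults, then let the CPU-family specific
 * initialisers (declared __weak) replace whichever operations they
 * implement, and finally report the resulting parameters.
 */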
void __init cpu_cache_init(void)
{
	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	flush_cache_all		= noop_flush_cache_all;
	flush_cache_mm		= noop_flush_cache_mm;
	flush_cache_dup_mm	= noop_flush_cache_mm;
	flush_cache_page	= noop_flush_cache_page;
	flush_cache_range	= noop_flush_cache_range;
	flush_dcache_page	= noop_flush_dcache_page;
	flush_icache_range	= noop_flush_icache_range;
	flush_icache_page	= noop_flush_icache_page;
	flush_cache_sigtramp	= noop_flush_cache_sigtramp;

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

	emit_cache_params();
}