/*
 * arch/score/mm/cache.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>

#include <asm/mmu_context.h>

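/*
 * Cache-op encodings used in this file (as implied by the comments on
 * flush_dcache_range() and flush_icache_range() below): 0x0e writes
 * back and invalidates a Dcache line, 0x1a drains the write buffer,
 * 0x02 invalidates an Icache line; 0x10 and 0x1f appear to be the
 * whole-Icache and whole-Dcache forms used by the *_all() helpers,
 * and 0x0d looks like a Dcache write-back used by
 * flush_cache_sigtramp().
 */
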
/*
 * Just flush the entire Dcache for this page.
 * The caller must ensure that the page contains no instructions,
 * because this function does not flush the Icache.
 * addr must be cache-line aligned.
 */
static void flush_data_cache_page(unsigned long addr)
{
	unsigned int i;
	for (i = 0; i < PAGE_SIZE; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
		"cache 0x0e, [%0, 0]\n"
		"cache 0x1a, [%0, 0]\n"
		"nop\n"
		: : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

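/*
 * Called when the kernel has dirtied a page-cache page through its
 * kernel mapping.  If the page's mapping currently has no user-space
 * mappings we only mark it PG_dcache_dirty and let __update_cache()
 * do the flush when a user mapping is installed; otherwise the kernel
 * alias is flushed immediately.
 */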
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &(page)->flags);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the tlb (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Called by update_mmu_cache().  If flush_dcache_page() deferred a
 * Dcache flush for this page, carry it out now that the page is being
 * mapped into user space (only executable mappings need the flush
 * here), then clear PG_dcache_dirty.
 */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
		pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC);

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_dcache_dirty, &(page)->flags)) {
		addr = (unsigned long) page_address(page);
		if (exec)
			flush_data_cache_page(addr);
		clear_bit(PG_dcache_dirty, &(page)->flags);
	}
}

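/*
 * protection_map[] is indexed by the low four bits of vm_flags
 * (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED): entries 0-7 cover the
 * private (copy-on-write) combinations and entries 8-15 the shared
 * ones.  Score makes no read/exec distinction here, so every
 * combination collapses to NONE, READONLY, COPY or SHARED.
 */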
static inline void setup_protection_map(void)
{
	protection_map[0] = PAGE_NONE;
	protection_map[1] = PAGE_READONLY;
	protection_map[2] = PAGE_COPY;
	protection_map[3] = PAGE_COPY;
	protection_map[4] = PAGE_READONLY;
	protection_map[5] = PAGE_READONLY;
	protection_map[6] = PAGE_COPY;
	protection_map[7] = PAGE_COPY;
	protection_map[8] = PAGE_NONE;
	protection_map[9] = PAGE_READONLY;
	protection_map[10] = PAGE_SHARED;
	protection_map[11] = PAGE_SHARED;
	protection_map[12] = PAGE_READONLY;
	protection_map[13] = PAGE_READONLY;
	protection_map[14] = PAGE_SHARED;
	protection_map[15] = PAGE_SHARED;
}

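/*
 * Architecture cache-initialisation hook.  No runtime cache setup is
 * needed on Score; all we do is populate protection_map[].
 */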
void cpu_cache_init(void)
{
	setup_protection_map();
}

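/*
 * Invalidate the entire Icache.  The "la r8, flush_icache_all" merely
 * gives the cache instruction a valid address to operate on; op 0x10
 * appears to act on the whole Icache regardless of the address, and
 * the trailing nops let the operation complete.
 */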
void flush_icache_all(void)
{
	__asm__ __volatile__(
	"la r8, flush_icache_all\n"
	"cache 0x10, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	: : : "r8");
}

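/*
 * Write back and invalidate the entire Dcache (op 0x1f, by analogy
 * with the per-line op used in flush_dcache_range()), then drain the
 * write buffer (op 0x1a) so the data actually reaches memory.
 */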
void flush_dcache_all(void)
{
	__asm__ __volatile__(
	"la r8, flush_dcache_all\n"
	"cache 0x1f, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	"cache 0x1a, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	: : : "r8");
}

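/*
 * Flush both caches: invalidate the Icache, write back and invalidate
 * the Dcache, and drain the write buffer.
 */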
void flush_cache_all(void)
{
	__asm__ __volatile__(
	"la r8, flush_cache_all\n"
	"cache 0x10, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	"cache 0x1f, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	"cache 0x1a, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	: : : "r8");
}

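/*
 * Flush everything associated with an address space.  An mm that has
 * never been allocated a context cannot have cache contents of
 * interest, so the flush is skipped in that case.
 */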
void flush_cache_mm(struct mm_struct *mm)
{
	if (!(mm->context))
		return;
	flush_cache_all();
}

/*
 * Flushing a range precisely can take a long time.  We must check
 * whether each page in the range is present; if a page is present we
 * flush the part of the range that falls within it.  Note that the
 * range may span two pages of which one is present and the other is
 * not.
 */
/*
 * This interface is provided in the hope that the port can find a
 * suitably efficient method for removing multiple page-sized regions
 * from the cache.
 */
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int exec = vma->vm_flags & VM_EXEC;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (!(mm->context))
		return;

	while (start <= end) {
		unsigned long tmpend;

		pgdp = pgd_offset(mm, start);
		pudp = pud_offset(pgdp, start);
		pmdp = pmd_offset(pudp, start);
		ptep = pte_offset(pmdp, start);

		if (!(pte_val(*ptep) & _PAGE_PRESENT)) {
			start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
			continue;
		}
		tmpend = (start | (PAGE_SIZE-1)) > end ?
				 end : (start | (PAGE_SIZE-1));

		flush_dcache_range(start, tmpend);
		if (exec)
			flush_icache_range(start, tmpend);
		start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	}
}

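/*
 * Flush one user page.  Rather than chase the user mapping, the page
 * is flushed through the kernel's linear mapping: 0xa0000000 appears
 * to be the Score kernel segment base, so kaddr is the kernel virtual
 * address of page frame 'pfn'.
 */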
void flush_cache_page(struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn)
{
	int exec = vma->vm_flags & VM_EXEC;
	unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);

	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);

	if (exec)
		flush_icache_range(kaddr, kaddr + PAGE_SIZE);
}

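/*
 * Make a freshly written signal trampoline (two instructions at addr)
 * visible to instruction fetch: invalidate the Icache lines covering
 * addr and addr + 4, write back the corresponding Dcache lines
 * (op 0x0d, presumably the write-back variant), and drain the write
 * buffer.
 */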
void flush_cache_sigtramp(unsigned long addr)
{
	__asm__ __volatile__(
	"cache 0x02, [%0, 0]\n"
	"nop\nnop\nnop\nnop\nnop\n"
	"cache 0x02, [%0, 0x4]\n"
	"nop\nnop\nnop\nnop\nnop\n"

	"cache 0x0d, [%0, 0]\n"
	"nop\nnop\nnop\nnop\nnop\n"
	"cache 0x0d, [%0, 0x4]\n"
	"nop\nnop\nnop\nnop\nnop\n"

	"cache 0x1a, [%0, 0]\n"
	"nop\nnop\nnop\nnop\nnop\n"
	: : "r" (addr));
}

/*
 * 1. Write back and invalidate the Dcache lines in the range.
 * 2. Drain the write buffer.
 * The range must be smaller than PAGE_SIZE.
 */
void flush_dcache_range(unsigned long start, unsigned long end)
{
	int size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	/* round end up so a partial final cache line is included */
	end = (end + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	size = end - start;
	/* flush dcache to ram, and invalidate dcache lines. */
	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
		"cache 0x0e, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		"cache 0x1a, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}

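/*
 * Invalidate the Icache lines covering the given range, typically
 * after the kernel has written instructions into memory.  Dirty
 * Dcache lines must be written back separately, e.g. via
 * flush_dcache_range().
 */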
void flush_icache_range(unsigned long start, unsigned long end)
{
	int size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	/* round end up so a partial final cache line is included */
	end = (end + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);

	size = end - start;
	/* invalidate icache lines. */
	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
		"cache 0x02, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}
EXPORT_SYMBOL(flush_icache_range);