/*
 * arch/sh/mm/cache-sh5.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2002  Benedict Gaster
 * Copyright (C) 2003  Richard Curnow
 * Copyright (C) 2003 - 2008  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

/* Wired TLB entry for the D-cache */
static unsigned long long dtlb_cache_slot;

void __init p3_cache_init(void)
{
	/* Reserve a slot for dcache colouring in the DTLB */
	dtlb_cache_slot	= sh64_get_wired_dtlb_entry();
}

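/*
 * Coherent kmap support is not implemented for SH-5 yet; the functions
 * below are placeholder stubs.
 */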
void __init kmap_coherent_init(void)
{
	/* XXX ... */
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
	/* XXX ... */
	return NULL;
}

void kunmap_coherent(void)
{
}

#ifdef CONFIG_DCACHE_DISABLED
#define sh64_dcache_purge_all()					do { } while (0)
#define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)
#define sh64_dcache_purge_user_range(mm, start, end)		do { } while (0)
#define sh64_dcache_purge_phy_page(paddr)			do { } while (0)
#define sh64_dcache_purge_virt_page(mm, eaddr)			do { } while (0)
#endif

/*
 * The following group of functions deal with mapping and unmapping a
 * temporary page into a DTLB slot that has been set aside for exclusive
 * use.
 */
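/*
 * Note that sh64_setup_dtlb_cache_slot() disables local interrupts and
 * sh64_teardown_dtlb_cache_slot() re-enables them, so the two must
 * always be used as a pair around the purge loop.
 */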
static inline void
sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
			   unsigned long paddr)
{
	local_irq_disable();
	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}

static inline void sh64_teardown_dtlb_cache_slot(void)
{
	sh64_teardown_tlb_slot(dtlb_cache_slot);
	local_irq_enable();
}

#ifndef CONFIG_ICACHE_DISABLED
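/*
 * Invalidate the entire contents of the I-cache by setting the ICI bit
 * in the ICCR0 configuration register.
 */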
static inline void sh64_icache_inv_all(void)
{
	unsigned long long addr, flag, data;
	unsigned long flags;

	addr = ICCR0;
	flag = ICCR0_ICI;
	data = 0;

	/* Make this a critical section for safety (probably not strictly necessary.) */
	local_irq_save(flags);

	/* Without %1 it gets inexplicably wrong */
	__asm__ __volatile__ (
		"getcfg	%3, 0, %0\n\t"
		"or	%0, %2, %0\n\t"
		"putcfg	%3, 0, %0\n\t"
		"synci"
		: "=&r" (data)
		: "0" (data), "r" (flag), "r" (addr));

	local_irq_restore(flags);
}

static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
{
	/* Invalidate range of addresses [start,end] from the I-cache, where
	 * the addresses lie in the kernel superpage. */

	unsigned long long ullend, addr, aligned_start;
	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) end;

	while (addr <= ullend) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
{
	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
	   Also, eaddr is page-aligned. */
	unsigned int cpu = smp_processor_id();
	unsigned long long addr, end_addr;
	unsigned long flags = 0;
	unsigned long running_asid, vma_asid;
	addr = eaddr;
	end_addr = addr + PAGE_SIZE;

	/* Check whether we can use the current ASID for the I-cache
	   invalidation.  For example, if we're called via
	   access_process_vm->flush_cache_page->here, (e.g. when reading from
	   /proc), 'running_asid' will be that of the reader, not of the
	   victim.

	   Also, note the risk that we might get pre-empted between the ASID
	   compare and blocking IRQs, and before we regain control, the
	   pid->ASID mapping changes.  However, the whole cache will get
	   invalidated when the mapping is renewed, so the worst that can
	   happen is that the loop below ends up invalidating somebody else's
	   cache entries.
	*/

	running_asid = get_asid();
	vma_asid = cpu_asid(cpu, vma->vm_mm);
	if (running_asid != vma_asid) {
		local_irq_save(flags);
		switch_and_save_asid(vma_asid);
	}
	while (addr < end_addr) {
		/* Worth unrolling a little */
		__asm__ __volatile__("icbi %0,  0" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 32" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 64" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
		addr += 128;
	}
	if (running_asid != vma_asid) {
		switch_and_save_asid(running_asid);
		local_irq_restore(flags);
	}
}

static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	/* Used for invalidating big chunks of I-cache, i.e. assume the range
	   is whole pages.  If 'start' or 'end' is not page aligned, the code
	   is conservative and invalidates to the ends of the enclosing pages.
	   This is functionally OK, just a performance loss. */

	/* See the comments below in sh64_dcache_purge_user_range() regarding
	   the choice of algorithm.  However, for the I-cache option (2) isn't
	   available because there are no physical tags so aliases can't be
	   resolved.  The icbi instruction has to be used through the user
	   mapping.  Because icbi is cheaper than ocbp on a cache hit, it
	   would pay to use the selective code for a larger range than is
	   worthwhile with the D-cache.  Just assume 64 pages for now as a
	   working figure.
	   */
	int n_pages;

	if (!mm)
		return;

	n_pages = ((end - start) >> PAGE_SHIFT);
	if (n_pages >= 64) {
		sh64_icache_inv_all();
	} else {
		unsigned long aligned_start;
		unsigned long eaddr;
		unsigned long after_last_page_start;
		unsigned long mm_asid, current_asid;
		unsigned long flags = 0;

		mm_asid = cpu_asid(smp_processor_id(), mm);
		current_asid = get_asid();

		if (mm_asid != current_asid) {
			/* Switch ASID and run the invalidate loop under cli */
			local_irq_save(flags);
			switch_and_save_asid(mm_asid);
		}

		aligned_start = start & PAGE_MASK;
		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);

		while (aligned_start < after_last_page_start) {
			struct vm_area_struct *vma;
			unsigned long vma_end;
			vma = find_vma(mm, aligned_start);
			if (!vma || (vma->vm_start > aligned_start)) {
				/* No VMA covers this address (find_vma
				   returns the first VMA ending above it);
				   skip ahead rather than getting stuck */
				aligned_start += PAGE_SIZE;
				continue;
			}
			vma_end = vma->vm_end;
			if (vma->vm_flags & VM_EXEC) {
				/* Executable */
				eaddr = aligned_start;
				while (eaddr < vma_end) {
					sh64_icache_inv_user_page(vma, eaddr);
					eaddr += PAGE_SIZE;
				}
			}
			aligned_start = vma->vm_end; /* Skip to start of next region */
		}

		if (mm_asid != current_asid) {
			switch_and_save_asid(current_asid);
			local_irq_restore(flags);
		}
	}
}

/*
 * Invalidate a small range of user context I-cache, not necessarily page
 * (or even cache-line) aligned.
 *
 * Since this is used inside ptrace, the ASID in the mm context typically
 * won't match current_asid.  We'll have to switch ASID to do this.  For
 * safety, and given that the range will be small, do all this under cli.
 *
 * Note, there is a hazard that the ASID in mm->context is no longer
 * actually associated with mm, i.e. if the mm->context has started a new
 * cycle since mm was last active.  However, this is just a performance
 * issue: all that happens is that we invalidate lines belonging to
 * another mm, so the owning process has to refill them when that mm goes
 * live again.  mm itself can't have any cache entries because there will
 * have been a flush_cache_all when the new mm->context cycle started.
 */
static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
						unsigned long start, int len)
{
	unsigned long long eaddr, eaddr_end;
	unsigned long current_asid, mm_asid;
	unsigned long flags;

	/*
	 * Align to start of cache line.  Otherwise, suppose len==8 and
	 * start was at 32N+28 : the last 4 bytes wouldn't get invalidated.
	 */
	eaddr = L1_CACHE_ALIGN(start);
	eaddr_end = start + len;

	mm_asid = cpu_asid(smp_processor_id(), mm);
	local_irq_save(flags);
	current_asid = switch_and_save_asid(mm_asid);

	while (eaddr < eaddr_end) {
		__asm__ __volatile__("icbi %0, 0" : : "r" (eaddr));
		eaddr += L1_CACHE_BYTES;
	}
	switch_and_save_asid(current_asid);
	local_irq_restore(flags);
}

static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
{
	/* The icbi instruction never raises ITLBMISS.  i.e. if there's not a
	   cache hit on the virtual tag the instruction ends there, without a
	   TLB lookup. */

	unsigned long long aligned_start;
	unsigned long long ull_end;
	unsigned long long addr;

	ull_end = end;

	/* Just invalidate over the range using the natural addresses.  TLB
	   miss handling will be OK (TBC).  Since it's for the current process,
	   either we're already in the right ASID context, or the ASIDs have
	   been recycled since we were last active in which case we might just
	   invalidate another process's I-cache entries : no worries, just a
	   performance drop for him. */
	aligned_start = L1_CACHE_ALIGN(start);
	addr = aligned_start;
	while (addr < ull_end) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		__asm__ __volatile__ ("nop");
		__asm__ __volatile__ ("nop");
		addr += L1_CACHE_BYTES;
	}
}
#endif /* !CONFIG_ICACHE_DISABLED */

#ifndef CONFIG_DCACHE_DISABLED
/* Buffer used as the target of alloco instructions to purge data from cache
   sets by natural eviction. -- RPC */
#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };

static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
{
	/* Purge all ways in a particular block of sets, specified by the base
	   set number and number of sets.  Can handle wrap-around, if that's
	   needed.  */

	int dummy_buffer_base_set;
	unsigned long long eaddr, eaddr0, eaddr1;
	int j;
	int set_offset;

	dummy_buffer_base_set = ((int)&dummy_alloco_area &
				 cpu_data->dcache.entry_mask) >>
				 cpu_data->dcache.entry_shift;
	set_offset = sets_to_purge_base - dummy_buffer_base_set;

	for (j = 0; j < n_sets; j++, set_offset++) {
		set_offset &= (cpu_data->dcache.sets - 1);
		eaddr0 = (unsigned long long)dummy_alloco_area +
			(set_offset << cpu_data->dcache.entry_shift);

		/*
		 * Do one alloco which hits the required set per cache
		 * way.  For write-back mode, this will purge the #ways
		 * resident lines.  There's little point unrolling this
		 * loop because the allocos stall more if they're too
		 * close together.
		 */
		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
				  cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			__asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
			__asm__ __volatile__ ("synco"); /* TAKum03020 */
		}

		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
				  cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			/*
			 * Load from each address.  Required because
			 * alloco is a NOP if the cache is write-through.
			 */
			if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
				__raw_readb((unsigned long)eaddr);
		}
	}

	/*
	 * Don't use OCBI to invalidate the lines.  That costs cycles
	 * directly.  If the dummy block is just left resident, it will
	 * naturally get evicted as required.
	 */
}

/*
 * Purge the entire contents of the dcache.  The most efficient way to
 * achieve this is to use alloco instructions on a region of unused
 * memory equal in size to the cache, thereby causing the current
 * contents to be discarded by natural eviction.  The alternative, namely
 * reading every tag, setting up a mapping for the corresponding page and
 * doing an OCBP for the line, would be much more expensive.
 */
static void sh64_dcache_purge_all(void)
{
	sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
}

/* Assumes this address, and the (2**n_synbits) pages up from it, aren't
   used for anything else in the kernel */
#define MAGIC_PAGE0_START 0xffffffffec000000ULL

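/* sh64_dcache_purge_coloured_phy_page() below adds
   (eaddr & CACHE_OC_SYN_MASK) to MAGIC_PAGE0_START to select the alias
   page whose cache colour (synonym bits) matches 'eaddr'. */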
/* Purge the physical page 'paddr' from the cache.  It's known that any
 * cache lines requiring attention have the same page colour as the
 * address 'eaddr'.
 *
 * This relies on the fact that the D-cache matches on physical tags when
 * no virtual tag matches.  So we create an alias for the original page
 * and purge through that.  (Alternatively, we could have done this by
 * switching ASID to match the original mapping and purged through that,
 * but that involves ASID switching cost + probably a TLBMISS + refill
 * anyway.)
 */
static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
					        unsigned long eaddr)
{
	unsigned long long magic_page_start;
	unsigned long long magic_eaddr, magic_eaddr_end;

	magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);

	magic_eaddr = magic_page_start;
	magic_eaddr_end = magic_eaddr + PAGE_SIZE;

	while (magic_eaddr < magic_eaddr_end) {
		/* Little point in unrolling this loop - the OCBPs are blocking
		   and won't go any quicker (i.e. the loop overhead is parallel
		   to part of the OCBP execution.) */
		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
		magic_eaddr += L1_CACHE_BYTES;
	}

	sh64_teardown_dtlb_cache_slot();
}

/*
 * Purge a page given its physical start address, by creating a temporary
 * 1 page mapping and purging across that.  Even if we know the virtual
 * address (& vma or mm) of the page, the method here is more elegant
 * because it avoids issues of coping with page faults on the purge
 * instructions (i.e. no special-case code required in the critical path
 * in the TLB miss handling).
 */
static void sh64_dcache_purge_phy_page(unsigned long paddr)
{
	unsigned long long eaddr_start, eaddr, eaddr_end;
	int i;

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	eaddr_start = MAGIC_PAGE0_START;
	for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
		sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);

		eaddr = eaddr_start;
		eaddr_end = eaddr + PAGE_SIZE;
		while (eaddr < eaddr_end) {
			__asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
			eaddr += L1_CACHE_BYTES;
		}

		sh64_teardown_dtlb_cache_slot();
		eaddr_start += PAGE_SIZE;
	}
}

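/*
 * Walk the page tables for the range [addr, end) in 'mm' and purge each
 * present page through a coloured alias of its physical address.
 */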
static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
				unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	spinlock_t *ptl;
	unsigned long paddr;

	if (!mm)
		return; /* No way to find physical address of page */

	pgd = pgd_offset(mm, addr);
	if (pgd_bad(*pgd))
		return;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		entry = *pte;
		if (pte_none(entry) || !pte_present(entry))
			continue;
		paddr = pte_val(entry) & PAGE_MASK;
		sh64_dcache_purge_coloured_phy_page(paddr, addr);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * There are at least 5 choices for the implementation of this, with
 * pros (+), cons(-), comments(*):
 *
 * 1. ocbp each line in the range through the original user's ASID
 *    + no lines spuriously evicted
 *    - tlbmiss handling (must either handle faults on demand => extra
 *	special-case code in tlbmiss critical path), or map the page in
 *	advance (=> flush_tlb_range in advance to avoid multiple hits)
 *    - ASID switching
 *    - expensive for large ranges
 *
 * 2. temporarily map each page in the range to a special effective
 *    address and ocbp through the temporary mapping; relies on the
 *    fact that SH-5 OCB* always do TLB lookup and match on ptags (they
 *    never look at the etags)
 *    + no spurious evictions
 *    - expensive for large ranges
 *    * surely cheaper than (1)
 *
 * 3. walk all the lines in the cache, check the tags, if a match
 *    occurs create a page mapping to ocbp the line through
 *    + no spurious evictions
 *    - tag inspection overhead
 *    - (especially for small ranges)
 *    - potential cost of setting up/tearing down page mapping for
 *	every line that matches the range
 *    * cost partly independent of range size
 *
 * 4. walk all the lines in the cache, check the tags, if a match
 *    occurs use 4 * alloco to purge the line (+3 other probably
 *    innocent victims) by natural eviction
 *    + no tlb mapping overheads
 *    - spurious evictions
 *    - tag inspection overhead
 *
 * 5. implement like flush_cache_all
 *    + no tag inspection overhead
 *    - spurious evictions
 *    - bad for small ranges
 *
 * (1) can be ruled out as more expensive than (2).  (2) appears best
 * for small ranges.  The choice between (3), (4) and (5) for large
 * ranges and the range size for the large/small boundary need
 * benchmarking to determine.
 *
 * For now use approach (2) for small ranges and (5) for large ones.
 */
static void sh64_dcache_purge_user_range(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	int n_pages = ((end - start) >> PAGE_SHIFT);

	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
		sh64_dcache_purge_all();
	} else {
		/* Small range, covered by a single page table page */
		start &= PAGE_MASK;	/* should already be so */
		end = PAGE_ALIGN(end);	/* should already be so */
		sh64_dcache_purge_user_pages(mm, start, end);
	}
}
#endif /* !CONFIG_DCACHE_DISABLED */

/*
 * Invalidate the entire contents of both caches, after writing back to
 * memory any dirty data from the D-cache.
 */
void flush_cache_all(void)
{
	sh64_dcache_purge_all();
	sh64_icache_inv_all();
}

/*
 * Invalidate an entire user-address space from both caches, after
 * writing back dirty data (e.g. for shared mmap etc).
 *
 * This could be coded selectively by inspecting all the tags then
 * doing 4*alloco on any set containing a match (as for
 * flush_cache_range), but fork/exit/execve (where this is called from)
 * are expensive anyway.
 *
 * Have to do a purge here, despite the comments re I-cache below.
 * There could be odd-coloured dirty data associated with the mm still
 * in the cache - if this gets written out through natural eviction
 * after the kernel has reused the page there will be chaos.
 *
 * The mm being torn down won't ever be active again, so any Icache
 * lines tagged with its ASID won't be visible for the rest of the
 * lifetime of this ASID cycle.  Before the ASID gets reused, there
 * will be a flush_cache_all.  Hence we don't need to touch the
 * I-cache.  This is similar to the lack of action needed in
 * flush_tlb_mm - see fault.c.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	sh64_dcache_purge_all();
}

/*
 * Invalidate (from both caches) the range [start,end) of virtual
 * addresses from the user address space specified by mm, after writing
 * back any dirty data.
 *
 * Note, 'end' is 1 byte beyond the end of the range to flush.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	sh64_dcache_purge_user_range(mm, start, end);
	sh64_icache_inv_user_page_range(mm, start, end);
}

/*
 * Invalidate any entries in either cache for the vma within the user
 * address space vma->vm_mm for the page starting at virtual address
 * 'eaddr'.   This seems to be used primarily in breaking COW.  Note,
 * the I-cache must be searched too in case the page in question is
 * both writable and being executed from (e.g. stack trampolines.)
 *
 * Note, this is called with pte lock held.
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
		      unsigned long pfn)
{
	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_page(vma, eaddr);
}

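/*
 * Write back and invalidate any cache lines, in all colours, for the
 * physical page backing 'page'.
 */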
void flush_dcache_page(struct page *page)
{
	sh64_dcache_purge_phy_page(page_to_phys(page));
	wmb();
}

/*
 * Flush the range [start,end] of kernel virtual address space from
 * the I-cache.  The corresponding range must be purged from the
 * D-cache also because the SH-5 doesn't have cache snooping between
 * the caches.  The addresses will be visible through the superpage
 * mapping, therefore it's guaranteed that there are no cache entries
 * for the range in cache sets of the wrong colour.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_purge_region((void *)start, end);
	wmb();
	sh64_icache_inv_kernel_range(start, end);
}

/*
 * Flush the range of user (defined by vma->vm_mm) address space starting
 * at 'addr' for 'len' bytes from the cache.  The range does not straddle
 * a page boundary; the unique physical page containing the range is
 * 'page'.  This seems to be used mainly for invalidating an address
 * range following a poke into the program text through the ptrace() call
 * from another process (e.g. for BRK instruction insertion).
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			struct page *page, unsigned long addr, int len)
{
	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
	mb();

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
}

/*
 * For the address range [start,end), write back the data from the
 * D-cache and invalidate the corresponding region of the I-cache for the
 * current process.  Used to flush signal trampolines on the stack to
 * make them executable.
 */
void flush_cache_sigtramp(unsigned long vaddr)
{
	unsigned long end = vaddr + L1_CACHE_BYTES;

	__flush_wback_region((void *)vaddr, L1_CACHE_BYTES);
	wmb();
	sh64_icache_inv_current_user_range(vaddr, end);
}

#ifdef CONFIG_MMU
/*
 * These *MUST* lie in an area of virtual address space that's otherwise
 * unused.
 */
#define UNIQUE_EADDR_START 0xe0000000UL
#define UNIQUE_EADDR_END   0xe8000000UL

/*
 * Given a physical address paddr, and a user virtual address user_eaddr
 * which will eventually be mapped to it, create a one-off kernel-private
 * eaddr mapped to the same paddr.  This is used for creating special
 * destination pages for copy_user_page and clear_user_page.
 */
static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr,
					    unsigned long paddr)
{
	static unsigned long current_pointer = UNIQUE_EADDR_START;
	unsigned long coloured_pointer;

	if (current_pointer == UNIQUE_EADDR_END) {
		sh64_dcache_purge_all();
		current_pointer = UNIQUE_EADDR_START;
	}

	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) |
				(user_eaddr & CACHE_OC_SYN_MASK);
	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);

	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);

	return coloured_pointer;
}

static void sh64_copy_user_page_coloured(void *to, void *from,
					 unsigned long address)
{
	void *coloured_to;

	/*
	 * Discard any existing cache entries of the wrong colour.  These are
	 * present quite often, if the kernel has recently used the page
	 * internally, then given it up, then it's been allocated to the user.
	 */
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);

	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
	copy_page(from, coloured_to);

	sh64_teardown_dtlb_cache_slot();
}

static void sh64_clear_user_page_coloured(void *to, unsigned long address)
{
	void *coloured_to;

	/*
	 * Discard any existing kernel-originated lines of the wrong
	 * colour (as above)
	 */
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);

	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
	clear_page(coloured_to);

	sh64_teardown_dtlb_cache_slot();
}

/*
 * 'from' and 'to' are kernel virtual addresses (within the superpage
 * mapping of the physical RAM).  'address' is the user virtual address
 * where the copy 'to' will be mapped after.  This allows a custom
 * mapping to be used to ensure that the new copy is placed in the
 * right cache sets for the user to see it without having to bounce it
 * out via memory.  Note however : the call to flush_page_to_ram in
 * (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
 * very important case!
 *
 * TBD : can we guarantee that on every call, any cache entries for
 * 'from' are in the same colour sets as 'address' also?  i.e. is this
 * always used just to deal with COW?  (I suspect not).
 *
 * There are two possibilities here for when the page 'from' was last accessed:
 * - by the kernel : this is OK, no purge required.
 * - by the/a user (e.g. for break_COW) : need to purge.
 *
 * If the potential user mapping at 'address' is the same colour as
 * 'from' there is no need to purge any cache lines from the 'from'
 * page mapped into cache sets of colour 'address'.  (The copy will be
 * accessing the page through 'from').
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0)
		sh64_dcache_purge_coloured_phy_page(__pa(from), address);

	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
		copy_page(to, from);
	else
		sh64_copy_user_page_coloured(to, from, address);
}

/*
 * 'to' is a kernel virtual address (within the superpage mapping of the
 * physical RAM).  'address' is the user virtual address where the 'to'
 * page will be mapped after.  This allows a custom mapping to be used to
 * ensure that the new copy is placed in the right cache sets for the
 * user to see it without having to bounce it out via memory.
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
		clear_page(to);
	else
		sh64_clear_user_page_coloured(to, address);
}

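/*
 * Write into a user page (e.g. from ptrace): purge the page from both
 * caches first, do the copy, then make the new contents visible to the
 * I-cache.
 */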
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_icache_user_range(vma, page, vaddr, len);
}

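/*
 * Read from a user page: purge the page from the caches first so the
 * kernel alias sees up-to-date data, then do the copy.
 */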
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}
#endif