cache-sh5.c revision 795687265d1b6f666d02ff56f6c1679a8db160a9
/*
 * arch/sh/mm/cache-sh5.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2002  Benedict Gaster
 * Copyright (C) 2003  Richard Curnow
 * Copyright (C) 2003 - 2008  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

/* Wired TLB entry for the D-cache */
static unsigned long long dtlb_cache_slot;

void __init p3_cache_init(void)
{
	/* Reserve a slot for dcache colouring in the DTLB */
	dtlb_cache_slot	= sh64_get_wired_dtlb_entry();
}

#ifdef CONFIG_DCACHE_DISABLED
#define sh64_dcache_purge_all()					do { } while (0)
#define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)
#define sh64_dcache_purge_user_range(mm, start, end)		do { } while (0)
#define sh64_dcache_purge_phy_page(paddr)			do { } while (0)
#define sh64_dcache_purge_virt_page(mm, eaddr)			do { } while (0)
#endif

/*
 * The following group of functions deal with mapping and unmapping a
 * temporary page into a DTLB slot that has been set aside for exclusive
 * use.
 */
static inline void
sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
			   unsigned long paddr)
{
	local_irq_disable();
	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}

static inline void sh64_teardown_dtlb_cache_slot(void)
{
	sh64_teardown_tlb_slot(dtlb_cache_slot);
	local_irq_enable();
}
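
/*
 * Illustrative sketch only -- not part of this file's original call graph.
 * It shows how the two helpers above are meant to be paired: map the
 * target physical page at a chosen effective address through the wired
 * DTLB slot, operate on the page via that alias, then tear the slot down.
 * The real users are sh64_dcache_purge_coloured_phy_page() and the
 * copy/clear_user_page() paths later in this file.
 */
static inline void sh64_dtlb_cache_slot_usage_sketch(unsigned long alias_eaddr,
						     unsigned long paddr)
{
	unsigned long long addr, end;

	sh64_setup_dtlb_cache_slot(alias_eaddr, get_asid(), paddr);

	/* Purge the page through the temporary alias, one line at a time */
	for (addr = alias_eaddr, end = alias_eaddr + PAGE_SIZE;
	     addr < end; addr += L1_CACHE_BYTES)
		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (addr));

	sh64_teardown_dtlb_cache_slot();
}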

#ifndef CONFIG_ICACHE_DISABLED
static inline void sh64_icache_inv_all(void)
{
	unsigned long long addr, flag, data;
	unsigned long flags;

	addr = ICCR0;
	flag = ICCR0_ICI;
	data = 0;

	/* Make this a critical section for safety (probably not strictly necessary.) */
	local_irq_save(flags);

	/* Without %1 it gets inexplicably wrong */
	__asm__ __volatile__ (
		"getcfg	%3, 0, %0\n\t"
		"or	%0, %2, %0\n\t"
		"putcfg	%3, 0, %0\n\t"
		"synci"
		: "=&r" (data)
		: "0" (data), "r" (flag), "r" (addr));

	local_irq_restore(flags);
}

static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
{
	/* Invalidate range of addresses [start,end] from the I-cache, where
	 * the addresses lie in the kernel superpage. */

	unsigned long long ullend, addr, aligned_start;
	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) end;

	while (addr <= ullend) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
{
	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
	   Also, eaddr is page-aligned. */
	unsigned int cpu = smp_processor_id();
	unsigned long long addr, end_addr;
	unsigned long flags = 0;
	unsigned long running_asid, vma_asid;
	addr = eaddr;
	end_addr = addr + PAGE_SIZE;

	/* Check whether we can use the current ASID for the I-cache
	   invalidation.  For example, if we're called via
	   access_process_vm->flush_cache_page->here, (e.g. when reading from
	   /proc), 'running_asid' will be that of the reader, not of the
	   victim.

	   Also, note the risk that we might get pre-empted between the ASID
	   compare and blocking IRQs, and before we regain control, the
	   pid->ASID mapping changes.  However, the whole cache will get
	   invalidated when the mapping is renewed, so the worst that can
	   happen is that the loop below ends up invalidating somebody else's
	   cache entries.
	*/

	running_asid = get_asid();
	vma_asid = cpu_asid(cpu, vma->vm_mm);
	if (running_asid != vma_asid) {
		local_irq_save(flags);
		switch_and_save_asid(vma_asid);
	}
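	/*
	 * The unrolled loop below assumes 32-byte I-cache lines (hence the
	 * icbi offsets 0/32/64/96) and a PAGE_SIZE that is a multiple of
	 * 128 bytes.
	 */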
	while (addr < end_addr) {
		/* Worth unrolling a little */
		__asm__ __volatile__("icbi %0,  0" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 32" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 64" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
		addr += 128;
	}
	if (running_asid != vma_asid) {
		switch_and_save_asid(running_asid);
		local_irq_restore(flags);
	}
}

static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	/* Used for invalidating big chunks of I-cache, i.e. assume the range
	   is whole pages.  If 'start' or 'end' is not page aligned, the code
	   is conservative and invalidates to the ends of the enclosing pages.
	   This is functionally OK, just a performance loss. */

	/* See the comments below in sh64_dcache_purge_user_range() regarding
	   the choice of algorithm.  However, for the I-cache, option (2) isn't
	   available because there are no physical tags, so aliases can't be
	   resolved.  The icbi instruction has to be used through the user
	   mapping.  Because icbi is cheaper than ocbp on a cache hit, the
	   selective code stays worthwhile up to a larger range than it does
	   for the D-cache.  Just assume 64 pages for now as a working figure.
	   */
	int n_pages;

	if (!mm)
		return;

	n_pages = ((end - start) >> PAGE_SHIFT);
	if (n_pages >= 64) {
		sh64_icache_inv_all();
	} else {
		unsigned long aligned_start;
		unsigned long eaddr;
		unsigned long after_last_page_start;
		unsigned long mm_asid, current_asid;
		unsigned long flags = 0;

		mm_asid = cpu_asid(smp_processor_id(), mm);
		current_asid = get_asid();

		if (mm_asid != current_asid) {
			/* Switch ASID and run the invalidate loop under cli */
			local_irq_save(flags);
			switch_and_save_asid(mm_asid);
		}

		aligned_start = start & PAGE_MASK;
		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);

		while (aligned_start < after_last_page_start) {
			struct vm_area_struct *vma;
			unsigned long vma_end;
			vma = find_vma(mm, aligned_start);
			if (!vma || (aligned_start <= vma->vm_end)) {
				/* Avoid getting stuck in an error condition */
				aligned_start += PAGE_SIZE;
				continue;
			}
			vma_end = vma->vm_end;
			if (vma->vm_flags & VM_EXEC) {
				/* Executable */
				eaddr = aligned_start;
				while (eaddr < vma_end) {
					sh64_icache_inv_user_page(vma, eaddr);
					eaddr += PAGE_SIZE;
				}
			}
			aligned_start = vma->vm_end; /* Skip to start of next region */
		}

		if (mm_asid != current_asid) {
			switch_and_save_asid(current_asid);
			local_irq_restore(flags);
		}
	}
}

/*
 * Invalidate a small range of user context I-cache, not necessarily page
 * (or even cache-line) aligned.
 *
 * Since this is used inside ptrace, the ASID in the mm context typically
 * won't match current_asid.  We'll have to switch ASID to do this.  For
 * safety, and given that the range will be small, do all this under cli.
 *
 * Note, there is a hazard that the ASID in mm->context is no longer
 * actually associated with mm, i.e. if the mm->context has started a new
 * cycle since mm was last active.  However, this is just a performance
 * issue: all that happens is that we invalidate lines belonging to
 * another mm, so the owning process has to refill them when that mm goes
 * live again.  mm itself can't have any cache entries because there will
 * have been a flush_cache_all when the new mm->context cycle started.
 */
static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
						unsigned long start, int len)
{
	unsigned long long eaddr = start;
	unsigned long long eaddr_end = start + len;
	unsigned long current_asid, mm_asid;
	unsigned long flags;
	unsigned long long epage_start;

	/*
	 * Align to start of cache line.  Otherwise, suppose len==8 and
	 * start was at 32N+28 : the last 4 bytes wouldn't get invalidated.
	 */
	eaddr = start & ~(L1_CACHE_BYTES - 1);	/* round down to the enclosing line */
	eaddr_end = start + len;

	mm_asid = cpu_asid(smp_processor_id(), mm);
	local_irq_save(flags);
	current_asid = switch_and_save_asid(mm_asid);

	epage_start = eaddr & PAGE_MASK;

	while (eaddr < eaddr_end) {
		__asm__ __volatile__("icbi %0, 0" : : "r" (eaddr));
		eaddr += L1_CACHE_BYTES;
	}
	switch_and_save_asid(current_asid);
	local_irq_restore(flags);
}

static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
{
	/* The icbi instruction never raises ITLBMISS.  i.e. if there's not a
	   cache hit on the virtual tag the instruction ends there, without a
	   TLB lookup. */

	unsigned long long aligned_start;
	unsigned long long ull_end;
	unsigned long long addr;

	ull_end = end;

	/* Just invalidate over the range using the natural addresses.  TLB
	   miss handling will be OK (TBC).  Since it's for the current process,
	   either we're already in the right ASID context, or the ASIDs have
	   been recycled since we were last active, in which case we might just
	   invalidate another process's I-cache entries : no worries, just a
	   performance drop for that process. */
	aligned_start = L1_CACHE_ALIGN(start);
	addr = aligned_start;
	while (addr < ull_end) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		__asm__ __volatile__ ("nop");
		__asm__ __volatile__ ("nop");
		addr += L1_CACHE_BYTES;
	}
}
#endif /* !CONFIG_ICACHE_DISABLED */

#ifndef CONFIG_DCACHE_DISABLED
/* Buffer used as the target of alloco instructions to purge data from cache
   sets by natural eviction. -- RPC */
#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
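
/*
 * Sizing sketch (illustrative figures, not taken from this file: they
 * assume the 32 KiB, 4-way, 32-byte-line operand cache of the SH5-101/103
 * parts).  L1_CACHE_BYTES << 10 is then 32 KiB, i.e. one full cache's
 * worth of data, with an extra 4 KiB of slack.  Purging one set costs one
 * alloco per way, the target addresses spaced way_size (8 KiB here) apart
 * within this buffer.
 */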

static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
{
	/* Purge all ways in a particular block of sets, specified by the base
	   set number and number of sets.  Can handle wrap-around, if that's
	   needed.  */

	int dummy_buffer_base_set;
	unsigned long long eaddr, eaddr0, eaddr1;
	int j;
	int set_offset;

	dummy_buffer_base_set = ((int)&dummy_alloco_area &
				 cpu_data->dcache.entry_mask) >>
				 cpu_data->dcache.entry_shift;
	set_offset = sets_to_purge_base - dummy_buffer_base_set;

	for (j = 0; j < n_sets; j++, set_offset++) {
		set_offset &= (cpu_data->dcache.sets - 1);
		eaddr0 = (unsigned long long)dummy_alloco_area +
			(set_offset << cpu_data->dcache.entry_shift);

		/*
		 * Do one alloco which hits the required set per cache
		 * way.  For write-back mode, this will purge the #ways
		 * resident lines.  There's little point unrolling this
		 * loop because the allocos stall more if they're too
		 * close together.
		 */
		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
				  cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			__asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
			__asm__ __volatile__ ("synco"); /* TAKum03020 */
		}

		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
				  cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			/*
			 * Load from each address.  Required because
			 * alloco is a NOP if the cache is write-through.
			 */
			if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
				__raw_readb((unsigned long)eaddr);
		}
	}

	/*
	 * Don't use OCBI to invalidate the lines.  That costs cycles
	 * directly.  If the dummy block is just left resident, it will
	 * naturally get evicted as required.
	 */
}

/*
 * Purge the entire contents of the dcache.  The most efficient way to
 * achieve this is to use alloco instructions on a region of unused
 * memory equal in size to the cache, thereby causing the current
 * contents to be discarded by natural eviction.  The alternative, namely
 * reading every tag, setting up a mapping for the corresponding page and
 * doing an OCBP for the line, would be much more expensive.
 */
static void sh64_dcache_purge_all(void)
{
	sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
}

/* Assumes this address and the (2**n_synbits) pages up from it aren't used
   for anything else in the kernel */
#define MAGIC_PAGE0_START 0xffffffffec000000ULL

/* Purge the physical page 'paddr' from the cache.  It's known that any
 * cache lines requiring attention have the same page colour as the
 * address 'eaddr'.
 *
 * This relies on the fact that the D-cache matches on physical tags when
 * no virtual tag matches.  So we create an alias for the original page
 * and purge through that.  (Alternatively, we could have done this by
 * switching ASID to match the original mapping and purged through that,
 * but that involves ASID switching cost + probably a TLBMISS + refill
 * anyway.)
 */
static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
					        unsigned long eaddr)
{
	unsigned long long magic_page_start;
	unsigned long long magic_eaddr, magic_eaddr_end;

	magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);

	magic_eaddr = magic_page_start;
	magic_eaddr_end = magic_eaddr + PAGE_SIZE;

	while (magic_eaddr < magic_eaddr_end) {
		/* Little point in unrolling this loop - the OCBPs are blocking
		   and won't go any quicker (i.e. the loop overhead is parallel
		   to part of the OCBP execution.) */
		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
		magic_eaddr += L1_CACHE_BYTES;
	}

	sh64_teardown_dtlb_cache_slot();
}

/*
 * Purge a page given its physical start address, by creating a temporary
 * 1 page mapping and purging across that.  Even if we know the virtual
 * address (& vma or mm) of the page, the method here is more elegant
 * because it avoids issues of coping with page faults on the purge
 * instructions (i.e. no special-case code required in the critical path
 * in the TLB miss handling).
 */
static void sh64_dcache_purge_phy_page(unsigned long paddr)
{
	unsigned long long eaddr_start, eaddr, eaddr_end;
	int i;

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	eaddr_start = MAGIC_PAGE0_START;
	for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
		sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);

		eaddr = eaddr_start;
		eaddr_end = eaddr + PAGE_SIZE;
		while (eaddr < eaddr_end) {
			__asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
			eaddr += L1_CACHE_BYTES;
		}

		sh64_teardown_dtlb_cache_slot();
		eaddr_start += PAGE_SIZE;
	}
}
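
/*
 * Worked example for the two purge flavours above (figures assume 4 KiB
 * pages and CACHE_OC_N_SYNBITS == 1, i.e. an 8 KiB cache way): a physical
 * page can then be resident under two colours, distinguished by effective
 * address bit 12.  sh64_dcache_purge_coloured_phy_page() purges only the
 * colour selected by 'eaddr', so a single temporary mapping suffices;
 * sh64_dcache_purge_phy_page() knows nothing about the colour, so it maps
 * and purges the page once per colour, at MAGIC_PAGE0_START and
 * MAGIC_PAGE0_START + PAGE_SIZE, i.e. (1 << CACHE_OC_N_SYNBITS) passes.
 */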

static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
				unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	spinlock_t *ptl;
	unsigned long paddr;

	if (!mm)
		return; /* No way to find physical address of page */

	pgd = pgd_offset(mm, addr);
	if (pgd_bad(*pgd))
		return;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		entry = *pte;
		if (pte_none(entry) || !pte_present(entry))
			continue;
		paddr = pte_val(entry) & PAGE_MASK;
		sh64_dcache_purge_coloured_phy_page(paddr, addr);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * There are at least 5 choices for the implementation of this, with
 * pros (+), cons(-), comments(*):
 *
 * 1. ocbp each line in the range through the original user's ASID
 *    + no lines spuriously evicted
 *    - tlbmiss handling (must either handle faults on demand => extra
 *	special-case code in tlbmiss critical path), or map the page in
 *	advance (=> flush_tlb_range in advance to avoid multiple hits)
 *    - ASID switching
 *    - expensive for large ranges
 *
 * 2. temporarily map each page in the range to a special effective
 *    address and ocbp through the temporary mapping; relies on the
 *    fact that SH-5 OCB* always do TLB lookup and match on ptags (they
 *    never look at the etags)
 *    + no spurious evictions
 *    - expensive for large ranges
 *    * surely cheaper than (1)
 *
 * 3. walk all the lines in the cache, check the tags, if a match
 *    occurs create a page mapping to ocbp the line through
 *    + no spurious evictions
 *    - tag inspection overhead
 *    - (especially for small ranges)
 *    - potential cost of setting up/tearing down page mapping for
 *	every line that matches the range
 *    * cost partly independent of range size
 *
 * 4. walk all the lines in the cache, check the tags, if a match
 *    occurs use 4 * alloco to purge the line (+3 other probably
 *    innocent victims) by natural eviction
 *    + no tlb mapping overheads
 *    - spurious evictions
 *    - tag inspection overhead
 *
 * 5. implement like flush_cache_all
 *    + no tag inspection overhead
 *    - spurious evictions
 *    - bad for small ranges
 *
 * (1) can be ruled out as more expensive than (2).  (2) appears best
 * for small ranges.  The choice between (3), (4) and (5) for large
 * ranges and the range size for the large/small boundary need
 * benchmarking to determine.
 *
 * For now use approach (2) for small ranges and (5) for large ones.
 */
static void sh64_dcache_purge_user_range(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	int n_pages = ((end - start) >> PAGE_SHIFT);

	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
		sh64_dcache_purge_all();
	} else {
		/* Small range, covered by a single page table page */
		start &= PAGE_MASK;	/* should already be so */
		end = PAGE_ALIGN(end);	/* should already be so */
		sh64_dcache_purge_user_pages(mm, start, end);
	}
}
#endif /* !CONFIG_DCACHE_DISABLED */

/*
 * Invalidate the entire contents of both caches, after writing back to
 * memory any dirty data from the D-cache.
 */
void flush_cache_all(void)
{
	sh64_dcache_purge_all();
	sh64_icache_inv_all();
}

/*
 * Invalidate an entire user-address space from both caches, after
 * writing back dirty data (e.g. for shared mmap etc).
 *
 * This could be coded selectively by inspecting all the tags then
 * doing 4*alloco on any set containing a match (as for
 * flush_cache_range), but fork/exit/execve (where this is called from)
 * are expensive anyway.
 *
 * Have to do a purge here, despite the comments re I-cache below.
 * There could be odd-coloured dirty data associated with the mm still
 * in the cache - if this gets written out through natural eviction
 * after the kernel has reused the page there will be chaos.
 *
 * The mm being torn down won't ever be active again, so any Icache
 * lines tagged with its ASID won't be visible for the rest of the
 * lifetime of this ASID cycle.  Before the ASID gets reused, there
 * will be a flush_cache_all.  Hence we don't need to touch the
 * I-cache.  This is similar to the lack of action needed in
 * flush_tlb_mm - see fault.c.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	sh64_dcache_purge_all();
}

/*
 * Invalidate (from both caches) the range [start,end) of virtual
 * addresses from the user address space specified by mm, after writing
 * back any dirty data.
 *
 * Note, 'end' is 1 byte beyond the end of the range to flush.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	sh64_dcache_purge_user_range(mm, start, end);
	sh64_icache_inv_user_page_range(mm, start, end);
}

/*
 * Invalidate any entries in either cache for the vma within the user
 * address space vma->vm_mm for the page starting at virtual address
 * 'eaddr'.   This seems to be used primarily in breaking COW.  Note,
 * the I-cache must be searched too in case the page in question is
 * both writable and being executed from (e.g. stack trampolines.)
 *
 * Note, this is called with pte lock held.
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
		      unsigned long pfn)
{
	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_page(vma, eaddr);
}

void flush_dcache_page(struct page *page)
{
	sh64_dcache_purge_phy_page(page_to_phys(page));
	wmb();
}

/*
 * Flush the range [start,end] of kernel virtual address space from
 * the I-cache.  The corresponding range must be purged from the
 * D-cache also because the SH-5 doesn't have cache snooping between
 * the caches.  The addresses will be visible through the superpage
 * mapping, therefore it's guaranteed that there are no cache entries
 * for the range in cache sets of the wrong colour.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_purge_region((void *)start, end);
	wmb();
	sh64_icache_inv_kernel_range(start, end);
}

/*
 * Flush the range of user (defined by vma->vm_mm) address space starting
 * at 'addr' for 'len' bytes from the cache.  The range does not straddle
 * a page boundary, the unique physical page containing the range is
 * 'page'.  This seems to be used mainly for invalidating an address
 * range following a poke into the program text through the ptrace() call
 * from another process (e.g. for BRK instruction insertion).
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			struct page *page, unsigned long addr, int len)
{
	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
	mb();

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
}

/*
 * For the L1_CACHE_BYTES-sized range starting at 'vaddr', write back the
 * data from the D-cache and invalidate the corresponding region of the
 * I-cache for the current process.  Used to flush signal trampolines on
 * the stack to make them executable.
 */
void flush_cache_sigtramp(unsigned long vaddr)
{
	unsigned long end = vaddr + L1_CACHE_BYTES;

	__flush_wback_region((void *)vaddr, L1_CACHE_BYTES);
	wmb();
	sh64_icache_inv_current_user_range(vaddr, end);
}
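
/*
 * Illustrative caller sketch (hypothetical names, not taken from this
 * file): after the signal delivery code has stored the trampoline
 * instructions on the user stack, it would do something like
 *
 *	err |= __put_user(TRAMP_INSN0, &frame->retcode[0]);
 *	err |= __put_user(TRAMP_INSN1, &frame->retcode[1]);
 *	flush_cache_sigtramp((unsigned long)frame->retcode);
 *
 * so the freshly written instructions reach memory and any stale I-cache
 * lines for that address are dropped before the task returns through the
 * trampoline.  'frame', 'retcode' and TRAMP_INSN* are placeholders here.
 */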

#ifdef CONFIG_MMU
/*
 * These *MUST* lie in an area of virtual address space that's otherwise
 * unused.
 */
#define UNIQUE_EADDR_START 0xe0000000UL
#define UNIQUE_EADDR_END   0xe8000000UL

/*
 * Given a physical address paddr, and a user virtual address user_eaddr
 * which will eventually be mapped to it, create a one-off kernel-private
 * eaddr mapped to the same paddr.  This is used for creating special
 * destination pages for copy_user_page and clear_user_page.
 */
static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr,
					    unsigned long paddr)
{
	static unsigned long current_pointer = UNIQUE_EADDR_START;
	unsigned long coloured_pointer;

	if (current_pointer == UNIQUE_EADDR_END) {
		sh64_dcache_purge_all();
		current_pointer = UNIQUE_EADDR_START;
	}

	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) |
				(user_eaddr & CACHE_OC_SYN_MASK);
	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);

	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);

	return coloured_pointer;
}
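
/*
 * The colouring arithmetic above, spelled out: the synonym bits of
 * 'user_eaddr' (CACHE_OC_SYN_MASK) replace the corresponding bits of the
 * rolling 'current_pointer', so the private alias lands in exactly the
 * cache sets that the user's own mapping of the page will use.  The
 * pointer then advances by (PAGE_SIZE << CACHE_OC_N_SYNBITS), so
 * successive calls never hand out the same alias twice before wrapping,
 * whatever colour they request.
 */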

static void sh64_copy_user_page_coloured(void *to, void *from,
					 unsigned long address)
{
	void *coloured_to;

	/*
	 * Discard any existing cache entries of the wrong colour.  These are
	 * present quite often, if the kernel has recently used the page
	 * internally, then given it up, then it's been allocated to the user.
	 */
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);

	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
	copy_page(from, coloured_to);

	sh64_teardown_dtlb_cache_slot();
}

static void sh64_clear_user_page_coloured(void *to, unsigned long address)
{
	void *coloured_to;

	/*
	 * Discard any existing kernel-originated lines of the wrong
	 * colour (as above)
	 */
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);

	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
	clear_page(coloured_to);

	sh64_teardown_dtlb_cache_slot();
}

/*
 * 'from' and 'to' are kernel virtual addresses (within the superpage
 * mapping of the physical RAM).  'address' is the user virtual address
 * where the copy 'to' will be mapped afterwards.  This allows a custom
 * mapping to be used to ensure that the new copy is placed in the
 * right cache sets for the user to see it without having to bounce it
 * out via memory.  Note however : the call to flush_page_to_ram in
 * (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
 * very important case!
 *
 * TBD : can we guarantee that on every call, any cache entries for
 * 'from' are in the same colour sets as 'address' also?  i.e. is this
 * always used just to deal with COW?  (I suspect not).
 *
 * There are two possibilities here for when the page 'from' was last accessed:
 * - by the kernel : this is OK, no purge required.
 * - by the/a user (e.g. for break_COW) : need to purge.
 *
 * If the potential user mapping at 'address' is the same colour as
 * 'from' there is no need to purge any cache lines from the 'from'
 * page mapped into cache sets of colour 'address'.  (The copy will be
 * accessing the page through 'from').
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0)
		sh64_dcache_purge_coloured_phy_page(__pa(from), address);

	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
		copy_page(to, from);
	else
		sh64_copy_user_page_coloured(to, from, address);
}

/*
 * 'to' is a kernel virtual address (within the superpage mapping of the
 * physical RAM).  'address' is the user virtual address where the 'to'
 * page will be mapped afterwards.  This allows a custom mapping to be used
 * to ensure that the new copy is placed in the right cache sets for the
 * user to see it without having to bounce it out via memory.
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
		clear_page(to);
	else
		sh64_clear_user_page_coloured(to, address);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_icache_user_range(vma, page, vaddr, len);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}
#endif
