#ifndef _ASM_IA64_TLBFLUSH_H
#define _ASM_IA64_TLBFLUSH_H

/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */


#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/mmu_context.h>
#include <asm/page.h>

/*
 * Now for some TLB flushing routines.  These can be very expensive, so
 * try to avoid them whenever possible.
 */
extern void setup_ptcg_sem(int max_purges, int from_palo);

/*
 * Flush everything (kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void local_flush_tlb_all (void);

#ifdef CONFIG_SMP
  extern void smp_flush_tlb_all (void);
  extern void smp_flush_tlb_mm (struct mm_struct *mm);
  extern void smp_flush_tlb_cpumask (cpumask_t xcpumask);
# define flush_tlb_all()	smp_flush_tlb_all()
#else
# define flush_tlb_all()	local_flush_tlb_all()
# define smp_flush_tlb_cpumask(m) local_flush_tlb_all()
#endif
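
/*
 * Illustrative sketch (not part of this interface): code that changes
 * kernel mappings would call flush_tlb_all() afterwards so that no CPU
 * keeps a stale translation.  The teardown helper below is hypothetical:
 *
 *	unmap_kernel_pages_local(start, end);	// hypothetical PTE teardown
 *	flush_tlb_all();			// purge stale kernel translations
 */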

static inline void
local_finish_flush_tlb_mm (struct mm_struct *mm)
{
	if (mm == current->active_mm)
		activate_context(mm);
}

/*
 * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
 * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
 * the PTEs of the parent task.
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (!mm)
		return;

	set_bit(mm->context, ia64_ctx.flushmap);
	mm->context = 0;

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	smp_flush_tlb_mm(mm);
#else
	local_finish_flush_tlb_mm(mm);
#endif
}
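
/*
 * Illustrative sketch (assumption, not taken from this file): a fork-style
 * path that has just write-protected the parent's PTEs for copy-on-write
 * would then discard the stale translations for that address space:
 *
 *	make_parent_ptes_readonly(mm);	// hypothetical helper
 *	flush_tlb_mm(mm);		// old writable translations must go
 */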

extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);

/*
 * Page-granular TLB flush.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
	if (vma->vm_mm == current->active_mm)
		ia64_ptcl(addr, (PAGE_SHIFT << 2));
	else
		vma->vm_mm->context = 0;
#endif
}
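
/*
 * Illustrative sketch (assumption): after a single user PTE has been
 * modified, only that page's translation needs to be purged:
 *
 *	set_pte(ptep, newpte);			// hypothetical PTE update
 *	flush_tlb_page(vma, addr & PAGE_MASK);	// drop the stale entry
 */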
86
87/*
88 * Flush the local TLB. Invoked from another cpu using an IPI.
89 */
90#ifdef CONFIG_SMP
91void smp_local_flush_tlb(void);
92#else
93#define smp_local_flush_tlb()
94#endif
95
96static inline void flush_tlb_kernel_range(unsigned long start,
97					  unsigned long end)
98{
99	flush_tlb_all();	/* XXX fix me */
100}
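
/*
 * Illustrative sketch (assumption): callers that tear down kernel virtual
 * mappings, e.g. a vfree-style path, would use this to make the unmapped
 * range invisible to every CPU (today this simply flushes everything):
 *
 *	unmap_kernel_pages(start, end);		// hypothetical helper
 *	flush_tlb_kernel_range(start, end);	// purge the stale kernel range
 */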

#endif /* _ASM_IA64_TLBFLUSH_H */