1#ifndef _SPARC64_TLBFLUSH_H
2#define _SPARC64_TLBFLUSH_H
3
4#include <asm/mmu_context.h>
5
6/* TSB flush operations. */
7
8#define TLB_BATCH_NR	192
9
/* A batch of pending user TLB flushes: the mm they belong to and up
 * to TLB_BATCH_NR virtual addresses queued for a deferred flush.
 */
struct tlb_batch {
	struct mm_struct *mm;		/* mm whose addresses are queued below */
	unsigned long tlb_nr;		/* number of entries used in vaddrs[] */
	unsigned long active;		/* NOTE(review): presumably nonzero while
					 * batching is in progress — confirm
					 * against the mm/tlb implementation */
	unsigned long vaddrs[TLB_BATCH_NR];	/* queued virtual addresses */
};
16
/* Flush TSB entries for a kernel VA range, for a whole user batch,
 * or for a single user page (definitions live in arch TSB code).
 */
void flush_tsb_kernel_range(unsigned long start, unsigned long end);
void flush_tsb_user(struct tlb_batch *tb);
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
20
21/* TLB flush operations. */
22
/* Intentionally a no-op.  NOTE(review): user TLB flushes appear to be
 * deferred through the lazy-MMU batching hooks declared below rather
 * than done synchronously here — confirm against the mm/tlb code.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}
26
/* Intentionally a no-op: single-page user flushes are not performed
 * synchronously here (see the lazy-MMU/batching declarations in this
 * header for the deferred path).
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}
31
/* Intentionally a no-op: range flushes for user mappings are not done
 * synchronously; this header only provides a synchronous path for
 * kernel ranges (flush_tlb_kernel_range below).
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}
36
void flush_tlb_kernel_range(unsigned long start, unsigned long end);

/* Tell generic code this arch supplies its own lazy MMU mode hooks. */
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

void flush_tlb_pending(void);
void arch_enter_lazy_mmu_mode(void);
void arch_leave_lazy_mmu_mode(void);
/* Nothing to do for a mid-batch "flush"; enter/leave do the work. */
#define arch_flush_lazy_mmu_mode()      do {} while (0)

/* Local cpu only.  */
void __flush_tlb_all(void);
void __flush_tlb_page(unsigned long context, unsigned long vaddr);
void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
50
51#ifndef CONFIG_SMP
52
53static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
54{
55	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
56}
57
#else /* CONFIG_SMP */

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);

/* On SMP a page flush must reach every cpu, so route through the
 * cross-call variant instead of the local-only primitive.
 */
#define global_flush_tlb_page(mm, vaddr) \
	smp_flush_tlb_page(mm, vaddr)

#endif /* ! CONFIG_SMP */
67
68#endif /* _SPARC64_TLBFLUSH_H */
69