kvm_host.h revision 656473003bc7e056c3bbd4a4d9832dad01e86f76
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#ifndef __MIPS_KVM_HOST_H__
#define __MIPS_KVM_HOST_H__

#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>

/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S)					\
	(KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)					\
	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
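/*
 * Illustrative sketch (not part of the original header): the encoded ids
 * above are consumed by the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu
 * ioctls, so userspace access to a guest CP0 register looks roughly like:
 *
 *	uint64_t status_val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_MIPS_CP0_STATUS,
 *		.addr = (unsigned long)&status_val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	// vcpu_fd is assumed
 */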

#define KVM_MAX_VCPUS		1
#define KVM_USER_MEM_SLOTS	8
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS	0

#define KVM_COALESCED_MMIO_PAGE_OFFSET 1

/* Special address that contains the comm page, used for reducing # of traps */
#define KVM_GUEST_COMMPAGE_ADDR		0x0

#define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))

#define KVM_GUEST_KUSEG			0x00000000UL
#define KVM_GUEST_KSEG0			0x40000000UL
#define KVM_GUEST_KSEG23		0x60000000UL
#define KVM_GUEST_KSEGX(a)		((_ACAST32_(a)) & 0x60000000)
#define KVM_GUEST_CPHYSADDR(a)		((_ACAST32_(a)) & 0x1fffffff)

#define KVM_GUEST_CKSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_CKSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_CKSEG23ADDR(a)	(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)

/*
 * Map an address to a certain kernel segment
 */
#define KVM_GUEST_KSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_KSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_KSEG23ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
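/*
 * Worked example (illustrative): KVM_GUEST_CPHYSADDR() keeps the low 29
 * address bits and the KVM_GUEST_KSEG*ADDR() macros then OR in the guest
 * segment base, so e.g.
 *
 *	KVM_GUEST_KSEG0ADDR(0x80012345) == (0x80012345 & 0x1fffffff) | 0x40000000
 *	                                == 0x40012345
 *
 * i.e. guest "KSEG0" lives at 0x40000000 in the guest address map rather
 * than at the architectural 0x80000000.
 */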

#define KVM_INVALID_PAGE		0xdeadbeef
#define KVM_INVALID_INST		0xdeadbeef
#define KVM_INVALID_ADDR		0xdeadbeef

#define KVM_MALTA_GUEST_RTC_ADDR	0xb8000070UL

#define GUEST_TICKS_PER_JIFFY		(40000000/HZ)
#define MS_TO_NS(x)			((x) * 1E6L)

#define CAUSEB_DC			27
#define CAUSEF_DC			(_ULCAST_(1) << 27)

extern atomic_t kvm_mips_instance;
extern pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
extern void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
extern bool (*kvm_mips_is_error_pfn)(pfn_t pfn);

struct kvm_vm_stat {
	u32 remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u32 wait_exits;
	u32 cache_exits;
	u32 signal_exits;
	u32 int_exits;
	u32 cop_unusable_exits;
	u32 tlbmod_exits;
	u32 tlbmiss_ld_exits;
	u32 tlbmiss_st_exits;
	u32 addrerr_st_exits;
	u32 addrerr_ld_exits;
	u32 syscall_exits;
	u32 resvd_inst_exits;
	u32 break_inst_exits;
	u32 flush_dcache_exits;
	u32 halt_wakeup;
};

enum kvm_mips_exit_types {
	WAIT_EXITS,
	CACHE_EXITS,
	SIGNAL_EXITS,
	INT_EXITS,
	COP_UNUSABLE_EXITS,
	TLBMOD_EXITS,
	TLBMISS_LD_EXITS,
	TLBMISS_ST_EXITS,
	ADDRERR_ST_EXITS,
	ADDRERR_LD_EXITS,
	SYSCALL_EXITS,
	RESVD_INST_EXITS,
	BREAK_INST_EXITS,
	FLUSH_DCACHE_EXITS,
	MAX_KVM_MIPS_EXIT_TYPES
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	/* Guest GVA->HPA page table */
	unsigned long *guest_pmap;
	unsigned long guest_pmap_npages;

	/* Wired host TLB used for the commpage */
	int commpage_tlb;
};

#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8

struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};

/*
 * Coprocessor 0 register names
 */
#define MIPS_CP0_TLB_INDEX	0
#define MIPS_CP0_TLB_RANDOM	1
#define MIPS_CP0_TLB_LOW	2
#define MIPS_CP0_TLB_LO0	2
#define MIPS_CP0_TLB_LO1	3
#define MIPS_CP0_TLB_CONTEXT	4
#define MIPS_CP0_TLB_PG_MASK	5
#define MIPS_CP0_TLB_WIRED	6
#define MIPS_CP0_HWRENA		7
#define MIPS_CP0_BAD_VADDR	8
#define MIPS_CP0_COUNT		9
#define MIPS_CP0_TLB_HI		10
#define MIPS_CP0_COMPARE	11
#define MIPS_CP0_STATUS		12
#define MIPS_CP0_CAUSE		13
#define MIPS_CP0_EXC_PC		14
#define MIPS_CP0_PRID		15
#define MIPS_CP0_CONFIG		16
#define MIPS_CP0_LLADDR		17
#define MIPS_CP0_WATCH_LO	18
#define MIPS_CP0_WATCH_HI	19
#define MIPS_CP0_TLB_XCONTEXT	20
#define MIPS_CP0_ECC		26
#define MIPS_CP0_CACHE_ERR	27
#define MIPS_CP0_TAG_LO		28
#define MIPS_CP0_TAG_HI		29
#define MIPS_CP0_ERROR_PC	30
#define MIPS_CP0_DEBUG		23
#define MIPS_CP0_DEPC		24
#define MIPS_CP0_PERFCNT	25
#define MIPS_CP0_ERRCTL		26
#define MIPS_CP0_DATA_LO	28
#define MIPS_CP0_DATA_HI	29
#define MIPS_CP0_DESAVE		31

#define MIPS_CP0_CONFIG_SEL	0
#define MIPS_CP0_CONFIG1_SEL	1
#define MIPS_CP0_CONFIG2_SEL	2
#define MIPS_CP0_CONFIG3_SEL	3

/* Config0 register bits */
#define CP0C0_M			31
#define CP0C0_K23		28
#define CP0C0_KU		25
#define CP0C0_MDU		20
#define CP0C0_MM		17
#define CP0C0_BM		16
#define CP0C0_BE		15
#define CP0C0_AT		13
#define CP0C0_AR		10
#define CP0C0_MT		7
#define CP0C0_VI		3
#define CP0C0_K0		0

/* Config1 register bits */
#define CP0C1_M			31
#define CP0C1_MMU		25
#define CP0C1_IS		22
#define CP0C1_IL		19
#define CP0C1_IA		16
#define CP0C1_DS		13
#define CP0C1_DL		10
#define CP0C1_DA		7
#define CP0C1_C2		6
#define CP0C1_MD		5
#define CP0C1_PC		4
#define CP0C1_WR		3
#define CP0C1_CA		2
#define CP0C1_EP		1
#define CP0C1_FP		0

/* Config2 register bits */
#define CP0C2_M			31
#define CP0C2_TU		28
#define CP0C2_TS		24
#define CP0C2_TL		20
#define CP0C2_TA		16
#define CP0C2_SU		12
#define CP0C2_SS		8
#define CP0C2_SL		4
#define CP0C2_SA		0

/* Config3 register bits */
#define CP0C3_M			31
#define CP0C3_ISA_ON_EXC	16
#define CP0C3_ULRI		13
#define CP0C3_DSPP		10
#define CP0C3_LPA		7
#define CP0C3_VEIC		6
#define CP0C3_VInt		5
#define CP0C3_SP		4
#define CP0C3_MT		2
#define CP0C3_SM		1
#define CP0C3_TL		0

/* Have Config1; Kseg0 cacheable, noncoherent, write-back, write allocate */
#define MIPS_CONFIG0						\
  ((1 << CP0C0_M) | (0x3 << CP0C0_K0))

/* Have Config2, no coprocessor 2 attached, no MDMX support attached,
   no performance counters, no watch registers, no code compression,
   EJTAG present, no FPU */
#define MIPS_CONFIG1						\
((1 << CP0C1_M) |						\
 (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) |		\
 (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) |		\
 (0 << CP0C1_FP))

/* Have Config3, no tertiary/secondary caches implemented */
#define MIPS_CONFIG2						\
((1 << CP0C2_M))

/* No Config4, no DSP ASE, no large physaddr (PABITS),
   no external interrupt controller, no vectored interrupts,
   no 1kb pages, no SmartMIPS ASE, no trace logic */
#define MIPS_CONFIG3						\
((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) |	\
 (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) |	\
 (0 << CP0C3_SM) | (0 << CP0C3_TL))
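/*
 * Worked values (illustrative): with the bit positions above these
 * constants expand to
 *
 *	MIPS_CONFIG0 == 0x80000003	(M set, K0 = 3: cacheable, WB, WA)
 *	MIPS_CONFIG1 == 0x80000002	(M set, EP set)
 *	MIPS_CONFIG2 == 0x80000000	(M set)
 *	MIPS_CONFIG3 == 0x00000000
 *
 * MIPS_CONFIG3 is all-zero because every feature bit is explicitly cleared.
 */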

/* MMU types, the first four entries have the same layout as the
   CP0C0_MT field.  */
enum mips_mmu_types {
	MMU_TYPE_NONE,
	MMU_TYPE_R4000,
	MMU_TYPE_RESERVED,
	MMU_TYPE_FMT,
	MMU_TYPE_R3000,
	MMU_TYPE_R6000,
	MMU_TYPE_R8000
};

/*
 * Trap codes
 */
#define T_INT			0	/* Interrupt pending */
#define T_TLB_MOD		1	/* TLB modified fault */
#define T_TLB_LD_MISS		2	/* TLB miss on load or ifetch */
#define T_TLB_ST_MISS		3	/* TLB miss on a store */
#define T_ADDR_ERR_LD		4	/* Address error on a load or ifetch */
#define T_ADDR_ERR_ST		5	/* Address error on a store */
#define T_BUS_ERR_IFETCH	6	/* Bus error on an ifetch */
#define T_BUS_ERR_LD_ST		7	/* Bus error on a load or store */
#define T_SYSCALL		8	/* System call */
#define T_BREAK			9	/* Breakpoint */
#define T_RES_INST		10	/* Reserved instruction exception */
#define T_COP_UNUSABLE		11	/* Coprocessor unusable */
#define T_OVFLOW		12	/* Arithmetic overflow */

/*
 * Trap definitions added for r4000 port.
 */
#define T_TRAP			13	/* Trap instruction */
#define T_VCEI			14	/* Virtual coherency exception */
#define T_FPE			15	/* Floating point exception */
#define T_WATCH			23	/* Watch address reference */
#define T_VCED			31	/* Virtual coherency data */

/* Resume Flags */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
};

#define MIPS3_PG_G	0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
#define MIPS3_PG_V	0x00000002 /* Valid */
#define MIPS3_PG_NV	0x00000000
#define MIPS3_PG_D	0x00000004 /* Dirty */

#define mips3_paddr_to_tlbpfn(x) \
	(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
#define mips3_tlbpfn_to_paddr(x) \
	((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)

#define MIPS3_PG_SHIFT		6
#define MIPS3_PG_FRAME		0x3fffffc0

#define VPN2_MASK		0xffffe000
#define TLB_IS_GLOBAL(x)	(((x).tlb_lo0 & MIPS3_PG_G) &&		\
				 ((x).tlb_lo1 & MIPS3_PG_G))
#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)		((x).tlb_hi & ASID_MASK)
#define TLB_IS_VALID(x, va)	(((va) & (1 << PAGE_SHIFT))		\
				 ? ((x).tlb_lo1 & MIPS3_PG_V)		\
				 : ((x).tlb_lo0 & MIPS3_PG_V))
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
				 TLB_ASID(x) == ((y) & ASID_MASK))

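/*
 * Illustrative sketch (not part of the original header): a software guest
 * TLB probe in the style of kvm_mips_guest_tlb_lookup() below matches an
 * entry when both the (PageMask-adjusted) VPN2 and the ASID hit:
 *
 *	int i;
 *	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
 *		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[i];
 *
 *		if (TLB_HI_VPN2_HIT(*tlb, entryhi) &&
 *		    TLB_HI_ASID_HIT(*tlb, entryhi))
 *			return i;	// index of the matching entry
 *	}
 *	return -1;			// no match
 */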
struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo0;
	long tlb_lo1;
};

#define KVM_MIPS_GUEST_TLB_SIZE	64
struct kvm_vcpu_arch {
	void *host_ebase, *guest_ebase;
	unsigned long host_stack;
	unsigned long host_gp;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_cause;
	unsigned long host_cp0_epc;
	unsigned long host_cp0_entryhi;
	uint32_t guest_inst;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;

	/* COP0 State */
	struct mips_coproc *cop0;

	/* Host KSEG0 address of the EI/DI offset */
	void *kseg0_commpage;

	u32 io_gpr;		/* GPR used as IO source/target */

	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	uint32_t count_ctl;
	/* Count bias from the raw time */
	uint32_t count_bias;
	/* Frequency of timer in Hz */
	uint32_t count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	unsigned long pending_load_cause;

	/* Save/Restore the entryhi register when we are preempted/scheduled back in */
	unsigned long preempt_entryhi;

	/* S/W Based TLB for guest */
	struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];

	/* Cached guest kernel/user ASIDs */
	uint32_t guest_user_asid[NR_CPUS];
	uint32_t guest_kernel_asid[NR_CPUS];
	struct mm_struct guest_kernel_mm, guest_user_mm;

	int last_sched_cpu;

	/* WAIT executed */
	int wait;
};

#define kvm_read_c0_guest_index(cop0)		(cop0->reg[MIPS_CP0_TLB_INDEX][0])
#define kvm_write_c0_guest_index(cop0, val)	(cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
#define kvm_read_c0_guest_entrylo0(cop0)	(cop0->reg[MIPS_CP0_TLB_LO0][0])
#define kvm_read_c0_guest_entrylo1(cop0)	(cop0->reg[MIPS_CP0_TLB_LO1][0])
#define kvm_read_c0_guest_context(cop0)		(cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
#define kvm_write_c0_guest_context(cop0, val)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
#define kvm_read_c0_guest_userlocal(cop0)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
#define kvm_write_c0_guest_userlocal(cop0, val)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val))
#define kvm_read_c0_guest_pagemask(cop0)	(cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
#define kvm_write_c0_guest_pagemask(cop0, val)	(cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
#define kvm_read_c0_guest_wired(cop0)		(cop0->reg[MIPS_CP0_TLB_WIRED][0])
#define kvm_write_c0_guest_wired(cop0, val)	(cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
#define kvm_read_c0_guest_hwrena(cop0)		(cop0->reg[MIPS_CP0_HWRENA][0])
#define kvm_write_c0_guest_hwrena(cop0, val)	(cop0->reg[MIPS_CP0_HWRENA][0] = (val))
#define kvm_read_c0_guest_badvaddr(cop0)	(cop0->reg[MIPS_CP0_BAD_VADDR][0])
#define kvm_write_c0_guest_badvaddr(cop0, val)	(cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
#define kvm_read_c0_guest_count(cop0)		(cop0->reg[MIPS_CP0_COUNT][0])
#define kvm_write_c0_guest_count(cop0, val)	(cop0->reg[MIPS_CP0_COUNT][0] = (val))
#define kvm_read_c0_guest_entryhi(cop0)		(cop0->reg[MIPS_CP0_TLB_HI][0])
#define kvm_write_c0_guest_entryhi(cop0, val)	(cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
#define kvm_read_c0_guest_compare(cop0)		(cop0->reg[MIPS_CP0_COMPARE][0])
#define kvm_write_c0_guest_compare(cop0, val)	(cop0->reg[MIPS_CP0_COMPARE][0] = (val))
#define kvm_read_c0_guest_status(cop0)		(cop0->reg[MIPS_CP0_STATUS][0])
#define kvm_write_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] = (val))
#define kvm_read_c0_guest_intctl(cop0)		(cop0->reg[MIPS_CP0_STATUS][1])
#define kvm_write_c0_guest_intctl(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][1] = (val))
#define kvm_read_c0_guest_cause(cop0)		(cop0->reg[MIPS_CP0_CAUSE][0])
#define kvm_write_c0_guest_cause(cop0, val)	(cop0->reg[MIPS_CP0_CAUSE][0] = (val))
#define kvm_read_c0_guest_epc(cop0)		(cop0->reg[MIPS_CP0_EXC_PC][0])
#define kvm_write_c0_guest_epc(cop0, val)	(cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
#define kvm_read_c0_guest_prid(cop0)		(cop0->reg[MIPS_CP0_PRID][0])
#define kvm_write_c0_guest_prid(cop0, val)	(cop0->reg[MIPS_CP0_PRID][0] = (val))
#define kvm_read_c0_guest_ebase(cop0)		(cop0->reg[MIPS_CP0_PRID][1])
#define kvm_write_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] = (val))
#define kvm_read_c0_guest_config(cop0)		(cop0->reg[MIPS_CP0_CONFIG][0])
#define kvm_read_c0_guest_config1(cop0)		(cop0->reg[MIPS_CP0_CONFIG][1])
#define kvm_read_c0_guest_config2(cop0)		(cop0->reg[MIPS_CP0_CONFIG][2])
#define kvm_read_c0_guest_config3(cop0)		(cop0->reg[MIPS_CP0_CONFIG][3])
#define kvm_read_c0_guest_config7(cop0)		(cop0->reg[MIPS_CP0_CONFIG][7])
#define kvm_write_c0_guest_config(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][0] = (val))
#define kvm_write_c0_guest_config1(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][1] = (val))
#define kvm_write_c0_guest_config2(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][2] = (val))
#define kvm_write_c0_guest_config3(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][3] = (val))
#define kvm_write_c0_guest_config7(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][7] = (val))
#define kvm_read_c0_guest_errorepc(cop0)	(cop0->reg[MIPS_CP0_ERROR_PC][0])
#define kvm_write_c0_guest_errorepc(cop0, val)	(cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))

/*
 * Some of the guest registers may be modified asynchronously (e.g. from a
 * hrtimer callback in hard irq context) and therefore need stronger atomicity
 * guarantees than other registers.
 */

static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"	" __LL "%0, %1				\n"
		"	or	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	or	%0, %3				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));
}

#define kvm_set_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] |= (val))
#define kvm_clear_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))

/* Cause can be modified asynchronously from hardirq hrtimer callback */
#define kvm_set_c0_guest_cause(cop0, val)				\
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
#define kvm_clear_c0_guest_cause(cop0, val)				\
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
#define kvm_change_c0_guest_cause(cop0, change, val)			\
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0],	\
					change, val)

#define kvm_set_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] |= (val))
#define kvm_clear_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
#define kvm_change_c0_guest_ebase(cop0, change, val)			\
{									\
	kvm_clear_c0_guest_ebase(cop0, change);				\
	kvm_set_c0_guest_ebase(cop0, ((val) & (change)));		\
}
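/*
 * Illustrative example (not part of the original header): because guest
 * Cause is updated through the ll/sc helpers above, setting or clearing
 * individual bits is safe against a concurrent hrtimer callback, e.g.
 *
 *	kvm_set_c0_guest_cause(vcpu->arch.cop0, CAUSEF_DC);	// set Cause.DC
 *	kvm_clear_c0_guest_cause(vcpu->arch.cop0, CAUSEF_DC);	// clear it again
 */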

struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*vm_init)(struct kvm *kvm);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   uint32_t cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 uint32_t cause);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
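/*
 * Illustrative sketch (not part of the original header): an emulation
 * backend fills in a static callback table and hands it back from
 * kvm_mips_emulation_init(), roughly along these lines:
 *
 *	static struct kvm_mips_callbacks my_callbacks = {	// hypothetical name
 *		.handle_cop_unusable = my_handle_cop_unusable,
 *		.vcpu_setup          = my_vcpu_setup,
 *		...
 *	};
 *
 *	int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
 *	{
 *		*install_callbacks = &my_callbacks;
 *		return 0;
 *	}
 */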

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

/* Trampoline ASM routine to start running in "Guest" context */
extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);

/* TLB handling */
uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);

uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);

uint32_t kvm_get_commpage_asid(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
					   struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
					      struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
						struct kvm_mips_tlb *tlb,
						unsigned long *hpa0,
						unsigned long *hpa1);

extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
						     uint32_t *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
						    uint32_t *opc,
						    struct kvm_run *run,
						    struct kvm_vcpu *vcpu);

extern void kvm_mips_dump_host_tlbs(void);
extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
extern void kvm_mips_flush_host_tlb(int skip_kseg0);
extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index);

extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
				     unsigned long entryhi);
extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						   unsigned long gva);
extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
				    struct kvm_vcpu *vcpu);
extern void kvm_local_flush_tlb_all(void);
extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);

/* Emulation */
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);

extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
						   uint32_t *opc,
						   struct kvm_run *run,
						   struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
						      uint32_t *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
							 uint32_t *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
							uint32_t *opc,
							struct kvm_run *run,
							struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
							 uint32_t *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
							uint32_t *opc,
							struct kvm_run *run,
							struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
						     uint32_t *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
						      uint32_t *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
						uint32_t *opc,
						struct kvm_run *run,
						struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
						     uint32_t *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
						     uint32_t *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
							 struct kvm_run *run);

uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare);
void kvm_mips_init_count(struct kvm_vcpu *vcpu);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_check_privilege(unsigned long cause,
					       uint32_t *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
					     uint32_t *opc,
					     uint32_t cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
					   uint32_t *opc,
					   uint32_t cause,
					   struct kvm_run *run,
					   struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_store(uint32_t inst,
					     uint32_t cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_load(uint32_t inst,
					    uint32_t cause,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu);

/* Dynamic binary translation */
extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
				      struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
				   struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
			       struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
			       struct kvm_vcpu *vcpu);

/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);

#endif /* __MIPS_KVM_HOST_H__ */