1/******************************************************************************
2 * emulate.c
3 *
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5 *
6 * Copyright (c) 2005 Keir Fraser
7 *
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
10 *
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
13 *
14 *   Avi Kivity <avi@qumranet.com>
15 *   Yaniv Kamay <yaniv@qumranet.com>
16 *
17 * This work is licensed under the terms of the GNU GPL, version 2.  See
18 * the COPYING file in the top-level directory.
19 *
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
21 */
22
23#include <linux/kvm_host.h>
24#include "kvm_cache_regs.h"
25#include <linux/module.h>
26#include <asm/kvm_emulate.h>
27#include <linux/stringify.h>
28
29#include "x86.h"
30#include "tss.h"
31
32/*
33 * Operand types
34 */
35#define OpNone             0ull
36#define OpImplicit         1ull  /* No generic decode */
37#define OpReg              2ull  /* Register */
38#define OpMem              3ull  /* Memory */
39#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
40#define OpDI               5ull  /* ES:DI/EDI/RDI */
41#define OpMem64            6ull  /* Memory, 64-bit */
42#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
43#define OpDX               8ull  /* DX register */
44#define OpCL               9ull  /* CL register (for shifts) */
45#define OpImmByte         10ull  /* 8-bit sign extended immediate */
46#define OpOne             11ull  /* Implied 1 */
47#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
48#define OpMem16           13ull  /* Memory operand (16-bit). */
49#define OpMem32           14ull  /* Memory operand (32-bit). */
50#define OpImmU            15ull  /* Immediate operand, zero extended */
51#define OpSI              16ull  /* SI/ESI/RSI */
52#define OpImmFAddr        17ull  /* Immediate far address */
53#define OpMemFAddr        18ull  /* Far address in memory */
54#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
55#define OpES              20ull  /* ES */
56#define OpCS              21ull  /* CS */
57#define OpSS              22ull  /* SS */
58#define OpDS              23ull  /* DS */
59#define OpFS              24ull  /* FS */
60#define OpGS              25ull  /* GS */
61#define OpMem8            26ull  /* 8-bit zero extended memory operand */
62#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
63#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
64#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
65#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */
66
67#define OpBits             5  /* Width of operand field */
68#define OpMask             ((1ull << OpBits) - 1)
69
70/*
71 * Opcode effective-address decode tables.
72 * Note that we only emulate instructions that have at least one memory
73 * operand (excluding implicit stack references). We assume that stack
74 * references and instruction fetches will never occur in special memory
75 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
76 * not be handled.
77 */
78
79/* Operand sizes: 8-bit operands or specified/overridden size. */
80#define ByteOp      (1<<0)	/* 8-bit operands. */
81/* Destination operand type. */
82#define DstShift    1
83#define ImplicitOps (OpImplicit << DstShift)
84#define DstReg      (OpReg << DstShift)
85#define DstMem      (OpMem << DstShift)
86#define DstAcc      (OpAcc << DstShift)
87#define DstDI       (OpDI << DstShift)
88#define DstMem64    (OpMem64 << DstShift)
89#define DstImmUByte (OpImmUByte << DstShift)
90#define DstDX       (OpDX << DstShift)
91#define DstAccLo    (OpAccLo << DstShift)
92#define DstMask     (OpMask << DstShift)
93/* Source operand type. */
94#define SrcShift    6
95#define SrcNone     (OpNone << SrcShift)
96#define SrcReg      (OpReg << SrcShift)
97#define SrcMem      (OpMem << SrcShift)
98#define SrcMem16    (OpMem16 << SrcShift)
99#define SrcMem32    (OpMem32 << SrcShift)
100#define SrcImm      (OpImm << SrcShift)
101#define SrcImmByte  (OpImmByte << SrcShift)
102#define SrcOne      (OpOne << SrcShift)
103#define SrcImmUByte (OpImmUByte << SrcShift)
104#define SrcImmU     (OpImmU << SrcShift)
105#define SrcSI       (OpSI << SrcShift)
106#define SrcXLat     (OpXLat << SrcShift)
107#define SrcImmFAddr (OpImmFAddr << SrcShift)
108#define SrcMemFAddr (OpMemFAddr << SrcShift)
109#define SrcAcc      (OpAcc << SrcShift)
110#define SrcImmU16   (OpImmU16 << SrcShift)
111#define SrcImm64    (OpImm64 << SrcShift)
112#define SrcDX       (OpDX << SrcShift)
113#define SrcMem8     (OpMem8 << SrcShift)
114#define SrcAccHi    (OpAccHi << SrcShift)
115#define SrcMask     (OpMask << SrcShift)
116#define BitOp       (1<<11)
117#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
118#define String      (1<<13)     /* String instruction (rep capable) */
119#define Stack       (1<<14)     /* Stack instruction (push/pop) */
120#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
121#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
122#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
123#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
124#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
125#define Escape      (5<<15)     /* Escape to coprocessor instruction */
126#define Sse         (1<<18)     /* SSE Vector instruction */
127/* Generic ModRM decode. */
128#define ModRM       (1<<19)
129/* Destination is only written; never read. */
130#define Mov         (1<<20)
131/* Misc flags */
132#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
133#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
134#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
135#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
136#define Undefined   (1<<25) /* No Such Instruction */
137#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
138#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
139#define No64	    (1<<28)
140#define PageTable   (1 << 29)   /* instruction used to write page table */
141#define NotImpl     (1 << 30)   /* instruction is not implemented */
142/* Source 2 operand type */
143#define Src2Shift   (31)
144#define Src2None    (OpNone << Src2Shift)
145#define Src2Mem     (OpMem << Src2Shift)
146#define Src2CL      (OpCL << Src2Shift)
147#define Src2ImmByte (OpImmByte << Src2Shift)
148#define Src2One     (OpOne << Src2Shift)
149#define Src2Imm     (OpImm << Src2Shift)
150#define Src2ES      (OpES << Src2Shift)
151#define Src2CS      (OpCS << Src2Shift)
152#define Src2SS      (OpSS << Src2Shift)
153#define Src2DS      (OpDS << Src2Shift)
154#define Src2FS      (OpFS << Src2Shift)
155#define Src2GS      (OpGS << Src2Shift)
156#define Src2Mask    (OpMask << Src2Shift)
157#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
158#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
159#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
160#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
161#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
162#define NoWrite     ((u64)1 << 45)  /* No writeback */
163#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
164#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
165#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
166#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
167#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
168#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
169
170#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
171
172#define X2(x...) x, x
173#define X3(x...) X2(x), x
174#define X4(x...) X2(x), X2(x)
175#define X5(x...) X4(x), x
176#define X6(x...) X4(x), X2(x)
177#define X7(x...) X4(x), X3(x)
178#define X8(x...) X4(x), X4(x)
179#define X16(x...) X8(x), X8(x)
180
181#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
182#define FASTOP_SIZE 8
183
184/*
185 * fastop functions have a special calling convention:
186 *
187 * dst:    rax        (in/out)
188 * src:    rdx        (in/out)
189 * src2:   rcx        (in)
190 * flags:  rflags     (in/out)
191 * ex:     rsi        (in:fastop pointer, out:zero if exception)
192 *
193 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
194 * different operand sizes can be reached by calculation, rather than a jump
195 * table (which would be bigger than the code).
196 *
197 * fastop functions are declared as taking a never-defined fastop parameter,
198 * so they can't be called from C directly.
199 */
200
201struct fastop;
202
203struct opcode {
204	u64 flags : 56;
205	u64 intercept : 8;
206	union {
207		int (*execute)(struct x86_emulate_ctxt *ctxt);
208		const struct opcode *group;
209		const struct group_dual *gdual;
210		const struct gprefix *gprefix;
211		const struct escape *esc;
212		void (*fastop)(struct fastop *fake);
213	} u;
214	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
215};
216
217struct group_dual {
218	struct opcode mod012[8];
219	struct opcode mod3[8];
220};
221
222struct gprefix {
223	struct opcode pfx_no;
224	struct opcode pfx_66;
225	struct opcode pfx_f2;
226	struct opcode pfx_f3;
227};
228
229struct escape {
230	struct opcode op[8];
231	struct opcode high[64];
232};
233
234/* EFLAGS bit definitions. */
235#define EFLG_ID (1<<21)
236#define EFLG_VIP (1<<20)
237#define EFLG_VIF (1<<19)
238#define EFLG_AC (1<<18)
239#define EFLG_VM (1<<17)
240#define EFLG_RF (1<<16)
241#define EFLG_IOPL (3<<12)
242#define EFLG_NT (1<<14)
243#define EFLG_OF (1<<11)
244#define EFLG_DF (1<<10)
245#define EFLG_IF (1<<9)
246#define EFLG_TF (1<<8)
247#define EFLG_SF (1<<7)
248#define EFLG_ZF (1<<6)
249#define EFLG_AF (1<<4)
250#define EFLG_PF (1<<2)
251#define EFLG_CF (1<<0)
252
253#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
254#define EFLG_RESERVED_ONE_MASK 2
255
256static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
257{
258	if (!(ctxt->regs_valid & (1 << nr))) {
259		ctxt->regs_valid |= 1 << nr;
260		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
261	}
262	return ctxt->_regs[nr];
263}
264
265static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
266{
267	ctxt->regs_valid |= 1 << nr;
268	ctxt->regs_dirty |= 1 << nr;
269	return &ctxt->_regs[nr];
270}
271
272static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
273{
274	reg_read(ctxt, nr);
275	return reg_write(ctxt, nr);
276}
277
278static void writeback_registers(struct x86_emulate_ctxt *ctxt)
279{
280	unsigned reg;
281
282	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
283		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
284}
285
286static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
287{
288	ctxt->regs_dirty = 0;
289	ctxt->regs_valid = 0;
290}
291
292/*
293 * These EFLAGS bits are restored from saved value during emulation, and
294 * any changes are written back to the saved value after emulation.
295 */
296#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
297
298#ifdef CONFIG_X86_64
299#define ON64(x) x
300#else
301#define ON64(x)
302#endif
303
304static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
305
306#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
307#define FOP_RET   "ret \n\t"
308
309#define FOP_START(op) \
310	extern void em_##op(struct fastop *fake); \
311	asm(".pushsection .text, \"ax\" \n\t" \
312	    ".global em_" #op " \n\t" \
313            FOP_ALIGN \
314	    "em_" #op ": \n\t"
315
316#define FOP_END \
317	    ".popsection")
318
319#define FOPNOP() FOP_ALIGN FOP_RET
320
321#define FOP1E(op,  dst) \
322	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET
323
324#define FOP1EEX(op,  dst) \
325	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
326
327#define FASTOP1(op) \
328	FOP_START(op) \
329	FOP1E(op##b, al) \
330	FOP1E(op##w, ax) \
331	FOP1E(op##l, eax) \
332	ON64(FOP1E(op##q, rax))	\
333	FOP_END
334
335/* 1-operand, using src2 (for MUL/DIV r/m) */
336#define FASTOP1SRC2(op, name) \
337	FOP_START(name) \
338	FOP1E(op, cl) \
339	FOP1E(op, cx) \
340	FOP1E(op, ecx) \
341	ON64(FOP1E(op, rcx)) \
342	FOP_END
343
344/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
345#define FASTOP1SRC2EX(op, name) \
346	FOP_START(name) \
347	FOP1EEX(op, cl) \
348	FOP1EEX(op, cx) \
349	FOP1EEX(op, ecx) \
350	ON64(FOP1EEX(op, rcx)) \
351	FOP_END
352
353#define FOP2E(op,  dst, src)	   \
354	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET
355
356#define FASTOP2(op) \
357	FOP_START(op) \
358	FOP2E(op##b, al, dl) \
359	FOP2E(op##w, ax, dx) \
360	FOP2E(op##l, eax, edx) \
361	ON64(FOP2E(op##q, rax, rdx)) \
362	FOP_END
363
364/* 2 operand, word only */
365#define FASTOP2W(op) \
366	FOP_START(op) \
367	FOPNOP() \
368	FOP2E(op##w, ax, dx) \
369	FOP2E(op##l, eax, edx) \
370	ON64(FOP2E(op##q, rax, rdx)) \
371	FOP_END
372
373/* 2 operand, src is CL */
374#define FASTOP2CL(op) \
375	FOP_START(op) \
376	FOP2E(op##b, al, cl) \
377	FOP2E(op##w, ax, cl) \
378	FOP2E(op##l, eax, cl) \
379	ON64(FOP2E(op##q, rax, cl)) \
380	FOP_END
381
382#define FOP3E(op,  dst, src, src2) \
383	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
384
385/* 3-operand, word-only, src2=cl */
386#define FASTOP3WCL(op) \
387	FOP_START(op) \
388	FOPNOP() \
389	FOP3E(op##w, ax, dx, cl) \
390	FOP3E(op##l, eax, edx, cl) \
391	ON64(FOP3E(op##q, rax, rdx, cl)) \
392	FOP_END
393
394/* Special case for SETcc - 1 instruction per cc */
395#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
396
397asm(".global kvm_fastop_exception \n"
398    "kvm_fastop_exception: xor %esi, %esi; ret");
399
400FOP_START(setcc)
401FOP_SETCC(seto)
402FOP_SETCC(setno)
403FOP_SETCC(setc)
404FOP_SETCC(setnc)
405FOP_SETCC(setz)
406FOP_SETCC(setnz)
407FOP_SETCC(setbe)
408FOP_SETCC(setnbe)
409FOP_SETCC(sets)
410FOP_SETCC(setns)
411FOP_SETCC(setp)
412FOP_SETCC(setnp)
413FOP_SETCC(setl)
414FOP_SETCC(setnl)
415FOP_SETCC(setle)
416FOP_SETCC(setnle)
417FOP_END;
418
419FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
420FOP_END;
421
422static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
423				    enum x86_intercept intercept,
424				    enum x86_intercept_stage stage)
425{
426	struct x86_instruction_info info = {
427		.intercept  = intercept,
428		.rep_prefix = ctxt->rep_prefix,
429		.modrm_mod  = ctxt->modrm_mod,
430		.modrm_reg  = ctxt->modrm_reg,
431		.modrm_rm   = ctxt->modrm_rm,
432		.src_val    = ctxt->src.val64,
433		.dst_val    = ctxt->dst.val64,
434		.src_bytes  = ctxt->src.bytes,
435		.dst_bytes  = ctxt->dst.bytes,
436		.ad_bytes   = ctxt->ad_bytes,
437		.next_rip   = ctxt->eip,
438	};
439
440	return ctxt->ops->intercept(ctxt, &info, stage);
441}
442
443static void assign_masked(ulong *dest, ulong src, ulong mask)
444{
445	*dest = (*dest & ~mask) | (src & mask);
446}
447
448static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
449{
450	return (1UL << (ctxt->ad_bytes << 3)) - 1;
451}
452
453static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
454{
455	u16 sel;
456	struct desc_struct ss;
457
458	if (ctxt->mode == X86EMUL_MODE_PROT64)
459		return ~0UL;
460	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
461	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
462}
463
464static int stack_size(struct x86_emulate_ctxt *ctxt)
465{
466	return (__fls(stack_mask(ctxt)) + 1) >> 3;
467}
468
469/* Access/update address held in a register, based on addressing mode. */
470static inline unsigned long
471address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
472{
473	if (ctxt->ad_bytes == sizeof(unsigned long))
474		return reg;
475	else
476		return reg & ad_mask(ctxt);
477}
478
479static inline unsigned long
480register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
481{
482	return address_mask(ctxt, reg);
483}
484
485static void masked_increment(ulong *reg, ulong mask, int inc)
486{
487	assign_masked(reg, *reg + inc, mask);
488}
489
490static inline void
491register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
492{
493	ulong mask;
494
495	if (ctxt->ad_bytes == sizeof(unsigned long))
496		mask = ~0UL;
497	else
498		mask = ad_mask(ctxt);
499	masked_increment(reg, mask, inc);
500}
501
502static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
503{
504	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
505}
506
507static u32 desc_limit_scaled(struct desc_struct *desc)
508{
509	u32 limit = get_desc_limit(desc);
510
511	return desc->g ? (limit << 12) | 0xfff : limit;
512}
513
514static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
515{
516	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
517		return 0;
518
519	return ctxt->ops->get_cached_segment_base(ctxt, seg);
520}
521
522static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
523			     u32 error, bool valid)
524{
525	WARN_ON(vec > 0x1f);
526	ctxt->exception.vector = vec;
527	ctxt->exception.error_code = error;
528	ctxt->exception.error_code_valid = valid;
529	return X86EMUL_PROPAGATE_FAULT;
530}
531
532static int emulate_db(struct x86_emulate_ctxt *ctxt)
533{
534	return emulate_exception(ctxt, DB_VECTOR, 0, false);
535}
536
537static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
538{
539	return emulate_exception(ctxt, GP_VECTOR, err, true);
540}
541
542static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
543{
544	return emulate_exception(ctxt, SS_VECTOR, err, true);
545}
546
547static int emulate_ud(struct x86_emulate_ctxt *ctxt)
548{
549	return emulate_exception(ctxt, UD_VECTOR, 0, false);
550}
551
552static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
553{
554	return emulate_exception(ctxt, TS_VECTOR, err, true);
555}
556
557static int emulate_de(struct x86_emulate_ctxt *ctxt)
558{
559	return emulate_exception(ctxt, DE_VECTOR, 0, false);
560}
561
562static int emulate_nm(struct x86_emulate_ctxt *ctxt)
563{
564	return emulate_exception(ctxt, NM_VECTOR, 0, false);
565}
566
567static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
568			       int cs_l)
569{
570	switch (ctxt->op_bytes) {
571	case 2:
572		ctxt->_eip = (u16)dst;
573		break;
574	case 4:
575		ctxt->_eip = (u32)dst;
576		break;
577#ifdef CONFIG_X86_64
578	case 8:
579		if ((cs_l && is_noncanonical_address(dst)) ||
580		    (!cs_l && (dst >> 32) != 0))
581			return emulate_gp(ctxt, 0);
582		ctxt->_eip = dst;
583		break;
584#endif
585	default:
586		WARN(1, "unsupported eip assignment size\n");
587	}
588	return X86EMUL_CONTINUE;
589}
590
591static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
592{
593	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
594}
595
596static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
597{
598	return assign_eip_near(ctxt, ctxt->_eip + rel);
599}
600
601static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
602{
603	u16 selector;
604	struct desc_struct desc;
605
606	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
607	return selector;
608}
609
610static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
611				 unsigned seg)
612{
613	u16 dummy;
614	u32 base3;
615	struct desc_struct desc;
616
617	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
618	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
619}
620
621/*
622 * x86 defines three classes of vector instructions: explicitly
623 * aligned, explicitly unaligned, and the rest, which change behaviour
624 * depending on whether they're AVX encoded or not.
625 *
626 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
627 * subject to the same check.
628 */
629static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
630{
631	if (likely(size < 16))
632		return false;
633
634	if (ctxt->d & Aligned)
635		return true;
636	else if (ctxt->d & Unaligned)
637		return false;
638	else if (ctxt->d & Avx)
639		return false;
640	else
641		return true;
642}
643
644static int __linearize(struct x86_emulate_ctxt *ctxt,
645		     struct segmented_address addr,
646		     unsigned *max_size, unsigned size,
647		     bool write, bool fetch,
648		     ulong *linear)
649{
650	struct desc_struct desc;
651	bool usable;
652	ulong la;
653	u32 lim;
654	u16 sel;
655	unsigned cpl;
656
657	la = seg_base(ctxt, addr.seg) + addr.ea;
658	*max_size = 0;
659	switch (ctxt->mode) {
660	case X86EMUL_MODE_PROT64:
661		if (((signed long)la << 16) >> 16 != la)
662			return emulate_gp(ctxt, 0);
663
664		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
665		if (size > *max_size)
666			goto bad;
667		break;
668	default:
669		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
670						addr.seg);
671		if (!usable)
672			goto bad;
673		/* code segment in protected mode or read-only data segment */
674		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
675					|| !(desc.type & 2)) && write)
676			goto bad;
677		/* unreadable code segment */
678		if (!fetch && (desc.type & 8) && !(desc.type & 2))
679			goto bad;
680		lim = desc_limit_scaled(&desc);
681		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
682		    (ctxt->d & NoBigReal)) {
683			/* la is between zero and 0xffff */
684			if (la > 0xffff)
685				goto bad;
686			*max_size = 0x10000 - la;
687		} else if ((desc.type & 8) || !(desc.type & 4)) {
688			/* expand-up segment */
689			if (addr.ea > lim)
690				goto bad;
691			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
692		} else {
693			/* expand-down segment */
694			if (addr.ea <= lim)
695				goto bad;
696			lim = desc.d ? 0xffffffff : 0xffff;
697			if (addr.ea > lim)
698				goto bad;
699			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
700		}
701		if (size > *max_size)
702			goto bad;
703		cpl = ctxt->ops->cpl(ctxt);
704		if (!(desc.type & 8)) {
705			/* data segment */
706			if (cpl > desc.dpl)
707				goto bad;
708		} else if ((desc.type & 8) && !(desc.type & 4)) {
709			/* nonconforming code segment */
710			if (cpl != desc.dpl)
711				goto bad;
712		} else if ((desc.type & 8) && (desc.type & 4)) {
713			/* conforming code segment */
714			if (cpl < desc.dpl)
715				goto bad;
716		}
717		break;
718	}
719	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
720		la &= (u32)-1;
721	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
722		return emulate_gp(ctxt, 0);
723	*linear = la;
724	return X86EMUL_CONTINUE;
725bad:
726	if (addr.seg == VCPU_SREG_SS)
727		return emulate_ss(ctxt, 0);
728	else
729		return emulate_gp(ctxt, 0);
730}
731
732static int linearize(struct x86_emulate_ctxt *ctxt,
733		     struct segmented_address addr,
734		     unsigned size, bool write,
735		     ulong *linear)
736{
737	unsigned max_size;
738	return __linearize(ctxt, addr, &max_size, size, write, false, linear);
739}
740
741
742static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
743			      struct segmented_address addr,
744			      void *data,
745			      unsigned size)
746{
747	int rc;
748	ulong linear;
749
750	rc = linearize(ctxt, addr, size, false, &linear);
751	if (rc != X86EMUL_CONTINUE)
752		return rc;
753	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
754}
755
756/*
757 * Prefetch the remaining bytes of the instruction without crossing page
758 * boundary if they are not in fetch_cache yet.
759 */
760static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
761{
762	int rc;
763	unsigned size, max_size;
764	unsigned long linear;
765	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
766	struct segmented_address addr = { .seg = VCPU_SREG_CS,
767					   .ea = ctxt->eip + cur_size };
768
769	/*
770	 * We do not know exactly how many bytes will be needed, and
771	 * __linearize is expensive, so fetch as much as possible.  We
772	 * just have to avoid going beyond the 15 byte limit, the end
773	 * of the segment, or the end of the page.
774	 *
775	 * __linearize is called with size 0 so that it does not do any
776	 * boundary check itself.  Instead, we use max_size to check
777	 * against op_size.
778	 */
779	rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
780	if (unlikely(rc != X86EMUL_CONTINUE))
781		return rc;
782
783	size = min_t(unsigned, 15UL ^ cur_size, max_size);
784	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
785
786	/*
787	 * One instruction can only straddle two pages,
788	 * and one has been loaded at the beginning of
789	 * x86_decode_insn.  So, if not enough bytes
790	 * still, we must have hit the 15-byte boundary.
791	 */
792	if (unlikely(size < op_size))
793		return emulate_gp(ctxt, 0);
794
795	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
796			      size, &ctxt->exception);
797	if (unlikely(rc != X86EMUL_CONTINUE))
798		return rc;
799	ctxt->fetch.end += size;
800	return X86EMUL_CONTINUE;
801}
802
803static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
804					       unsigned size)
805{
806	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
807
808	if (unlikely(done_size < size))
809		return __do_insn_fetch_bytes(ctxt, size - done_size);
810	else
811		return X86EMUL_CONTINUE;
812}
813
814/* Fetch next part of the instruction being emulated. */
815#define insn_fetch(_type, _ctxt)					\
816({	_type _x;							\
817									\
818	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
819	if (rc != X86EMUL_CONTINUE)					\
820		goto done;						\
821	ctxt->_eip += sizeof(_type);					\
822	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
823	ctxt->fetch.ptr += sizeof(_type);				\
824	_x;								\
825})
826
827#define insn_fetch_arr(_arr, _size, _ctxt)				\
828({									\
829	rc = do_insn_fetch_bytes(_ctxt, _size);				\
830	if (rc != X86EMUL_CONTINUE)					\
831		goto done;						\
832	ctxt->_eip += (_size);						\
833	memcpy(_arr, ctxt->fetch.ptr, _size);				\
834	ctxt->fetch.ptr += (_size);					\
835})
836
837/*
838 * Given the 'reg' portion of a ModRM byte, and a register block, return a
839 * pointer into the block that addresses the relevant register.
840 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
841 */
842static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
843			     int byteop)
844{
845	void *p;
846	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
847
848	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
849		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
850	else
851		p = reg_rmw(ctxt, modrm_reg);
852	return p;
853}
854
855static int read_descriptor(struct x86_emulate_ctxt *ctxt,
856			   struct segmented_address addr,
857			   u16 *size, unsigned long *address, int op_bytes)
858{
859	int rc;
860
861	if (op_bytes == 2)
862		op_bytes = 3;
863	*address = 0;
864	rc = segmented_read_std(ctxt, addr, size, 2);
865	if (rc != X86EMUL_CONTINUE)
866		return rc;
867	addr.ea += 2;
868	rc = segmented_read_std(ctxt, addr, address, op_bytes);
869	return rc;
870}
871
872FASTOP2(add);
873FASTOP2(or);
874FASTOP2(adc);
875FASTOP2(sbb);
876FASTOP2(and);
877FASTOP2(sub);
878FASTOP2(xor);
879FASTOP2(cmp);
880FASTOP2(test);
881
882FASTOP1SRC2(mul, mul_ex);
883FASTOP1SRC2(imul, imul_ex);
884FASTOP1SRC2EX(div, div_ex);
885FASTOP1SRC2EX(idiv, idiv_ex);
886
887FASTOP3WCL(shld);
888FASTOP3WCL(shrd);
889
890FASTOP2W(imul);
891
892FASTOP1(not);
893FASTOP1(neg);
894FASTOP1(inc);
895FASTOP1(dec);
896
897FASTOP2CL(rol);
898FASTOP2CL(ror);
899FASTOP2CL(rcl);
900FASTOP2CL(rcr);
901FASTOP2CL(shl);
902FASTOP2CL(shr);
903FASTOP2CL(sar);
904
905FASTOP2W(bsf);
906FASTOP2W(bsr);
907FASTOP2W(bt);
908FASTOP2W(bts);
909FASTOP2W(btr);
910FASTOP2W(btc);
911
912FASTOP2(xadd);
913
914static u8 test_cc(unsigned int condition, unsigned long flags)
915{
916	u8 rc;
917	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
918
919	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
920	asm("push %[flags]; popf; call *%[fastop]"
921	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
922	return rc;
923}
924
925static void fetch_register_operand(struct operand *op)
926{
927	switch (op->bytes) {
928	case 1:
929		op->val = *(u8 *)op->addr.reg;
930		break;
931	case 2:
932		op->val = *(u16 *)op->addr.reg;
933		break;
934	case 4:
935		op->val = *(u32 *)op->addr.reg;
936		break;
937	case 8:
938		op->val = *(u64 *)op->addr.reg;
939		break;
940	}
941}
942
943static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
944{
945	ctxt->ops->get_fpu(ctxt);
946	switch (reg) {
947	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
948	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
949	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
950	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
951	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
952	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
953	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
954	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
955#ifdef CONFIG_X86_64
956	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
957	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
958	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
959	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
960	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
961	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
962	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
963	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
964#endif
965	default: BUG();
966	}
967	ctxt->ops->put_fpu(ctxt);
968}
969
970static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
971			  int reg)
972{
973	ctxt->ops->get_fpu(ctxt);
974	switch (reg) {
975	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
976	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
977	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
978	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
979	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
980	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
981	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
982	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
983#ifdef CONFIG_X86_64
984	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
985	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
986	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
987	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
988	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
989	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
990	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
991	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
992#endif
993	default: BUG();
994	}
995	ctxt->ops->put_fpu(ctxt);
996}
997
998static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
999{
1000	ctxt->ops->get_fpu(ctxt);
1001	switch (reg) {
1002	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1003	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1004	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1005	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1006	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1007	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1008	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1009	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1010	default: BUG();
1011	}
1012	ctxt->ops->put_fpu(ctxt);
1013}
1014
1015static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1016{
1017	ctxt->ops->get_fpu(ctxt);
1018	switch (reg) {
1019	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1020	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1021	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1022	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1023	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1024	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1025	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1026	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1027	default: BUG();
1028	}
1029	ctxt->ops->put_fpu(ctxt);
1030}
1031
1032static int em_fninit(struct x86_emulate_ctxt *ctxt)
1033{
1034	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1035		return emulate_nm(ctxt);
1036
1037	ctxt->ops->get_fpu(ctxt);
1038	asm volatile("fninit");
1039	ctxt->ops->put_fpu(ctxt);
1040	return X86EMUL_CONTINUE;
1041}
1042
1043static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1044{
1045	u16 fcw;
1046
1047	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1048		return emulate_nm(ctxt);
1049
1050	ctxt->ops->get_fpu(ctxt);
1051	asm volatile("fnstcw %0": "+m"(fcw));
1052	ctxt->ops->put_fpu(ctxt);
1053
1054	/* force 2 byte destination */
1055	ctxt->dst.bytes = 2;
1056	ctxt->dst.val = fcw;
1057
1058	return X86EMUL_CONTINUE;
1059}
1060
1061static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1062{
1063	u16 fsw;
1064
1065	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1066		return emulate_nm(ctxt);
1067
1068	ctxt->ops->get_fpu(ctxt);
1069	asm volatile("fnstsw %0": "+m"(fsw));
1070	ctxt->ops->put_fpu(ctxt);
1071
1072	/* force 2 byte destination */
1073	ctxt->dst.bytes = 2;
1074	ctxt->dst.val = fsw;
1075
1076	return X86EMUL_CONTINUE;
1077}
1078
1079static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1080				    struct operand *op)
1081{
1082	unsigned reg = ctxt->modrm_reg;
1083
1084	if (!(ctxt->d & ModRM))
1085		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1086
1087	if (ctxt->d & Sse) {
1088		op->type = OP_XMM;
1089		op->bytes = 16;
1090		op->addr.xmm = reg;
1091		read_sse_reg(ctxt, &op->vec_val, reg);
1092		return;
1093	}
1094	if (ctxt->d & Mmx) {
1095		reg &= 7;
1096		op->type = OP_MM;
1097		op->bytes = 8;
1098		op->addr.mm = reg;
1099		return;
1100	}
1101
1102	op->type = OP_REG;
1103	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1104	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1105
1106	fetch_register_operand(op);
1107	op->orig_val = op->val;
1108}
1109
1110static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1111{
1112	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1113		ctxt->modrm_seg = VCPU_SREG_SS;
1114}
1115
1116static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1117			struct operand *op)
1118{
1119	u8 sib;
1120	int index_reg, base_reg, scale;
1121	int rc = X86EMUL_CONTINUE;
1122	ulong modrm_ea = 0;
1123
1124	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1125	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1126	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1127
1128	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1129	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1130	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1131	ctxt->modrm_seg = VCPU_SREG_DS;
1132
1133	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1134		op->type = OP_REG;
1135		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1136		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1137				ctxt->d & ByteOp);
1138		if (ctxt->d & Sse) {
1139			op->type = OP_XMM;
1140			op->bytes = 16;
1141			op->addr.xmm = ctxt->modrm_rm;
1142			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1143			return rc;
1144		}
1145		if (ctxt->d & Mmx) {
1146			op->type = OP_MM;
1147			op->bytes = 8;
1148			op->addr.mm = ctxt->modrm_rm & 7;
1149			return rc;
1150		}
1151		fetch_register_operand(op);
1152		return rc;
1153	}
1154
1155	op->type = OP_MEM;
1156
1157	if (ctxt->ad_bytes == 2) {
1158		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1159		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1160		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1161		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1162
1163		/* 16-bit ModR/M decode. */
1164		switch (ctxt->modrm_mod) {
1165		case 0:
1166			if (ctxt->modrm_rm == 6)
1167				modrm_ea += insn_fetch(u16, ctxt);
1168			break;
1169		case 1:
1170			modrm_ea += insn_fetch(s8, ctxt);
1171			break;
1172		case 2:
1173			modrm_ea += insn_fetch(u16, ctxt);
1174			break;
1175		}
1176		switch (ctxt->modrm_rm) {
1177		case 0:
1178			modrm_ea += bx + si;
1179			break;
1180		case 1:
1181			modrm_ea += bx + di;
1182			break;
1183		case 2:
1184			modrm_ea += bp + si;
1185			break;
1186		case 3:
1187			modrm_ea += bp + di;
1188			break;
1189		case 4:
1190			modrm_ea += si;
1191			break;
1192		case 5:
1193			modrm_ea += di;
1194			break;
1195		case 6:
1196			if (ctxt->modrm_mod != 0)
1197				modrm_ea += bp;
1198			break;
1199		case 7:
1200			modrm_ea += bx;
1201			break;
1202		}
1203		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1204		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1205			ctxt->modrm_seg = VCPU_SREG_SS;
1206		modrm_ea = (u16)modrm_ea;
1207	} else {
1208		/* 32/64-bit ModR/M decode. */
1209		if ((ctxt->modrm_rm & 7) == 4) {
1210			sib = insn_fetch(u8, ctxt);
1211			index_reg |= (sib >> 3) & 7;
1212			base_reg |= sib & 7;
1213			scale = sib >> 6;
1214
1215			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1216				modrm_ea += insn_fetch(s32, ctxt);
1217			else {
1218				modrm_ea += reg_read(ctxt, base_reg);
1219				adjust_modrm_seg(ctxt, base_reg);
1220			}
1221			if (index_reg != 4)
1222				modrm_ea += reg_read(ctxt, index_reg) << scale;
1223		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1224			if (ctxt->mode == X86EMUL_MODE_PROT64)
1225				ctxt->rip_relative = 1;
1226		} else {
1227			base_reg = ctxt->modrm_rm;
1228			modrm_ea += reg_read(ctxt, base_reg);
1229			adjust_modrm_seg(ctxt, base_reg);
1230		}
1231		switch (ctxt->modrm_mod) {
1232		case 0:
1233			if (ctxt->modrm_rm == 5)
1234				modrm_ea += insn_fetch(s32, ctxt);
1235			break;
1236		case 1:
1237			modrm_ea += insn_fetch(s8, ctxt);
1238			break;
1239		case 2:
1240			modrm_ea += insn_fetch(s32, ctxt);
1241			break;
1242		}
1243	}
1244	op->addr.mem.ea = modrm_ea;
1245	if (ctxt->ad_bytes != 8)
1246		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1247
1248done:
1249	return rc;
1250}
1251
1252static int decode_abs(struct x86_emulate_ctxt *ctxt,
1253		      struct operand *op)
1254{
1255	int rc = X86EMUL_CONTINUE;
1256
1257	op->type = OP_MEM;
1258	switch (ctxt->ad_bytes) {
1259	case 2:
1260		op->addr.mem.ea = insn_fetch(u16, ctxt);
1261		break;
1262	case 4:
1263		op->addr.mem.ea = insn_fetch(u32, ctxt);
1264		break;
1265	case 8:
1266		op->addr.mem.ea = insn_fetch(u64, ctxt);
1267		break;
1268	}
1269done:
1270	return rc;
1271}
1272
1273static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1274{
1275	long sv = 0, mask;
1276
1277	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1278		mask = ~((long)ctxt->dst.bytes * 8 - 1);
1279
1280		if (ctxt->src.bytes == 2)
1281			sv = (s16)ctxt->src.val & (s16)mask;
1282		else if (ctxt->src.bytes == 4)
1283			sv = (s32)ctxt->src.val & (s32)mask;
1284		else
1285			sv = (s64)ctxt->src.val & (s64)mask;
1286
1287		ctxt->dst.addr.mem.ea += (sv >> 3);
1288	}
1289
1290	/* only subword offset */
1291	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1292}
1293
1294static int read_emulated(struct x86_emulate_ctxt *ctxt,
1295			 unsigned long addr, void *dest, unsigned size)
1296{
1297	int rc;
1298	struct read_cache *mc = &ctxt->mem_read;
1299
1300	if (mc->pos < mc->end)
1301		goto read_cached;
1302
1303	WARN_ON((mc->end + size) >= sizeof(mc->data));
1304
1305	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1306				      &ctxt->exception);
1307	if (rc != X86EMUL_CONTINUE)
1308		return rc;
1309
1310	mc->end += size;
1311
1312read_cached:
1313	memcpy(dest, mc->data + mc->pos, size);
1314	mc->pos += size;
1315	return X86EMUL_CONTINUE;
1316}
1317
1318static int segmented_read(struct x86_emulate_ctxt *ctxt,
1319			  struct segmented_address addr,
1320			  void *data,
1321			  unsigned size)
1322{
1323	int rc;
1324	ulong linear;
1325
1326	rc = linearize(ctxt, addr, size, false, &linear);
1327	if (rc != X86EMUL_CONTINUE)
1328		return rc;
1329	return read_emulated(ctxt, linear, data, size);
1330}
1331
1332static int segmented_write(struct x86_emulate_ctxt *ctxt,
1333			   struct segmented_address addr,
1334			   const void *data,
1335			   unsigned size)
1336{
1337	int rc;
1338	ulong linear;
1339
1340	rc = linearize(ctxt, addr, size, true, &linear);
1341	if (rc != X86EMUL_CONTINUE)
1342		return rc;
1343	return ctxt->ops->write_emulated(ctxt, linear, data, size,
1344					 &ctxt->exception);
1345}
1346
1347static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1348			     struct segmented_address addr,
1349			     const void *orig_data, const void *data,
1350			     unsigned size)
1351{
1352	int rc;
1353	ulong linear;
1354
1355	rc = linearize(ctxt, addr, size, true, &linear);
1356	if (rc != X86EMUL_CONTINUE)
1357		return rc;
1358	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1359					   size, &ctxt->exception);
1360}
1361
1362static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1363			   unsigned int size, unsigned short port,
1364			   void *dest)
1365{
1366	struct read_cache *rc = &ctxt->io_read;
1367
1368	if (rc->pos == rc->end) { /* refill pio read ahead */
1369		unsigned int in_page, n;
1370		unsigned int count = ctxt->rep_prefix ?
1371			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1372		in_page = (ctxt->eflags & EFLG_DF) ?
1373			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1374			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1375		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1376		if (n == 0)
1377			n = 1;
1378		rc->pos = rc->end = 0;
1379		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1380			return 0;
1381		rc->end = n * size;
1382	}
1383
1384	if (ctxt->rep_prefix && (ctxt->d & String) &&
1385	    !(ctxt->eflags & EFLG_DF)) {
1386		ctxt->dst.data = rc->data + rc->pos;
1387		ctxt->dst.type = OP_MEM_STR;
1388		ctxt->dst.count = (rc->end - rc->pos) / size;
1389		rc->pos = rc->end;
1390	} else {
1391		memcpy(dest, rc->data + rc->pos, size);
1392		rc->pos += size;
1393	}
1394	return 1;
1395}
1396
1397static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1398				     u16 index, struct desc_struct *desc)
1399{
1400	struct desc_ptr dt;
1401	ulong addr;
1402
1403	ctxt->ops->get_idt(ctxt, &dt);
1404
1405	if (dt.size < index * 8 + 7)
1406		return emulate_gp(ctxt, index << 3 | 0x2);
1407
1408	addr = dt.address + index * 8;
1409	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1410				   &ctxt->exception);
1411}
1412
1413static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1414				     u16 selector, struct desc_ptr *dt)
1415{
1416	const struct x86_emulate_ops *ops = ctxt->ops;
1417	u32 base3 = 0;
1418
1419	if (selector & 1 << 2) {
1420		struct desc_struct desc;
1421		u16 sel;
1422
1423		memset (dt, 0, sizeof *dt);
1424		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1425				      VCPU_SREG_LDTR))
1426			return;
1427
1428		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1429		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1430	} else
1431		ops->get_gdt(ctxt, dt);
1432}
1433
1434/* allowed just for 8 bytes segments */
1435static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1436				   u16 selector, struct desc_struct *desc,
1437				   ulong *desc_addr_p)
1438{
1439	struct desc_ptr dt;
1440	u16 index = selector >> 3;
1441	ulong addr;
1442
1443	get_descriptor_table_ptr(ctxt, selector, &dt);
1444
1445	if (dt.size < index * 8 + 7)
1446		return emulate_gp(ctxt, selector & 0xfffc);
1447
1448	*desc_addr_p = addr = dt.address + index * 8;
1449	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1450				   &ctxt->exception);
1451}
1452
1453/* allowed just for 8 bytes segments */
1454static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1455				    u16 selector, struct desc_struct *desc)
1456{
1457	struct desc_ptr dt;
1458	u16 index = selector >> 3;
1459	ulong addr;
1460
1461	get_descriptor_table_ptr(ctxt, selector, &dt);
1462
1463	if (dt.size < index * 8 + 7)
1464		return emulate_gp(ctxt, selector & 0xfffc);
1465
1466	addr = dt.address + index * 8;
1467	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1468				    &ctxt->exception);
1469}
1470
1471/* Does not support long mode */
1472static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1473				     u16 selector, int seg, u8 cpl,
1474				     bool in_task_switch,
1475				     struct desc_struct *desc)
1476{
1477	struct desc_struct seg_desc, old_desc;
1478	u8 dpl, rpl;
1479	unsigned err_vec = GP_VECTOR;
1480	u32 err_code = 0;
1481	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1482	ulong desc_addr;
1483	int ret;
1484	u16 dummy;
1485	u32 base3 = 0;
1486
1487	memset(&seg_desc, 0, sizeof seg_desc);
1488
1489	if (ctxt->mode == X86EMUL_MODE_REAL) {
1490		/* set real mode segment descriptor (keep limit etc. for
1491		 * unreal mode) */
1492		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1493		set_desc_base(&seg_desc, selector << 4);
1494		goto load;
1495	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1496		/* VM86 needs a clean new segment descriptor */
1497		set_desc_base(&seg_desc, selector << 4);
1498		set_desc_limit(&seg_desc, 0xffff);
1499		seg_desc.type = 3;
1500		seg_desc.p = 1;
1501		seg_desc.s = 1;
1502		seg_desc.dpl = 3;
1503		goto load;
1504	}
1505
1506	rpl = selector & 3;
1507
1508	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
1509	if ((seg == VCPU_SREG_CS
1510	     || (seg == VCPU_SREG_SS
1511		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1512	     || seg == VCPU_SREG_TR)
1513	    && null_selector)
1514		goto exception;
1515
1516	/* TR should be in GDT only */
1517	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1518		goto exception;
1519
1520	if (null_selector) /* for NULL selector skip all following checks */
1521		goto load;
1522
1523	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1524	if (ret != X86EMUL_CONTINUE)
1525		return ret;
1526
1527	err_code = selector & 0xfffc;
1528	err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;
1529
1530	/* can't load system descriptor into segment selector */
1531	if (seg <= VCPU_SREG_GS && !seg_desc.s)
1532		goto exception;
1533
1534	if (!seg_desc.p) {
1535		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1536		goto exception;
1537	}
1538
1539	dpl = seg_desc.dpl;
1540
1541	switch (seg) {
1542	case VCPU_SREG_SS:
1543		/*
1544		 * segment is not a writable data segment or segment
1545		 * selector's RPL != CPL or segment selector's RPL != CPL
1546		 */
1547		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1548			goto exception;
1549		break;
1550	case VCPU_SREG_CS:
1551		if (!(seg_desc.type & 8))
1552			goto exception;
1553
1554		if (seg_desc.type & 4) {
1555			/* conforming */
1556			if (dpl > cpl)
1557				goto exception;
1558		} else {
1559			/* nonconforming */
1560			if (rpl > cpl || dpl != cpl)
1561				goto exception;
1562		}
1563		/* in long-mode d/b must be clear if l is set */
1564		if (seg_desc.d && seg_desc.l) {
1565			u64 efer = 0;
1566
1567			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1568			if (efer & EFER_LMA)
1569				goto exception;
1570		}
1571
1572		/* CS(RPL) <- CPL */
1573		selector = (selector & 0xfffc) | cpl;
1574		break;
1575	case VCPU_SREG_TR:
1576		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1577			goto exception;
1578		old_desc = seg_desc;
1579		seg_desc.type |= 2; /* busy */
1580		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1581						  sizeof(seg_desc), &ctxt->exception);
1582		if (ret != X86EMUL_CONTINUE)
1583			return ret;
1584		break;
1585	case VCPU_SREG_LDTR:
1586		if (seg_desc.s || seg_desc.type != 2)
1587			goto exception;
1588		break;
1589	default: /*  DS, ES, FS, or GS */
1590		/*
1591		 * segment is not a data or readable code segment or
1592		 * ((segment is a data or nonconforming code segment)
1593		 * and (both RPL and CPL > DPL))
1594		 */
1595		if ((seg_desc.type & 0xa) == 0x8 ||
1596		    (((seg_desc.type & 0xc) != 0xc) &&
1597		     (rpl > dpl && cpl > dpl)))
1598			goto exception;
1599		break;
1600	}
1601
1602	if (seg_desc.s) {
1603		/* mark segment as accessed */
1604		seg_desc.type |= 1;
1605		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1606		if (ret != X86EMUL_CONTINUE)
1607			return ret;
1608	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1609		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1610				sizeof(base3), &ctxt->exception);
1611		if (ret != X86EMUL_CONTINUE)
1612			return ret;
1613	}
1614load:
1615	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1616	if (desc)
1617		*desc = seg_desc;
1618	return X86EMUL_CONTINUE;
1619exception:
1620	return emulate_exception(ctxt, err_vec, err_code, true);
1621}
1622
1623static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1624				   u16 selector, int seg)
1625{
1626	u8 cpl = ctxt->ops->cpl(ctxt);
1627	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
1628}
1629
1630static void write_register_operand(struct operand *op)
1631{
1632	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1633	switch (op->bytes) {
1634	case 1:
1635		*(u8 *)op->addr.reg = (u8)op->val;
1636		break;
1637	case 2:
1638		*(u16 *)op->addr.reg = (u16)op->val;
1639		break;
1640	case 4:
1641		*op->addr.reg = (u32)op->val;
1642		break;	/* 64b: zero-extend */
1643	case 8:
1644		*op->addr.reg = op->val;
1645		break;
1646	}
1647}
1648
1649static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1650{
1651	switch (op->type) {
1652	case OP_REG:
1653		write_register_operand(op);
1654		break;
1655	case OP_MEM:
1656		if (ctxt->lock_prefix)
1657			return segmented_cmpxchg(ctxt,
1658						 op->addr.mem,
1659						 &op->orig_val,
1660						 &op->val,
1661						 op->bytes);
1662		else
1663			return segmented_write(ctxt,
1664					       op->addr.mem,
1665					       &op->val,
1666					       op->bytes);
1667		break;
1668	case OP_MEM_STR:
1669		return segmented_write(ctxt,
1670				       op->addr.mem,
1671				       op->data,
1672				       op->bytes * op->count);
1673		break;
1674	case OP_XMM:
1675		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1676		break;
1677	case OP_MM:
1678		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1679		break;
1680	case OP_NONE:
1681		/* no writeback */
1682		break;
1683	default:
1684		break;
1685	}
1686	return X86EMUL_CONTINUE;
1687}
1688
1689static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1690{
1691	struct segmented_address addr;
1692
1693	rsp_increment(ctxt, -bytes);
1694	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1695	addr.seg = VCPU_SREG_SS;
1696
1697	return segmented_write(ctxt, addr, data, bytes);
1698}
1699
1700static int em_push(struct x86_emulate_ctxt *ctxt)
1701{
1702	/* Disable writeback. */
1703	ctxt->dst.type = OP_NONE;
1704	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1705}
1706
1707static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1708		       void *dest, int len)
1709{
1710	int rc;
1711	struct segmented_address addr;
1712
1713	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1714	addr.seg = VCPU_SREG_SS;
1715	rc = segmented_read(ctxt, addr, dest, len);
1716	if (rc != X86EMUL_CONTINUE)
1717		return rc;
1718
1719	rsp_increment(ctxt, len);
1720	return rc;
1721}
1722
1723static int em_pop(struct x86_emulate_ctxt *ctxt)
1724{
1725	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1726}
1727
1728static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1729			void *dest, int len)
1730{
1731	int rc;
1732	unsigned long val, change_mask;
1733	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1734	int cpl = ctxt->ops->cpl(ctxt);
1735
1736	rc = emulate_pop(ctxt, &val, len);
1737	if (rc != X86EMUL_CONTINUE)
1738		return rc;
1739
1740	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1741		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
1742
1743	switch(ctxt->mode) {
1744	case X86EMUL_MODE_PROT64:
1745	case X86EMUL_MODE_PROT32:
1746	case X86EMUL_MODE_PROT16:
1747		if (cpl == 0)
1748			change_mask |= EFLG_IOPL;
1749		if (cpl <= iopl)
1750			change_mask |= EFLG_IF;
1751		break;
1752	case X86EMUL_MODE_VM86:
1753		if (iopl < 3)
1754			return emulate_gp(ctxt, 0);
1755		change_mask |= EFLG_IF;
1756		break;
1757	default: /* real mode */
1758		change_mask |= (EFLG_IOPL | EFLG_IF);
1759		break;
1760	}
1761
1762	*(unsigned long *)dest =
1763		(ctxt->eflags & ~change_mask) | (val & change_mask);
1764
1765	return rc;
1766}
1767
1768static int em_popf(struct x86_emulate_ctxt *ctxt)
1769{
1770	ctxt->dst.type = OP_REG;
1771	ctxt->dst.addr.reg = &ctxt->eflags;
1772	ctxt->dst.bytes = ctxt->op_bytes;
1773	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1774}
1775
1776static int em_enter(struct x86_emulate_ctxt *ctxt)
1777{
1778	int rc;
1779	unsigned frame_size = ctxt->src.val;
1780	unsigned nesting_level = ctxt->src2.val & 31;
1781	ulong rbp;
1782
1783	if (nesting_level)
1784		return X86EMUL_UNHANDLEABLE;
1785
1786	rbp = reg_read(ctxt, VCPU_REGS_RBP);
1787	rc = push(ctxt, &rbp, stack_size(ctxt));
1788	if (rc != X86EMUL_CONTINUE)
1789		return rc;
1790	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1791		      stack_mask(ctxt));
1792	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1793		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1794		      stack_mask(ctxt));
1795	return X86EMUL_CONTINUE;
1796}
1797
1798static int em_leave(struct x86_emulate_ctxt *ctxt)
1799{
1800	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1801		      stack_mask(ctxt));
1802	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1803}
1804
1805static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1806{
1807	int seg = ctxt->src2.val;
1808
1809	ctxt->src.val = get_segment_selector(ctxt, seg);
1810
1811	return em_push(ctxt);
1812}
1813
1814static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1815{
1816	int seg = ctxt->src2.val;
1817	unsigned long selector;
1818	int rc;
1819
1820	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1821	if (rc != X86EMUL_CONTINUE)
1822		return rc;
1823
1824	if (ctxt->modrm_reg == VCPU_SREG_SS)
1825		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1826
1827	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1828	return rc;
1829}
1830
1831static int em_pusha(struct x86_emulate_ctxt *ctxt)
1832{
1833	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1834	int rc = X86EMUL_CONTINUE;
1835	int reg = VCPU_REGS_RAX;
1836
1837	while (reg <= VCPU_REGS_RDI) {
1838		(reg == VCPU_REGS_RSP) ?
1839		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1840
1841		rc = em_push(ctxt);
1842		if (rc != X86EMUL_CONTINUE)
1843			return rc;
1844
1845		++reg;
1846	}
1847
1848	return rc;
1849}
1850
1851static int em_pushf(struct x86_emulate_ctxt *ctxt)
1852{
1853	ctxt->src.val =  (unsigned long)ctxt->eflags;
1854	return em_push(ctxt);
1855}
1856
1857static int em_popa(struct x86_emulate_ctxt *ctxt)
1858{
1859	int rc = X86EMUL_CONTINUE;
1860	int reg = VCPU_REGS_RDI;
1861
1862	while (reg >= VCPU_REGS_RAX) {
1863		if (reg == VCPU_REGS_RSP) {
1864			rsp_increment(ctxt, ctxt->op_bytes);
1865			--reg;
1866		}
1867
1868		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
1869		if (rc != X86EMUL_CONTINUE)
1870			break;
1871		--reg;
1872	}
1873	return rc;
1874}
1875
1876static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1877{
1878	const struct x86_emulate_ops *ops = ctxt->ops;
1879	int rc;
1880	struct desc_ptr dt;
1881	gva_t cs_addr;
1882	gva_t eip_addr;
1883	u16 cs, eip;
1884
1885	/* TODO: Add limit checks */
1886	ctxt->src.val = ctxt->eflags;
1887	rc = em_push(ctxt);
1888	if (rc != X86EMUL_CONTINUE)
1889		return rc;
1890
1891	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1892
1893	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1894	rc = em_push(ctxt);
1895	if (rc != X86EMUL_CONTINUE)
1896		return rc;
1897
1898	ctxt->src.val = ctxt->_eip;
1899	rc = em_push(ctxt);
1900	if (rc != X86EMUL_CONTINUE)
1901		return rc;
1902
1903	ops->get_idt(ctxt, &dt);
1904
1905	eip_addr = dt.address + (irq << 2);
1906	cs_addr = dt.address + (irq << 2) + 2;
1907
1908	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1909	if (rc != X86EMUL_CONTINUE)
1910		return rc;
1911
1912	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1913	if (rc != X86EMUL_CONTINUE)
1914		return rc;
1915
1916	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1917	if (rc != X86EMUL_CONTINUE)
1918		return rc;
1919
1920	ctxt->_eip = eip;
1921
1922	return rc;
1923}
1924
1925int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1926{
1927	int rc;
1928
1929	invalidate_registers(ctxt);
1930	rc = __emulate_int_real(ctxt, irq);
1931	if (rc == X86EMUL_CONTINUE)
1932		writeback_registers(ctxt);
1933	return rc;
1934}
1935
1936static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1937{
1938	switch(ctxt->mode) {
1939	case X86EMUL_MODE_REAL:
1940		return __emulate_int_real(ctxt, irq);
1941	case X86EMUL_MODE_VM86:
1942	case X86EMUL_MODE_PROT16:
1943	case X86EMUL_MODE_PROT32:
1944	case X86EMUL_MODE_PROT64:
1945	default:
1946		/* Protected mode interrupts unimplemented yet */
1947		return X86EMUL_UNHANDLEABLE;
1948	}
1949}
1950
1951static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1952{
1953	int rc = X86EMUL_CONTINUE;
1954	unsigned long temp_eip = 0;
1955	unsigned long temp_eflags = 0;
1956	unsigned long cs = 0;
1957	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1958			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1959			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1960	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1961
1962	/* TODO: Add stack limit check */
1963
1964	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1965
1966	if (rc != X86EMUL_CONTINUE)
1967		return rc;
1968
1969	if (temp_eip & ~0xffff)
1970		return emulate_gp(ctxt, 0);
1971
1972	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1973
1974	if (rc != X86EMUL_CONTINUE)
1975		return rc;
1976
1977	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1978
1979	if (rc != X86EMUL_CONTINUE)
1980		return rc;
1981
1982	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1983
1984	if (rc != X86EMUL_CONTINUE)
1985		return rc;
1986
1987	ctxt->_eip = temp_eip;
1988
1989
1990	if (ctxt->op_bytes == 4)
1991		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1992	else if (ctxt->op_bytes == 2) {
1993		ctxt->eflags &= ~0xffff;
1994		ctxt->eflags |= temp_eflags;
1995	}
1996
1997	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1998	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1999
2000	return rc;
2001}
2002
2003static int em_iret(struct x86_emulate_ctxt *ctxt)
2004{
2005	switch(ctxt->mode) {
2006	case X86EMUL_MODE_REAL:
2007		return emulate_iret_real(ctxt);
2008	case X86EMUL_MODE_VM86:
2009	case X86EMUL_MODE_PROT16:
2010	case X86EMUL_MODE_PROT32:
2011	case X86EMUL_MODE_PROT64:
2012	default:
2013		/* iret from protected mode unimplemented yet */
2014		return X86EMUL_UNHANDLEABLE;
2015	}
2016}
2017
2018static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2019{
2020	int rc;
2021	unsigned short sel, old_sel;
2022	struct desc_struct old_desc, new_desc;
2023	const struct x86_emulate_ops *ops = ctxt->ops;
2024	u8 cpl = ctxt->ops->cpl(ctxt);
2025
2026	/* Assignment of RIP may only fail in 64-bit mode */
2027	if (ctxt->mode == X86EMUL_MODE_PROT64)
2028		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2029				 VCPU_SREG_CS);
2030
2031	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2032
2033	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
2034				       &new_desc);
2035	if (rc != X86EMUL_CONTINUE)
2036		return rc;
2037
2038	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
2039	if (rc != X86EMUL_CONTINUE) {
2040		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2041		/* assigning eip failed; restore the old cs */
2042		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2043		return rc;
2044	}
2045	return rc;
2046}
2047
2048static int em_grp45(struct x86_emulate_ctxt *ctxt)
2049{
2050	int rc = X86EMUL_CONTINUE;
2051
2052	switch (ctxt->modrm_reg) {
2053	case 2: /* call near abs */ {
2054		long int old_eip;
2055		old_eip = ctxt->_eip;
2056		rc = assign_eip_near(ctxt, ctxt->src.val);
2057		if (rc != X86EMUL_CONTINUE)
2058			break;
2059		ctxt->src.val = old_eip;
2060		rc = em_push(ctxt);
2061		break;
2062	}
2063	case 4: /* jmp abs */
2064		rc = assign_eip_near(ctxt, ctxt->src.val);
2065		break;
2066	case 5: /* jmp far */
2067		rc = em_jmp_far(ctxt);
2068		break;
2069	case 6:	/* push */
2070		rc = em_push(ctxt);
2071		break;
2072	}
2073	return rc;
2074}
2075
2076static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2077{
2078	u64 old = ctxt->dst.orig_val64;
2079
2080	if (ctxt->dst.bytes == 16)
2081		return X86EMUL_UNHANDLEABLE;
2082
2083	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2084	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2085		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2086		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2087		ctxt->eflags &= ~EFLG_ZF;
2088	} else {
2089		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2090			(u32) reg_read(ctxt, VCPU_REGS_RBX);
2091
2092		ctxt->eflags |= EFLG_ZF;
2093	}
2094	return X86EMUL_CONTINUE;
2095}
2096
2097static int em_ret(struct x86_emulate_ctxt *ctxt)
2098{
2099	int rc;
2100	unsigned long eip;
2101
2102	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2103	if (rc != X86EMUL_CONTINUE)
2104		return rc;
2105
2106	return assign_eip_near(ctxt, eip);
2107}
2108
2109static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2110{
2111	int rc;
2112	unsigned long eip, cs;
2113	u16 old_cs;
2114	int cpl = ctxt->ops->cpl(ctxt);
2115	struct desc_struct old_desc, new_desc;
2116	const struct x86_emulate_ops *ops = ctxt->ops;
2117
2118	if (ctxt->mode == X86EMUL_MODE_PROT64)
2119		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2120				 VCPU_SREG_CS);
2121
2122	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2123	if (rc != X86EMUL_CONTINUE)
2124		return rc;
2125	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2126	if (rc != X86EMUL_CONTINUE)
2127		return rc;
2128	/* Outer-privilege level return is not implemented */
2129	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2130		return X86EMUL_UNHANDLEABLE;
2131	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
2132				       &new_desc);
2133	if (rc != X86EMUL_CONTINUE)
2134		return rc;
2135	rc = assign_eip_far(ctxt, eip, new_desc.l);
2136	if (rc != X86EMUL_CONTINUE) {
2137		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2138		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2139	}
2140	return rc;
2141}
2142
2143static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2144{
2145        int rc;
2146
2147        rc = em_ret_far(ctxt);
2148        if (rc != X86EMUL_CONTINUE)
2149                return rc;
2150        rsp_increment(ctxt, ctxt->src.val);
2151        return X86EMUL_CONTINUE;
2152}
2153
2154static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2155{
2156	/* Save real source value, then compare EAX against destination. */
2157	ctxt->dst.orig_val = ctxt->dst.val;
2158	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2159	ctxt->src.orig_val = ctxt->src.val;
2160	ctxt->src.val = ctxt->dst.orig_val;
2161	fastop(ctxt, em_cmp);
2162
2163	if (ctxt->eflags & EFLG_ZF) {
2164		/* Success: write back to memory. */
2165		ctxt->dst.val = ctxt->src.orig_val;
2166	} else {
2167		/* Failure: write the value we saw to EAX. */
2168		ctxt->dst.type = OP_REG;
2169		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2170		ctxt->dst.val = ctxt->dst.orig_val;
2171	}
2172	return X86EMUL_CONTINUE;
2173}
2174
2175static int em_lseg(struct x86_emulate_ctxt *ctxt)
2176{
2177	int seg = ctxt->src2.val;
2178	unsigned short sel;
2179	int rc;
2180
2181	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2182
2183	rc = load_segment_descriptor(ctxt, sel, seg);
2184	if (rc != X86EMUL_CONTINUE)
2185		return rc;
2186
2187	ctxt->dst.val = ctxt->src.val;
2188	return rc;
2189}
2190
2191static void
2192setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2193			struct desc_struct *cs, struct desc_struct *ss)
2194{
2195	cs->l = 0;		/* will be adjusted later */
2196	set_desc_base(cs, 0);	/* flat segment */
2197	cs->g = 1;		/* 4kb granularity */
2198	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
2199	cs->type = 0x0b;	/* Read, Execute, Accessed */
2200	cs->s = 1;
2201	cs->dpl = 0;		/* will be adjusted later */
2202	cs->p = 1;
2203	cs->d = 1;
2204	cs->avl = 0;
2205
2206	set_desc_base(ss, 0);	/* flat segment */
2207	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
2208	ss->g = 1;		/* 4kb granularity */
2209	ss->s = 1;
2210	ss->type = 0x03;	/* Read/Write, Accessed */
2211	ss->d = 1;		/* 32bit stack segment */
2212	ss->dpl = 0;
2213	ss->p = 1;
2214	ss->l = 0;
2215	ss->avl = 0;
2216}
2217
2218static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2219{
2220	u32 eax, ebx, ecx, edx;
2221
2222	eax = ecx = 0;
2223	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2224	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2225		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2226		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2227}
2228
2229static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2230{
2231	const struct x86_emulate_ops *ops = ctxt->ops;
2232	u32 eax, ebx, ecx, edx;
2233
2234	/*
2235	 * syscall should always be enabled in longmode - so only become
2236	 * vendor specific (cpuid) if other modes are active...
2237	 */
2238	if (ctxt->mode == X86EMUL_MODE_PROT64)
2239		return true;
2240
2241	eax = 0x00000000;
2242	ecx = 0x00000000;
2243	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2244	/*
2245	 * Intel ("GenuineIntel")
2246	 * remark: Intel CPUs only support "syscall" in 64bit
2247	 * longmode. Also an 64bit guest with a
2248	 * 32bit compat-app running will #UD !! While this
2249	 * behaviour can be fixed (by emulating) into AMD
2250	 * response - CPUs of AMD can't behave like Intel.
2251	 */
2252	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2253	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2254	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2255		return false;
2256
2257	/* AMD ("AuthenticAMD") */
2258	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2259	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2260	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2261		return true;
2262
2263	/* AMD ("AMDisbetter!") */
2264	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2265	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2266	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2267		return true;
2268
2269	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
2270	return false;
2271}
2272
2273static int em_syscall(struct x86_emulate_ctxt *ctxt)
2274{
2275	const struct x86_emulate_ops *ops = ctxt->ops;
2276	struct desc_struct cs, ss;
2277	u64 msr_data;
2278	u16 cs_sel, ss_sel;
2279	u64 efer = 0;
2280
2281	/* syscall is not available in real mode */
2282	if (ctxt->mode == X86EMUL_MODE_REAL ||
2283	    ctxt->mode == X86EMUL_MODE_VM86)
2284		return emulate_ud(ctxt);
2285
2286	if (!(em_syscall_is_enabled(ctxt)))
2287		return emulate_ud(ctxt);
2288
2289	ops->get_msr(ctxt, MSR_EFER, &efer);
2290	setup_syscalls_segments(ctxt, &cs, &ss);
2291
2292	if (!(efer & EFER_SCE))
2293		return emulate_ud(ctxt);
2294
2295	ops->get_msr(ctxt, MSR_STAR, &msr_data);
2296	msr_data >>= 32;
2297	cs_sel = (u16)(msr_data & 0xfffc);
2298	ss_sel = (u16)(msr_data + 8);
2299
2300	if (efer & EFER_LMA) {
2301		cs.d = 0;
2302		cs.l = 1;
2303	}
2304	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2305	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2306
2307	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2308	if (efer & EFER_LMA) {
2309#ifdef CONFIG_X86_64
2310		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2311
2312		ops->get_msr(ctxt,
2313			     ctxt->mode == X86EMUL_MODE_PROT64 ?
2314			     MSR_LSTAR : MSR_CSTAR, &msr_data);
2315		ctxt->_eip = msr_data;
2316
2317		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2318		ctxt->eflags &= ~msr_data;
2319#endif
2320	} else {
2321		/* legacy mode */
2322		ops->get_msr(ctxt, MSR_STAR, &msr_data);
2323		ctxt->_eip = (u32)msr_data;
2324
2325		ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2326	}
2327
2328	return X86EMUL_CONTINUE;
2329}
2330
2331static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2332{
2333	const struct x86_emulate_ops *ops = ctxt->ops;
2334	struct desc_struct cs, ss;
2335	u64 msr_data;
2336	u16 cs_sel, ss_sel;
2337	u64 efer = 0;
2338
2339	ops->get_msr(ctxt, MSR_EFER, &efer);
2340	/* inject #GP if in real mode */
2341	if (ctxt->mode == X86EMUL_MODE_REAL)
2342		return emulate_gp(ctxt, 0);
2343
2344	/*
2345	 * Not recognized on AMD in compat mode (but is recognized in legacy
2346	 * mode).
2347	 */
2348	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2349	    && !vendor_intel(ctxt))
2350		return emulate_ud(ctxt);
2351
2352	/* XXX sysenter/sysexit have not been tested in 64bit mode.
2353	* Therefore, we inject an #UD.
2354	*/
2355	if (ctxt->mode == X86EMUL_MODE_PROT64)
2356		return emulate_ud(ctxt);
2357
2358	setup_syscalls_segments(ctxt, &cs, &ss);
2359
2360	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2361	switch (ctxt->mode) {
2362	case X86EMUL_MODE_PROT32:
2363		if ((msr_data & 0xfffc) == 0x0)
2364			return emulate_gp(ctxt, 0);
2365		break;
2366	case X86EMUL_MODE_PROT64:
2367		if (msr_data == 0x0)
2368			return emulate_gp(ctxt, 0);
2369		break;
2370	default:
2371		break;
2372	}
2373
2374	ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2375	cs_sel = (u16)msr_data;
2376	cs_sel &= ~SELECTOR_RPL_MASK;
2377	ss_sel = cs_sel + 8;
2378	ss_sel &= ~SELECTOR_RPL_MASK;
2379	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2380		cs.d = 0;
2381		cs.l = 1;
2382	}
2383
2384	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2385	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2386
2387	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2388	ctxt->_eip = msr_data;
2389
2390	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2391	*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
2392
2393	return X86EMUL_CONTINUE;
2394}
2395
2396static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2397{
2398	const struct x86_emulate_ops *ops = ctxt->ops;
2399	struct desc_struct cs, ss;
2400	u64 msr_data, rcx, rdx;
2401	int usermode;
2402	u16 cs_sel = 0, ss_sel = 0;
2403
2404	/* inject #GP if in real mode or Virtual 8086 mode */
2405	if (ctxt->mode == X86EMUL_MODE_REAL ||
2406	    ctxt->mode == X86EMUL_MODE_VM86)
2407		return emulate_gp(ctxt, 0);
2408
2409	setup_syscalls_segments(ctxt, &cs, &ss);
2410
2411	if ((ctxt->rex_prefix & 0x8) != 0x0)
2412		usermode = X86EMUL_MODE_PROT64;
2413	else
2414		usermode = X86EMUL_MODE_PROT32;
2415
2416	rcx = reg_read(ctxt, VCPU_REGS_RCX);
2417	rdx = reg_read(ctxt, VCPU_REGS_RDX);
2418
2419	cs.dpl = 3;
2420	ss.dpl = 3;
2421	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2422	switch (usermode) {
2423	case X86EMUL_MODE_PROT32:
2424		cs_sel = (u16)(msr_data + 16);
2425		if ((msr_data & 0xfffc) == 0x0)
2426			return emulate_gp(ctxt, 0);
2427		ss_sel = (u16)(msr_data + 24);
2428		break;
2429	case X86EMUL_MODE_PROT64:
2430		cs_sel = (u16)(msr_data + 32);
2431		if (msr_data == 0x0)
2432			return emulate_gp(ctxt, 0);
2433		ss_sel = cs_sel + 8;
2434		cs.d = 0;
2435		cs.l = 1;
2436		if (is_noncanonical_address(rcx) ||
2437		    is_noncanonical_address(rdx))
2438			return emulate_gp(ctxt, 0);
2439		break;
2440	}
2441	cs_sel |= SELECTOR_RPL_MASK;
2442	ss_sel |= SELECTOR_RPL_MASK;
2443
2444	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2445	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2446
2447	ctxt->_eip = rdx;
2448	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2449
2450	return X86EMUL_CONTINUE;
2451}
2452
2453static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2454{
2455	int iopl;
2456	if (ctxt->mode == X86EMUL_MODE_REAL)
2457		return false;
2458	if (ctxt->mode == X86EMUL_MODE_VM86)
2459		return true;
2460	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2461	return ctxt->ops->cpl(ctxt) > iopl;
2462}
2463
2464static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2465					    u16 port, u16 len)
2466{
2467	const struct x86_emulate_ops *ops = ctxt->ops;
2468	struct desc_struct tr_seg;
2469	u32 base3;
2470	int r;
2471	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2472	unsigned mask = (1 << len) - 1;
2473	unsigned long base;
2474
2475	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2476	if (!tr_seg.p)
2477		return false;
2478	if (desc_limit_scaled(&tr_seg) < 103)
2479		return false;
2480	base = get_desc_base(&tr_seg);
2481#ifdef CONFIG_X86_64
2482	base |= ((u64)base3) << 32;
2483#endif
2484	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2485	if (r != X86EMUL_CONTINUE)
2486		return false;
2487	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2488		return false;
2489	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2490	if (r != X86EMUL_CONTINUE)
2491		return false;
2492	if ((perm >> bit_idx) & mask)
2493		return false;
2494	return true;
2495}
2496
2497static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2498				 u16 port, u16 len)
2499{
2500	if (ctxt->perm_ok)
2501		return true;
2502
2503	if (emulator_bad_iopl(ctxt))
2504		if (!emulator_io_port_access_allowed(ctxt, port, len))
2505			return false;
2506
2507	ctxt->perm_ok = true;
2508
2509	return true;
2510}
2511
2512static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2513				struct tss_segment_16 *tss)
2514{
2515	tss->ip = ctxt->_eip;
2516	tss->flag = ctxt->eflags;
2517	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2518	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2519	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2520	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2521	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2522	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2523	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2524	tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2525
2526	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2527	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2528	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2529	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2530	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2531}
2532
2533static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2534				 struct tss_segment_16 *tss)
2535{
2536	int ret;
2537	u8 cpl;
2538
2539	ctxt->_eip = tss->ip;
2540	ctxt->eflags = tss->flag | 2;
2541	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2542	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2543	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2544	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2545	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2546	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2547	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2548	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2549
2550	/*
2551	 * SDM says that segment selectors are loaded before segment
2552	 * descriptors
2553	 */
2554	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2555	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2556	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2557	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2558	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2559
2560	cpl = tss->cs & 3;
2561
2562	/*
2563	 * Now load segment descriptors. If fault happens at this stage
2564	 * it is handled in a context of new task
2565	 */
2566	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2567					true, NULL);
2568	if (ret != X86EMUL_CONTINUE)
2569		return ret;
2570	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2571					true, NULL);
2572	if (ret != X86EMUL_CONTINUE)
2573		return ret;
2574	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2575					true, NULL);
2576	if (ret != X86EMUL_CONTINUE)
2577		return ret;
2578	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2579					true, NULL);
2580	if (ret != X86EMUL_CONTINUE)
2581		return ret;
2582	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2583					true, NULL);
2584	if (ret != X86EMUL_CONTINUE)
2585		return ret;
2586
2587	return X86EMUL_CONTINUE;
2588}
2589
2590static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2591			  u16 tss_selector, u16 old_tss_sel,
2592			  ulong old_tss_base, struct desc_struct *new_desc)
2593{
2594	const struct x86_emulate_ops *ops = ctxt->ops;
2595	struct tss_segment_16 tss_seg;
2596	int ret;
2597	u32 new_tss_base = get_desc_base(new_desc);
2598
2599	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2600			    &ctxt->exception);
2601	if (ret != X86EMUL_CONTINUE)
2602		/* FIXME: need to provide precise fault address */
2603		return ret;
2604
2605	save_state_to_tss16(ctxt, &tss_seg);
2606
2607	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2608			     &ctxt->exception);
2609	if (ret != X86EMUL_CONTINUE)
2610		/* FIXME: need to provide precise fault address */
2611		return ret;
2612
2613	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2614			    &ctxt->exception);
2615	if (ret != X86EMUL_CONTINUE)
2616		/* FIXME: need to provide precise fault address */
2617		return ret;
2618
2619	if (old_tss_sel != 0xffff) {
2620		tss_seg.prev_task_link = old_tss_sel;
2621
2622		ret = ops->write_std(ctxt, new_tss_base,
2623				     &tss_seg.prev_task_link,
2624				     sizeof tss_seg.prev_task_link,
2625				     &ctxt->exception);
2626		if (ret != X86EMUL_CONTINUE)
2627			/* FIXME: need to provide precise fault address */
2628			return ret;
2629	}
2630
2631	return load_state_from_tss16(ctxt, &tss_seg);
2632}
2633
2634static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2635				struct tss_segment_32 *tss)
2636{
2637	/* CR3 and ldt selector are not saved intentionally */
2638	tss->eip = ctxt->_eip;
2639	tss->eflags = ctxt->eflags;
2640	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2641	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2642	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2643	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2644	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2645	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2646	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2647	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2648
2649	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2650	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2651	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2652	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2653	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2654	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2655}
2656
2657static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2658				 struct tss_segment_32 *tss)
2659{
2660	int ret;
2661	u8 cpl;
2662
2663	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2664		return emulate_gp(ctxt, 0);
2665	ctxt->_eip = tss->eip;
2666	ctxt->eflags = tss->eflags | 2;
2667
2668	/* General purpose registers */
2669	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2670	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2671	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2672	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2673	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2674	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2675	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2676	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2677
2678	/*
2679	 * SDM says that segment selectors are loaded before segment
2680	 * descriptors.  This is important because CPL checks will
2681	 * use CS.RPL.
2682	 */
2683	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2684	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2685	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2686	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2687	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2688	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2689	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2690
2691	/*
2692	 * If we're switching between Protected Mode and VM86, we need to make
2693	 * sure to update the mode before loading the segment descriptors so
2694	 * that the selectors are interpreted correctly.
2695	 */
2696	if (ctxt->eflags & X86_EFLAGS_VM) {
2697		ctxt->mode = X86EMUL_MODE_VM86;
2698		cpl = 3;
2699	} else {
2700		ctxt->mode = X86EMUL_MODE_PROT32;
2701		cpl = tss->cs & 3;
2702	}
2703
2704	/*
2705	 * Now load segment descriptors. If fault happenes at this stage
2706	 * it is handled in a context of new task
2707	 */
2708	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2709					cpl, true, NULL);
2710	if (ret != X86EMUL_CONTINUE)
2711		return ret;
2712	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2713					true, NULL);
2714	if (ret != X86EMUL_CONTINUE)
2715		return ret;
2716	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2717					true, NULL);
2718	if (ret != X86EMUL_CONTINUE)
2719		return ret;
2720	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2721					true, NULL);
2722	if (ret != X86EMUL_CONTINUE)
2723		return ret;
2724	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2725					true, NULL);
2726	if (ret != X86EMUL_CONTINUE)
2727		return ret;
2728	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2729					true, NULL);
2730	if (ret != X86EMUL_CONTINUE)
2731		return ret;
2732	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2733					true, NULL);
2734	if (ret != X86EMUL_CONTINUE)
2735		return ret;
2736
2737	return X86EMUL_CONTINUE;
2738}
2739
2740static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2741			  u16 tss_selector, u16 old_tss_sel,
2742			  ulong old_tss_base, struct desc_struct *new_desc)
2743{
2744	const struct x86_emulate_ops *ops = ctxt->ops;
2745	struct tss_segment_32 tss_seg;
2746	int ret;
2747	u32 new_tss_base = get_desc_base(new_desc);
2748	u32 eip_offset = offsetof(struct tss_segment_32, eip);
2749	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2750
2751	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2752			    &ctxt->exception);
2753	if (ret != X86EMUL_CONTINUE)
2754		/* FIXME: need to provide precise fault address */
2755		return ret;
2756
2757	save_state_to_tss32(ctxt, &tss_seg);
2758
2759	/* Only GP registers and segment selectors are saved */
2760	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2761			     ldt_sel_offset - eip_offset, &ctxt->exception);
2762	if (ret != X86EMUL_CONTINUE)
2763		/* FIXME: need to provide precise fault address */
2764		return ret;
2765
2766	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2767			    &ctxt->exception);
2768	if (ret != X86EMUL_CONTINUE)
2769		/* FIXME: need to provide precise fault address */
2770		return ret;
2771
2772	if (old_tss_sel != 0xffff) {
2773		tss_seg.prev_task_link = old_tss_sel;
2774
2775		ret = ops->write_std(ctxt, new_tss_base,
2776				     &tss_seg.prev_task_link,
2777				     sizeof tss_seg.prev_task_link,
2778				     &ctxt->exception);
2779		if (ret != X86EMUL_CONTINUE)
2780			/* FIXME: need to provide precise fault address */
2781			return ret;
2782	}
2783
2784	return load_state_from_tss32(ctxt, &tss_seg);
2785}
2786
2787static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2788				   u16 tss_selector, int idt_index, int reason,
2789				   bool has_error_code, u32 error_code)
2790{
2791	const struct x86_emulate_ops *ops = ctxt->ops;
2792	struct desc_struct curr_tss_desc, next_tss_desc;
2793	int ret;
2794	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2795	ulong old_tss_base =
2796		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2797	u32 desc_limit;
2798	ulong desc_addr;
2799
2800	/* FIXME: old_tss_base == ~0 ? */
2801
2802	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2803	if (ret != X86EMUL_CONTINUE)
2804		return ret;
2805	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2806	if (ret != X86EMUL_CONTINUE)
2807		return ret;
2808
2809	/* FIXME: check that next_tss_desc is tss */
2810
2811	/*
2812	 * Check privileges. The three cases are task switch caused by...
2813	 *
2814	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2815	 * 2. Exception/IRQ/iret: No check is performed
2816	 * 3. jmp/call to TSS: Check against DPL of the TSS
2817	 */
2818	if (reason == TASK_SWITCH_GATE) {
2819		if (idt_index != -1) {
2820			/* Software interrupts */
2821			struct desc_struct task_gate_desc;
2822			int dpl;
2823
2824			ret = read_interrupt_descriptor(ctxt, idt_index,
2825							&task_gate_desc);
2826			if (ret != X86EMUL_CONTINUE)
2827				return ret;
2828
2829			dpl = task_gate_desc.dpl;
2830			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2831				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2832		}
2833	} else if (reason != TASK_SWITCH_IRET) {
2834		int dpl = next_tss_desc.dpl;
2835		if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2836			return emulate_gp(ctxt, tss_selector);
2837	}
2838
2839
2840	desc_limit = desc_limit_scaled(&next_tss_desc);
2841	if (!next_tss_desc.p ||
2842	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2843	     desc_limit < 0x2b)) {
2844		return emulate_ts(ctxt, tss_selector & 0xfffc);
2845	}
2846
2847	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2848		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2849		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2850	}
2851
2852	if (reason == TASK_SWITCH_IRET)
2853		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2854
2855	/* set back link to prev task only if NT bit is set in eflags
2856	   note that old_tss_sel is not used after this point */
2857	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2858		old_tss_sel = 0xffff;
2859
2860	if (next_tss_desc.type & 8)
2861		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2862				     old_tss_base, &next_tss_desc);
2863	else
2864		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2865				     old_tss_base, &next_tss_desc);
2866	if (ret != X86EMUL_CONTINUE)
2867		return ret;
2868
2869	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2870		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2871
2872	if (reason != TASK_SWITCH_IRET) {
2873		next_tss_desc.type |= (1 << 1); /* set busy flag */
2874		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2875	}
2876
2877	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
2878	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2879
2880	if (has_error_code) {
2881		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2882		ctxt->lock_prefix = 0;
2883		ctxt->src.val = (unsigned long) error_code;
2884		ret = em_push(ctxt);
2885	}
2886
2887	return ret;
2888}
2889
2890int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2891			 u16 tss_selector, int idt_index, int reason,
2892			 bool has_error_code, u32 error_code)
2893{
2894	int rc;
2895
2896	invalidate_registers(ctxt);
2897	ctxt->_eip = ctxt->eip;
2898	ctxt->dst.type = OP_NONE;
2899
2900	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2901				     has_error_code, error_code);
2902
2903	if (rc == X86EMUL_CONTINUE) {
2904		ctxt->eip = ctxt->_eip;
2905		writeback_registers(ctxt);
2906	}
2907
2908	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2909}
2910
2911static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2912		struct operand *op)
2913{
2914	int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2915
2916	register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2917	op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
2918}
2919
2920static int em_das(struct x86_emulate_ctxt *ctxt)
2921{
2922	u8 al, old_al;
2923	bool af, cf, old_cf;
2924
2925	cf = ctxt->eflags & X86_EFLAGS_CF;
2926	al = ctxt->dst.val;
2927
2928	old_al = al;
2929	old_cf = cf;
2930	cf = false;
2931	af = ctxt->eflags & X86_EFLAGS_AF;
2932	if ((al & 0x0f) > 9 || af) {
2933		al -= 6;
2934		cf = old_cf | (al >= 250);
2935		af = true;
2936	} else {
2937		af = false;
2938	}
2939	if (old_al > 0x99 || old_cf) {
2940		al -= 0x60;
2941		cf = true;
2942	}
2943
2944	ctxt->dst.val = al;
2945	/* Set PF, ZF, SF */
2946	ctxt->src.type = OP_IMM;
2947	ctxt->src.val = 0;
2948	ctxt->src.bytes = 1;
2949	fastop(ctxt, em_or);
2950	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2951	if (cf)
2952		ctxt->eflags |= X86_EFLAGS_CF;
2953	if (af)
2954		ctxt->eflags |= X86_EFLAGS_AF;
2955	return X86EMUL_CONTINUE;
2956}
2957
2958static int em_aam(struct x86_emulate_ctxt *ctxt)
2959{
2960	u8 al, ah;
2961
2962	if (ctxt->src.val == 0)
2963		return emulate_de(ctxt);
2964
2965	al = ctxt->dst.val & 0xff;
2966	ah = al / ctxt->src.val;
2967	al %= ctxt->src.val;
2968
2969	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2970
2971	/* Set PF, ZF, SF */
2972	ctxt->src.type = OP_IMM;
2973	ctxt->src.val = 0;
2974	ctxt->src.bytes = 1;
2975	fastop(ctxt, em_or);
2976
2977	return X86EMUL_CONTINUE;
2978}
2979
2980static int em_aad(struct x86_emulate_ctxt *ctxt)
2981{
2982	u8 al = ctxt->dst.val & 0xff;
2983	u8 ah = (ctxt->dst.val >> 8) & 0xff;
2984
2985	al = (al + (ah * ctxt->src.val)) & 0xff;
2986
2987	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2988
2989	/* Set PF, ZF, SF */
2990	ctxt->src.type = OP_IMM;
2991	ctxt->src.val = 0;
2992	ctxt->src.bytes = 1;
2993	fastop(ctxt, em_or);
2994
2995	return X86EMUL_CONTINUE;
2996}
2997
2998static int em_call(struct x86_emulate_ctxt *ctxt)
2999{
3000	int rc;
3001	long rel = ctxt->src.val;
3002
3003	ctxt->src.val = (unsigned long)ctxt->_eip;
3004	rc = jmp_rel(ctxt, rel);
3005	if (rc != X86EMUL_CONTINUE)
3006		return rc;
3007	return em_push(ctxt);
3008}
3009
3010static int em_call_far(struct x86_emulate_ctxt *ctxt)
3011{
3012	u16 sel, old_cs;
3013	ulong old_eip;
3014	int rc;
3015	struct desc_struct old_desc, new_desc;
3016	const struct x86_emulate_ops *ops = ctxt->ops;
3017	int cpl = ctxt->ops->cpl(ctxt);
3018
3019	old_eip = ctxt->_eip;
3020	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3021
3022	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3023	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3024				       &new_desc);
3025	if (rc != X86EMUL_CONTINUE)
3026		return X86EMUL_CONTINUE;
3027
3028	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
3029	if (rc != X86EMUL_CONTINUE)
3030		goto fail;
3031
3032	ctxt->src.val = old_cs;
3033	rc = em_push(ctxt);
3034	if (rc != X86EMUL_CONTINUE)
3035		goto fail;
3036
3037	ctxt->src.val = old_eip;
3038	rc = em_push(ctxt);
3039	/* If we failed, we tainted the memory, but the very least we should
3040	   restore cs */
3041	if (rc != X86EMUL_CONTINUE)
3042		goto fail;
3043	return rc;
3044fail:
3045	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3046	return rc;
3047
3048}
3049
3050static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3051{
3052	int rc;
3053	unsigned long eip;
3054
3055	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3056	if (rc != X86EMUL_CONTINUE)
3057		return rc;
3058	rc = assign_eip_near(ctxt, eip);
3059	if (rc != X86EMUL_CONTINUE)
3060		return rc;
3061	rsp_increment(ctxt, ctxt->src.val);
3062	return X86EMUL_CONTINUE;
3063}
3064
3065static int em_xchg(struct x86_emulate_ctxt *ctxt)
3066{
3067	/* Write back the register source. */
3068	ctxt->src.val = ctxt->dst.val;
3069	write_register_operand(&ctxt->src);
3070
3071	/* Write back the memory destination with implicit LOCK prefix. */
3072	ctxt->dst.val = ctxt->src.orig_val;
3073	ctxt->lock_prefix = 1;
3074	return X86EMUL_CONTINUE;
3075}
3076
3077static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3078{
3079	ctxt->dst.val = ctxt->src2.val;
3080	return fastop(ctxt, em_imul);
3081}
3082
3083static int em_cwd(struct x86_emulate_ctxt *ctxt)
3084{
3085	ctxt->dst.type = OP_REG;
3086	ctxt->dst.bytes = ctxt->src.bytes;
3087	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3088	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3089
3090	return X86EMUL_CONTINUE;
3091}
3092
3093static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3094{
3095	u64 tsc = 0;
3096
3097	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3098	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3099	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3100	return X86EMUL_CONTINUE;
3101}
3102
3103static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3104{
3105	u64 pmc;
3106
3107	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3108		return emulate_gp(ctxt, 0);
3109	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3110	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3111	return X86EMUL_CONTINUE;
3112}
3113
3114static int em_mov(struct x86_emulate_ctxt *ctxt)
3115{
3116	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3117	return X86EMUL_CONTINUE;
3118}
3119
3120#define FFL(x) bit(X86_FEATURE_##x)
3121
3122static int em_movbe(struct x86_emulate_ctxt *ctxt)
3123{
3124	u32 ebx, ecx, edx, eax = 1;
3125	u16 tmp;
3126
3127	/*
3128	 * Check MOVBE is set in the guest-visible CPUID leaf.
3129	 */
3130	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3131	if (!(ecx & FFL(MOVBE)))
3132		return emulate_ud(ctxt);
3133
3134	switch (ctxt->op_bytes) {
3135	case 2:
3136		/*
3137		 * From MOVBE definition: "...When the operand size is 16 bits,
3138		 * the upper word of the destination register remains unchanged
3139		 * ..."
3140		 *
3141		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3142		 * rules so we have to do the operation almost per hand.
3143		 */
3144		tmp = (u16)ctxt->src.val;
3145		ctxt->dst.val &= ~0xffffUL;
3146		ctxt->dst.val |= (unsigned long)swab16(tmp);
3147		break;
3148	case 4:
3149		ctxt->dst.val = swab32((u32)ctxt->src.val);
3150		break;
3151	case 8:
3152		ctxt->dst.val = swab64(ctxt->src.val);
3153		break;
3154	default:
3155		BUG();
3156	}
3157	return X86EMUL_CONTINUE;
3158}
3159
3160static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3161{
3162	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3163		return emulate_gp(ctxt, 0);
3164
3165	/* Disable writeback. */
3166	ctxt->dst.type = OP_NONE;
3167	return X86EMUL_CONTINUE;
3168}
3169
3170static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3171{
3172	unsigned long val;
3173
3174	if (ctxt->mode == X86EMUL_MODE_PROT64)
3175		val = ctxt->src.val & ~0ULL;
3176	else
3177		val = ctxt->src.val & ~0U;
3178
3179	/* #UD condition is already handled. */
3180	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3181		return emulate_gp(ctxt, 0);
3182
3183	/* Disable writeback. */
3184	ctxt->dst.type = OP_NONE;
3185	return X86EMUL_CONTINUE;
3186}
3187
3188static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3189{
3190	u64 msr_data;
3191
3192	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3193		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3194	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3195		return emulate_gp(ctxt, 0);
3196
3197	return X86EMUL_CONTINUE;
3198}
3199
3200static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3201{
3202	u64 msr_data;
3203
3204	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3205		return emulate_gp(ctxt, 0);
3206
3207	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3208	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3209	return X86EMUL_CONTINUE;
3210}
3211
3212static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3213{
3214	if (ctxt->modrm_reg > VCPU_SREG_GS)
3215		return emulate_ud(ctxt);
3216
3217	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3218	return X86EMUL_CONTINUE;
3219}
3220
3221static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3222{
3223	u16 sel = ctxt->src.val;
3224
3225	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3226		return emulate_ud(ctxt);
3227
3228	if (ctxt->modrm_reg == VCPU_SREG_SS)
3229		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3230
3231	/* Disable writeback. */
3232	ctxt->dst.type = OP_NONE;
3233	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3234}
3235
3236static int em_lldt(struct x86_emulate_ctxt *ctxt)
3237{
3238	u16 sel = ctxt->src.val;
3239
3240	/* Disable writeback. */
3241	ctxt->dst.type = OP_NONE;
3242	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3243}
3244
3245static int em_ltr(struct x86_emulate_ctxt *ctxt)
3246{
3247	u16 sel = ctxt->src.val;
3248
3249	/* Disable writeback. */
3250	ctxt->dst.type = OP_NONE;
3251	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3252}
3253
3254static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3255{
3256	int rc;
3257	ulong linear;
3258
3259	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3260	if (rc == X86EMUL_CONTINUE)
3261		ctxt->ops->invlpg(ctxt, linear);
3262	/* Disable writeback. */
3263	ctxt->dst.type = OP_NONE;
3264	return X86EMUL_CONTINUE;
3265}
3266
3267static int em_clts(struct x86_emulate_ctxt *ctxt)
3268{
3269	ulong cr0;
3270
3271	cr0 = ctxt->ops->get_cr(ctxt, 0);
3272	cr0 &= ~X86_CR0_TS;
3273	ctxt->ops->set_cr(ctxt, 0, cr0);
3274	return X86EMUL_CONTINUE;
3275}
3276
3277static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3278{
3279	int rc = ctxt->ops->fix_hypercall(ctxt);
3280
3281	if (rc != X86EMUL_CONTINUE)
3282		return rc;
3283
3284	/* Let the processor re-execute the fixed hypercall */
3285	ctxt->_eip = ctxt->eip;
3286	/* Disable writeback. */
3287	ctxt->dst.type = OP_NONE;
3288	return X86EMUL_CONTINUE;
3289}
3290
3291static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3292				  void (*get)(struct x86_emulate_ctxt *ctxt,
3293					      struct desc_ptr *ptr))
3294{
3295	struct desc_ptr desc_ptr;
3296
3297	if (ctxt->mode == X86EMUL_MODE_PROT64)
3298		ctxt->op_bytes = 8;
3299	get(ctxt, &desc_ptr);
3300	if (ctxt->op_bytes == 2) {
3301		ctxt->op_bytes = 4;
3302		desc_ptr.address &= 0x00ffffff;
3303	}
3304	/* Disable writeback. */
3305	ctxt->dst.type = OP_NONE;
3306	return segmented_write(ctxt, ctxt->dst.addr.mem,
3307			       &desc_ptr, 2 + ctxt->op_bytes);
3308}
3309
3310static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3311{
3312	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3313}
3314
3315static int em_sidt(struct x86_emulate_ctxt *ctxt)
3316{
3317	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3318}
3319
3320static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3321{
3322	struct desc_ptr desc_ptr;
3323	int rc;
3324
3325	if (ctxt->mode == X86EMUL_MODE_PROT64)
3326		ctxt->op_bytes = 8;
3327	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3328			     &desc_ptr.size, &desc_ptr.address,
3329			     ctxt->op_bytes);
3330	if (rc != X86EMUL_CONTINUE)
3331		return rc;
3332	ctxt->ops->set_gdt(ctxt, &desc_ptr);
3333	/* Disable writeback. */
3334	ctxt->dst.type = OP_NONE;
3335	return X86EMUL_CONTINUE;
3336}
3337
3338static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3339{
3340	int rc;
3341
3342	rc = ctxt->ops->fix_hypercall(ctxt);
3343
3344	/* Disable writeback. */
3345	ctxt->dst.type = OP_NONE;
3346	return rc;
3347}
3348
3349static int em_lidt(struct x86_emulate_ctxt *ctxt)
3350{
3351	struct desc_ptr desc_ptr;
3352	int rc;
3353
3354	if (ctxt->mode == X86EMUL_MODE_PROT64)
3355		ctxt->op_bytes = 8;
3356	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3357			     &desc_ptr.size, &desc_ptr.address,
3358			     ctxt->op_bytes);
3359	if (rc != X86EMUL_CONTINUE)
3360		return rc;
3361	ctxt->ops->set_idt(ctxt, &desc_ptr);
3362	/* Disable writeback. */
3363	ctxt->dst.type = OP_NONE;
3364	return X86EMUL_CONTINUE;
3365}
3366
3367static int em_smsw(struct x86_emulate_ctxt *ctxt)
3368{
3369	if (ctxt->dst.type == OP_MEM)
3370		ctxt->dst.bytes = 2;
3371	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3372	return X86EMUL_CONTINUE;
3373}
3374
3375static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3376{
3377	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3378			  | (ctxt->src.val & 0x0f));
3379	ctxt->dst.type = OP_NONE;
3380	return X86EMUL_CONTINUE;
3381}
3382
3383static int em_loop(struct x86_emulate_ctxt *ctxt)
3384{
3385	int rc = X86EMUL_CONTINUE;
3386
3387	register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3388	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3389	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3390		rc = jmp_rel(ctxt, ctxt->src.val);
3391
3392	return rc;
3393}
3394
3395static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3396{
3397	int rc = X86EMUL_CONTINUE;
3398
3399	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3400		rc = jmp_rel(ctxt, ctxt->src.val);
3401
3402	return rc;
3403}
3404
3405static int em_in(struct x86_emulate_ctxt *ctxt)
3406{
3407	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3408			     &ctxt->dst.val))
3409		return X86EMUL_IO_NEEDED;
3410
3411	return X86EMUL_CONTINUE;
3412}
3413
3414static int em_out(struct x86_emulate_ctxt *ctxt)
3415{
3416	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3417				    &ctxt->src.val, 1);
3418	/* Disable writeback. */
3419	ctxt->dst.type = OP_NONE;
3420	return X86EMUL_CONTINUE;
3421}
3422
3423static int em_cli(struct x86_emulate_ctxt *ctxt)
3424{
3425	if (emulator_bad_iopl(ctxt))
3426		return emulate_gp(ctxt, 0);
3427
3428	ctxt->eflags &= ~X86_EFLAGS_IF;
3429	return X86EMUL_CONTINUE;
3430}
3431
3432static int em_sti(struct x86_emulate_ctxt *ctxt)
3433{
3434	if (emulator_bad_iopl(ctxt))
3435		return emulate_gp(ctxt, 0);
3436
3437	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3438	ctxt->eflags |= X86_EFLAGS_IF;
3439	return X86EMUL_CONTINUE;
3440}
3441
3442static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3443{
3444	u32 eax, ebx, ecx, edx;
3445
3446	eax = reg_read(ctxt, VCPU_REGS_RAX);
3447	ecx = reg_read(ctxt, VCPU_REGS_RCX);
3448	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3449	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
3450	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3451	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3452	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
3453	return X86EMUL_CONTINUE;
3454}
3455
3456static int em_sahf(struct x86_emulate_ctxt *ctxt)
3457{
3458	u32 flags;
3459
3460	flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3461	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3462
3463	ctxt->eflags &= ~0xffUL;
3464	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3465	return X86EMUL_CONTINUE;
3466}
3467
3468static int em_lahf(struct x86_emulate_ctxt *ctxt)
3469{
3470	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3471	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3472	return X86EMUL_CONTINUE;
3473}
3474
3475static int em_bswap(struct x86_emulate_ctxt *ctxt)
3476{
3477	switch (ctxt->op_bytes) {
3478#ifdef CONFIG_X86_64
3479	case 8:
3480		asm("bswap %0" : "+r"(ctxt->dst.val));
3481		break;
3482#endif
3483	default:
3484		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3485		break;
3486	}
3487	return X86EMUL_CONTINUE;
3488}
3489
3490static int em_clflush(struct x86_emulate_ctxt *ctxt)
3491{
3492	/* emulating clflush regardless of cpuid */
3493	return X86EMUL_CONTINUE;
3494}
3495
3496static bool valid_cr(int nr)
3497{
3498	switch (nr) {
3499	case 0:
3500	case 2 ... 4:
3501	case 8:
3502		return true;
3503	default:
3504		return false;
3505	}
3506}
3507
3508static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3509{
3510	if (!valid_cr(ctxt->modrm_reg))
3511		return emulate_ud(ctxt);
3512
3513	return X86EMUL_CONTINUE;
3514}
3515
3516static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3517{
3518	u64 new_val = ctxt->src.val64;
3519	int cr = ctxt->modrm_reg;
3520	u64 efer = 0;
3521
3522	static u64 cr_reserved_bits[] = {
3523		0xffffffff00000000ULL,
3524		0, 0, 0, /* CR3 checked later */
3525		CR4_RESERVED_BITS,
3526		0, 0, 0,
3527		CR8_RESERVED_BITS,
3528	};
3529
3530	if (!valid_cr(cr))
3531		return emulate_ud(ctxt);
3532
3533	if (new_val & cr_reserved_bits[cr])
3534		return emulate_gp(ctxt, 0);
3535
3536	switch (cr) {
3537	case 0: {
3538		u64 cr4;
3539		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3540		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3541			return emulate_gp(ctxt, 0);
3542
3543		cr4 = ctxt->ops->get_cr(ctxt, 4);
3544		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3545
3546		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3547		    !(cr4 & X86_CR4_PAE))
3548			return emulate_gp(ctxt, 0);
3549
3550		break;
3551		}
3552	case 3: {
3553		u64 rsvd = 0;
3554
3555		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3556		if (efer & EFER_LMA)
3557			rsvd = CR3_L_MODE_RESERVED_BITS;
3558
3559		if (new_val & rsvd)
3560			return emulate_gp(ctxt, 0);
3561
3562		break;
3563		}
3564	case 4: {
3565		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3566
3567		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3568			return emulate_gp(ctxt, 0);
3569
3570		break;
3571		}
3572	}
3573
3574	return X86EMUL_CONTINUE;
3575}
3576
3577static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3578{
3579	unsigned long dr7;
3580
3581	ctxt->ops->get_dr(ctxt, 7, &dr7);
3582
3583	/* Check if DR7.Global_Enable is set */
3584	return dr7 & (1 << 13);
3585}
3586
3587static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3588{
3589	int dr = ctxt->modrm_reg;
3590	u64 cr4;
3591
3592	if (dr > 7)
3593		return emulate_ud(ctxt);
3594
3595	cr4 = ctxt->ops->get_cr(ctxt, 4);
3596	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3597		return emulate_ud(ctxt);
3598
3599	if (check_dr7_gd(ctxt))
3600		return emulate_db(ctxt);
3601
3602	return X86EMUL_CONTINUE;
3603}
3604
3605static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3606{
3607	u64 new_val = ctxt->src.val64;
3608	int dr = ctxt->modrm_reg;
3609
3610	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3611		return emulate_gp(ctxt, 0);
3612
3613	return check_dr_read(ctxt);
3614}
3615
3616static int check_svme(struct x86_emulate_ctxt *ctxt)
3617{
3618	u64 efer;
3619
3620	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3621
3622	if (!(efer & EFER_SVME))
3623		return emulate_ud(ctxt);
3624
3625	return X86EMUL_CONTINUE;
3626}
3627
3628static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3629{
3630	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3631
3632	/* Valid physical address? */
3633	if (rax & 0xffff000000000000ULL)
3634		return emulate_gp(ctxt, 0);
3635
3636	return check_svme(ctxt);
3637}
3638
3639static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3640{
3641	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3642
3643	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3644		return emulate_ud(ctxt);
3645
3646	return X86EMUL_CONTINUE;
3647}
3648
3649static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3650{
3651	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3652	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3653
3654	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3655	    ctxt->ops->check_pmc(ctxt, rcx))
3656		return emulate_gp(ctxt, 0);
3657
3658	return X86EMUL_CONTINUE;
3659}
3660
3661static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3662{
3663	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3664	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3665		return emulate_gp(ctxt, 0);
3666
3667	return X86EMUL_CONTINUE;
3668}
3669
3670static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3671{
3672	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3673	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3674		return emulate_gp(ctxt, 0);
3675
3676	return X86EMUL_CONTINUE;
3677}
3678
3679#define D(_y) { .flags = (_y) }
3680#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3681#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3682		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
3683#define N    D(NotImpl)
3684#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3685#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3686#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3687#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3688#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3689#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3690#define II(_f, _e, _i) \
3691	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3692#define IIP(_f, _e, _i, _p) \
3693	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3694	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
3695#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3696
3697#define D2bv(_f)      D((_f) | ByteOp), D(_f)
3698#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3699#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
3700#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
3701#define I2bvIP(_f, _e, _i, _p) \
3702	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3703
3704#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
3705		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
3706		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3707
3708static const struct opcode group7_rm0[] = {
3709	N,
3710	I(SrcNone | Priv | EmulateOnUD,	em_vmcall),
3711	N, N, N, N, N, N,
3712};
3713
3714static const struct opcode group7_rm1[] = {
3715	DI(SrcNone | Priv, monitor),
3716	DI(SrcNone | Priv, mwait),
3717	N, N, N, N, N, N,
3718};
3719
3720static const struct opcode group7_rm3[] = {
3721	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
3722	II(SrcNone  | Prot | EmulateOnUD,	em_vmmcall,	vmmcall),
3723	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
3724	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
3725	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
3726	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
3727	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
3728	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
3729};
3730
3731static const struct opcode group7_rm7[] = {
3732	N,
3733	DIP(SrcNone, rdtscp, check_rdtsc),
3734	N, N, N, N, N, N,
3735};
3736
3737static const struct opcode group1[] = {
3738	F(Lock, em_add),
3739	F(Lock | PageTable, em_or),
3740	F(Lock, em_adc),
3741	F(Lock, em_sbb),
3742	F(Lock | PageTable, em_and),
3743	F(Lock, em_sub),
3744	F(Lock, em_xor),
3745	F(NoWrite, em_cmp),
3746};
3747
3748static const struct opcode group1A[] = {
3749	I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3750};
3751
3752static const struct opcode group2[] = {
3753	F(DstMem | ModRM, em_rol),
3754	F(DstMem | ModRM, em_ror),
3755	F(DstMem | ModRM, em_rcl),
3756	F(DstMem | ModRM, em_rcr),
3757	F(DstMem | ModRM, em_shl),
3758	F(DstMem | ModRM, em_shr),
3759	F(DstMem | ModRM, em_shl),
3760	F(DstMem | ModRM, em_sar),
3761};
3762
3763static const struct opcode group3[] = {
3764	F(DstMem | SrcImm | NoWrite, em_test),
3765	F(DstMem | SrcImm | NoWrite, em_test),
3766	F(DstMem | SrcNone | Lock, em_not),
3767	F(DstMem | SrcNone | Lock, em_neg),
3768	F(DstXacc | Src2Mem, em_mul_ex),
3769	F(DstXacc | Src2Mem, em_imul_ex),
3770	F(DstXacc | Src2Mem, em_div_ex),
3771	F(DstXacc | Src2Mem, em_idiv_ex),
3772};
3773
3774static const struct opcode group4[] = {
3775	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3776	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3777	N, N, N, N, N, N,
3778};
3779
3780static const struct opcode group5[] = {
3781	F(DstMem | SrcNone | Lock,		em_inc),
3782	F(DstMem | SrcNone | Lock,		em_dec),
3783	I(SrcMem | Stack,			em_grp45),
3784	I(SrcMemFAddr | ImplicitOps | Stack,	em_call_far),
3785	I(SrcMem | Stack,			em_grp45),
3786	I(SrcMemFAddr | ImplicitOps,		em_grp45),
3787	I(SrcMem | Stack,			em_grp45), D(Undefined),
3788};
3789
3790static const struct opcode group6[] = {
3791	DI(Prot,	sldt),
3792	DI(Prot,	str),
3793	II(Prot | Priv | SrcMem16, em_lldt, lldt),
3794	II(Prot | Priv | SrcMem16, em_ltr, ltr),
3795	N, N, N, N,
3796};
3797
3798static const struct group_dual group7 = { {
3799	II(Mov | DstMem,			em_sgdt, sgdt),
3800	II(Mov | DstMem,			em_sidt, sidt),
3801	II(SrcMem | Priv,			em_lgdt, lgdt),
3802	II(SrcMem | Priv,			em_lidt, lidt),
3803	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
3804	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
3805	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
3806}, {
3807	EXT(0, group7_rm0),
3808	EXT(0, group7_rm1),
3809	N, EXT(0, group7_rm3),
3810	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
3811	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
3812	EXT(0, group7_rm7),
3813} };
3814
3815static const struct opcode group8[] = {
3816	N, N, N, N,
3817	F(DstMem | SrcImmByte | NoWrite,		em_bt),
3818	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
3819	F(DstMem | SrcImmByte | Lock,			em_btr),
3820	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
3821};
3822
3823static const struct group_dual group9 = { {
3824	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3825}, {
3826	N, N, N, N, N, N, N, N,
3827} };
3828
3829static const struct opcode group11[] = {
3830	I(DstMem | SrcImm | Mov | PageTable, em_mov),
3831	X7(D(Undefined)),
3832};
3833
3834static const struct gprefix pfx_0f_ae_7 = {
3835	I(SrcMem | ByteOp, em_clflush), N, N, N,
3836};
3837
3838static const struct group_dual group15 = { {
3839	N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3840}, {
3841	N, N, N, N, N, N, N, N,
3842} };
3843
3844static const struct gprefix pfx_0f_6f_0f_7f = {
3845	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3846};
3847
3848static const struct gprefix pfx_0f_2b = {
3849	I(0, em_mov), I(0, em_mov), N, N,
3850};
3851
3852static const struct gprefix pfx_0f_28_0f_29 = {
3853	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3854};
3855
3856static const struct gprefix pfx_0f_e7 = {
3857	N, I(Sse, em_mov), N, N,
3858};
3859
3860static const struct escape escape_d9 = { {
3861	N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3862}, {
3863	/* 0xC0 - 0xC7 */
3864	N, N, N, N, N, N, N, N,
3865	/* 0xC8 - 0xCF */
3866	N, N, N, N, N, N, N, N,
3867	/* 0xD0 - 0xC7 */
3868	N, N, N, N, N, N, N, N,
3869	/* 0xD8 - 0xDF */
3870	N, N, N, N, N, N, N, N,
3871	/* 0xE0 - 0xE7 */
3872	N, N, N, N, N, N, N, N,
3873	/* 0xE8 - 0xEF */
3874	N, N, N, N, N, N, N, N,
3875	/* 0xF0 - 0xF7 */
3876	N, N, N, N, N, N, N, N,
3877	/* 0xF8 - 0xFF */
3878	N, N, N, N, N, N, N, N,
3879} };
3880
3881static const struct escape escape_db = { {
3882	N, N, N, N, N, N, N, N,
3883}, {
3884	/* 0xC0 - 0xC7 */
3885	N, N, N, N, N, N, N, N,
3886	/* 0xC8 - 0xCF */
3887	N, N, N, N, N, N, N, N,
3888	/* 0xD0 - 0xC7 */
3889	N, N, N, N, N, N, N, N,
3890	/* 0xD8 - 0xDF */
3891	N, N, N, N, N, N, N, N,
3892	/* 0xE0 - 0xE7 */
3893	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3894	/* 0xE8 - 0xEF */
3895	N, N, N, N, N, N, N, N,
3896	/* 0xF0 - 0xF7 */
3897	N, N, N, N, N, N, N, N,
3898	/* 0xF8 - 0xFF */
3899	N, N, N, N, N, N, N, N,
3900} };
3901
3902static const struct escape escape_dd = { {
3903	N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3904}, {
3905	/* 0xC0 - 0xC7 */
3906	N, N, N, N, N, N, N, N,
3907	/* 0xC8 - 0xCF */
3908	N, N, N, N, N, N, N, N,
3909	/* 0xD0 - 0xC7 */
3910	N, N, N, N, N, N, N, N,
3911	/* 0xD8 - 0xDF */
3912	N, N, N, N, N, N, N, N,
3913	/* 0xE0 - 0xE7 */
3914	N, N, N, N, N, N, N, N,
3915	/* 0xE8 - 0xEF */
3916	N, N, N, N, N, N, N, N,
3917	/* 0xF0 - 0xF7 */
3918	N, N, N, N, N, N, N, N,
3919	/* 0xF8 - 0xFF */
3920	N, N, N, N, N, N, N, N,
3921} };
3922
3923static const struct opcode opcode_table[256] = {
3924	/* 0x00 - 0x07 */
3925	F6ALU(Lock, em_add),
3926	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3927	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3928	/* 0x08 - 0x0F */
3929	F6ALU(Lock | PageTable, em_or),
3930	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3931	N,
3932	/* 0x10 - 0x17 */
3933	F6ALU(Lock, em_adc),
3934	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3935	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3936	/* 0x18 - 0x1F */
3937	F6ALU(Lock, em_sbb),
3938	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3939	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3940	/* 0x20 - 0x27 */
3941	F6ALU(Lock | PageTable, em_and), N, N,
3942	/* 0x28 - 0x2F */
3943	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3944	/* 0x30 - 0x37 */
3945	F6ALU(Lock, em_xor), N, N,
3946	/* 0x38 - 0x3F */
3947	F6ALU(NoWrite, em_cmp), N, N,
3948	/* 0x40 - 0x4F */
3949	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3950	/* 0x50 - 0x57 */
3951	X8(I(SrcReg | Stack, em_push)),
3952	/* 0x58 - 0x5F */
3953	X8(I(DstReg | Stack, em_pop)),
3954	/* 0x60 - 0x67 */
3955	I(ImplicitOps | Stack | No64, em_pusha),
3956	I(ImplicitOps | Stack | No64, em_popa),
3957	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3958	N, N, N, N,
3959	/* 0x68 - 0x6F */
3960	I(SrcImm | Mov | Stack, em_push),
3961	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3962	I(SrcImmByte | Mov | Stack, em_push),
3963	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3964	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3965	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3966	/* 0x70 - 0x7F */
3967	X16(D(SrcImmByte)),
3968	/* 0x80 - 0x87 */
3969	G(ByteOp | DstMem | SrcImm, group1),
3970	G(DstMem | SrcImm, group1),
3971	G(ByteOp | DstMem | SrcImm | No64, group1),
3972	G(DstMem | SrcImmByte, group1),
3973	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3974	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3975	/* 0x88 - 0x8F */
3976	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3977	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3978	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3979	D(ModRM | SrcMem | NoAccess | DstReg),
3980	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3981	G(0, group1A),
3982	/* 0x90 - 0x97 */
3983	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3984	/* 0x98 - 0x9F */
3985	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3986	I(SrcImmFAddr | No64, em_call_far), N,
3987	II(ImplicitOps | Stack, em_pushf, pushf),
3988	II(ImplicitOps | Stack, em_popf, popf),
3989	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
3990	/* 0xA0 - 0xA7 */
3991	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3992	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
3993	I2bv(SrcSI | DstDI | Mov | String, em_mov),
3994	F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
3995	/* 0xA8 - 0xAF */
3996	F2bv(DstAcc | SrcImm | NoWrite, em_test),
3997	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3998	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3999	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
4000	/* 0xB0 - 0xB7 */
4001	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4002	/* 0xB8 - 0xBF */
4003	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4004	/* 0xC0 - 0xC7 */
4005	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4006	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
4007	I(ImplicitOps | Stack, em_ret),
4008	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4009	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4010	G(ByteOp, group11), G(0, group11),
4011	/* 0xC8 - 0xCF */
4012	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4013	I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4014	I(ImplicitOps | Stack, em_ret_far),
4015	D(ImplicitOps), DI(SrcImmByte, intn),
4016	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4017	/* 0xD0 - 0xD7 */
4018	G(Src2One | ByteOp, group2), G(Src2One, group2),
4019	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4020	I(DstAcc | SrcImmUByte | No64, em_aam),
4021	I(DstAcc | SrcImmUByte | No64, em_aad),
4022	F(DstAcc | ByteOp | No64, em_salc),
4023	I(DstAcc | SrcXLat | ByteOp, em_mov),
4024	/* 0xD8 - 0xDF */
4025	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4026	/* 0xE0 - 0xE7 */
4027	X3(I(SrcImmByte, em_loop)),
4028	I(SrcImmByte, em_jcxz),
4029	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
4030	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4031	/* 0xE8 - 0xEF */
4032	I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
4033	I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
4034	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
4035	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4036	/* 0xF0 - 0xF7 */
4037	N, DI(ImplicitOps, icebp), N, N,
4038	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4039	G(ByteOp, group3), G(0, group3),
4040	/* 0xF8 - 0xFF */
4041	D(ImplicitOps), D(ImplicitOps),
4042	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4043	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4044};
4045
4046static const struct opcode twobyte_table[256] = {
4047	/* 0x00 - 0x0F */
4048	G(0, group6), GD(0, &group7), N, N,
4049	N, I(ImplicitOps | EmulateOnUD, em_syscall),
4050	II(ImplicitOps | Priv, em_clts, clts), N,
4051	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4052	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4053	/* 0x10 - 0x1F */
4054	N, N, N, N, N, N, N, N,
4055	D(ImplicitOps | ModRM | SrcMem | NoAccess),
4056	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4057	/* 0x20 - 0x2F */
4058	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4059	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4060	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4061						check_cr_write),
4062	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4063						check_dr_write),
4064	N, N, N, N,
4065	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4066	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4067	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4068	N, N, N, N,
4069	/* 0x30 - 0x3F */
4070	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4071	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4072	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4073	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4074	I(ImplicitOps | EmulateOnUD, em_sysenter),
4075	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4076	N, N,
4077	N, N, N, N, N, N, N, N,
4078	/* 0x40 - 0x4F */
4079	X16(D(DstReg | SrcMem | ModRM)),
4080	/* 0x50 - 0x5F */
4081	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4082	/* 0x60 - 0x6F */
4083	N, N, N, N,
4084	N, N, N, N,
4085	N, N, N, N,
4086	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4087	/* 0x70 - 0x7F */
4088	N, N, N, N,
4089	N, N, N, N,
4090	N, N, N, N,
4091	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4092	/* 0x80 - 0x8F */
4093	X16(D(SrcImm)),
4094	/* 0x90 - 0x9F */
4095	X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4096	/* 0xA0 - 0xA7 */
4097	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4098	II(ImplicitOps, em_cpuid, cpuid),
4099	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4100	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4101	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4102	/* 0xA8 - 0xAF */
4103	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4104	DI(ImplicitOps, rsm),
4105	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4106	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4107	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4108	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4109	/* 0xB0 - 0xB7 */
4110	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4111	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4112	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4113	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4114	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4115	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4116	/* 0xB8 - 0xBF */
4117	N, N,
4118	G(BitOp, group8),
4119	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4120	F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4121	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4122	/* 0xC0 - 0xC7 */
4123	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4124	N, D(DstMem | SrcReg | ModRM | Mov),
4125	N, N, N, GD(0, &group9),
4126	/* 0xC8 - 0xCF */
4127	X8(I(DstReg, em_bswap)),
4128	/* 0xD0 - 0xDF */
4129	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4130	/* 0xE0 - 0xEF */
4131	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4132	N, N, N, N, N, N, N, N,
4133	/* 0xF0 - 0xFF */
4134	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4135};
4136
4137static const struct gprefix three_byte_0f_38_f0 = {
4138	I(DstReg | SrcMem | Mov, em_movbe), N, N, N
4139};
4140
4141static const struct gprefix three_byte_0f_38_f1 = {
4142	I(DstMem | SrcReg | Mov, em_movbe), N, N, N
4143};
4144
4145/*
4146 * Insns below are selected by the prefix which indexed by the third opcode
4147 * byte.
4148 */
4149static const struct opcode opcode_map_0f_38[256] = {
4150	/* 0x00 - 0x7f */
4151	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4152	/* 0x80 - 0xef */
4153	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4154	/* 0xf0 - 0xf1 */
4155	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
4156	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
4157	/* 0xf2 - 0xff */
4158	N, N, X4(N), X8(N)
4159};
4160
4161#undef D
4162#undef N
4163#undef G
4164#undef GD
4165#undef I
4166#undef GP
4167#undef EXT
4168
4169#undef D2bv
4170#undef D2bvIP
4171#undef I2bv
4172#undef I2bvIP
4173#undef I6ALU
4174
4175static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4176{
4177	unsigned size;
4178
4179	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4180	if (size == 8)
4181		size = 4;
4182	return size;
4183}
4184
4185static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4186		      unsigned size, bool sign_extension)
4187{
4188	int rc = X86EMUL_CONTINUE;
4189
4190	op->type = OP_IMM;
4191	op->bytes = size;
4192	op->addr.mem.ea = ctxt->_eip;
4193	/* NB. Immediates are sign-extended as necessary. */
4194	switch (op->bytes) {
4195	case 1:
4196		op->val = insn_fetch(s8, ctxt);
4197		break;
4198	case 2:
4199		op->val = insn_fetch(s16, ctxt);
4200		break;
4201	case 4:
4202		op->val = insn_fetch(s32, ctxt);
4203		break;
4204	case 8:
4205		op->val = insn_fetch(s64, ctxt);
4206		break;
4207	}
4208	if (!sign_extension) {
4209		switch (op->bytes) {
4210		case 1:
4211			op->val &= 0xff;
4212			break;
4213		case 2:
4214			op->val &= 0xffff;
4215			break;
4216		case 4:
4217			op->val &= 0xffffffff;
4218			break;
4219		}
4220	}
4221done:
4222	return rc;
4223}
4224
4225static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4226			  unsigned d)
4227{
4228	int rc = X86EMUL_CONTINUE;
4229
4230	switch (d) {
4231	case OpReg:
4232		decode_register_operand(ctxt, op);
4233		break;
4234	case OpImmUByte:
4235		rc = decode_imm(ctxt, op, 1, false);
4236		break;
4237	case OpMem:
4238		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4239	mem_common:
4240		*op = ctxt->memop;
4241		ctxt->memopp = op;
4242		if (ctxt->d & BitOp)
4243			fetch_bit_operand(ctxt);
4244		op->orig_val = op->val;
4245		break;
4246	case OpMem64:
4247		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4248		goto mem_common;
4249	case OpAcc:
4250		op->type = OP_REG;
4251		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4252		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4253		fetch_register_operand(op);
4254		op->orig_val = op->val;
4255		break;
4256	case OpAccLo:
4257		op->type = OP_REG;
4258		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4259		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4260		fetch_register_operand(op);
4261		op->orig_val = op->val;
4262		break;
4263	case OpAccHi:
4264		if (ctxt->d & ByteOp) {
4265			op->type = OP_NONE;
4266			break;
4267		}
4268		op->type = OP_REG;
4269		op->bytes = ctxt->op_bytes;
4270		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4271		fetch_register_operand(op);
4272		op->orig_val = op->val;
4273		break;
4274	case OpDI:
4275		op->type = OP_MEM;
4276		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4277		op->addr.mem.ea =
4278			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
4279		op->addr.mem.seg = VCPU_SREG_ES;
4280		op->val = 0;
4281		op->count = 1;
4282		break;
4283	case OpDX:
4284		op->type = OP_REG;
4285		op->bytes = 2;
4286		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4287		fetch_register_operand(op);
4288		break;
4289	case OpCL:
4290		op->type = OP_IMM;
4291		op->bytes = 1;
4292		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4293		break;
4294	case OpImmByte:
4295		rc = decode_imm(ctxt, op, 1, true);
4296		break;
4297	case OpOne:
4298		op->type = OP_IMM;
4299		op->bytes = 1;
4300		op->val = 1;
4301		break;
4302	case OpImm:
4303		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4304		break;
4305	case OpImm64:
4306		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4307		break;
4308	case OpMem8:
4309		ctxt->memop.bytes = 1;
4310		if (ctxt->memop.type == OP_REG) {
4311			ctxt->memop.addr.reg = decode_register(ctxt,
4312					ctxt->modrm_rm, true);
4313			fetch_register_operand(&ctxt->memop);
4314		}
4315		goto mem_common;
4316	case OpMem16:
4317		ctxt->memop.bytes = 2;
4318		goto mem_common;
4319	case OpMem32:
4320		ctxt->memop.bytes = 4;
4321		goto mem_common;
4322	case OpImmU16:
4323		rc = decode_imm(ctxt, op, 2, false);
4324		break;
4325	case OpImmU:
4326		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4327		break;
4328	case OpSI:
4329		op->type = OP_MEM;
4330		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4331		op->addr.mem.ea =
4332			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
4333		op->addr.mem.seg = ctxt->seg_override;
4334		op->val = 0;
4335		op->count = 1;
4336		break;
4337	case OpXLat:
4338		op->type = OP_MEM;
4339		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4340		op->addr.mem.ea =
4341			register_address(ctxt,
4342				reg_read(ctxt, VCPU_REGS_RBX) +
4343				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4344		op->addr.mem.seg = ctxt->seg_override;
4345		op->val = 0;
4346		break;
4347	case OpImmFAddr:
4348		op->type = OP_IMM;
4349		op->addr.mem.ea = ctxt->_eip;
4350		op->bytes = ctxt->op_bytes + 2;
4351		insn_fetch_arr(op->valptr, op->bytes, ctxt);
4352		break;
4353	case OpMemFAddr:
4354		ctxt->memop.bytes = ctxt->op_bytes + 2;
4355		goto mem_common;
4356	case OpES:
4357		op->type = OP_IMM;
4358		op->val = VCPU_SREG_ES;
4359		break;
4360	case OpCS:
4361		op->type = OP_IMM;
4362		op->val = VCPU_SREG_CS;
4363		break;
4364	case OpSS:
4365		op->type = OP_IMM;
4366		op->val = VCPU_SREG_SS;
4367		break;
4368	case OpDS:
4369		op->type = OP_IMM;
4370		op->val = VCPU_SREG_DS;
4371		break;
4372	case OpFS:
4373		op->type = OP_IMM;
4374		op->val = VCPU_SREG_FS;
4375		break;
4376	case OpGS:
4377		op->type = OP_IMM;
4378		op->val = VCPU_SREG_GS;
4379		break;
4380	case OpImplicit:
4381		/* Special instructions do their own operand decoding. */
4382	default:
4383		op->type = OP_NONE; /* Disable writeback. */
4384		break;
4385	}
4386
4387done:
4388	return rc;
4389}
4390
4391int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4392{
4393	int rc = X86EMUL_CONTINUE;
4394	int mode = ctxt->mode;
4395	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4396	bool op_prefix = false;
4397	bool has_seg_override = false;
4398	struct opcode opcode;
4399
4400	ctxt->memop.type = OP_NONE;
4401	ctxt->memopp = NULL;
4402	ctxt->_eip = ctxt->eip;
4403	ctxt->fetch.ptr = ctxt->fetch.data;
4404	ctxt->fetch.end = ctxt->fetch.data + insn_len;
4405	ctxt->opcode_len = 1;
4406	if (insn_len > 0)
4407		memcpy(ctxt->fetch.data, insn, insn_len);
4408	else {
4409		rc = __do_insn_fetch_bytes(ctxt, 1);
4410		if (rc != X86EMUL_CONTINUE)
4411			return rc;
4412	}
4413
4414	switch (mode) {
4415	case X86EMUL_MODE_REAL:
4416	case X86EMUL_MODE_VM86:
4417	case X86EMUL_MODE_PROT16:
4418		def_op_bytes = def_ad_bytes = 2;
4419		break;
4420	case X86EMUL_MODE_PROT32:
4421		def_op_bytes = def_ad_bytes = 4;
4422		break;
4423#ifdef CONFIG_X86_64
4424	case X86EMUL_MODE_PROT64:
4425		def_op_bytes = 4;
4426		def_ad_bytes = 8;
4427		break;
4428#endif
4429	default:
4430		return EMULATION_FAILED;
4431	}
4432
4433	ctxt->op_bytes = def_op_bytes;
4434	ctxt->ad_bytes = def_ad_bytes;
4435
4436	/* Legacy prefixes. */
4437	for (;;) {
4438		switch (ctxt->b = insn_fetch(u8, ctxt)) {
4439		case 0x66:	/* operand-size override */
4440			op_prefix = true;
4441			/* switch between 2/4 bytes */
4442			ctxt->op_bytes = def_op_bytes ^ 6;
4443			break;
4444		case 0x67:	/* address-size override */
4445			if (mode == X86EMUL_MODE_PROT64)
4446				/* switch between 4/8 bytes */
4447				ctxt->ad_bytes = def_ad_bytes ^ 12;
4448			else
4449				/* switch between 2/4 bytes */
4450				ctxt->ad_bytes = def_ad_bytes ^ 6;
4451			break;
4452		case 0x26:	/* ES override */
4453		case 0x2e:	/* CS override */
4454		case 0x36:	/* SS override */
4455		case 0x3e:	/* DS override */
4456			has_seg_override = true;
4457			ctxt->seg_override = (ctxt->b >> 3) & 3;
4458			break;
4459		case 0x64:	/* FS override */
4460		case 0x65:	/* GS override */
4461			has_seg_override = true;
4462			ctxt->seg_override = ctxt->b & 7;
4463			break;
4464		case 0x40 ... 0x4f: /* REX */
4465			if (mode != X86EMUL_MODE_PROT64)
4466				goto done_prefixes;
4467			ctxt->rex_prefix = ctxt->b;
4468			continue;
4469		case 0xf0:	/* LOCK */
4470			ctxt->lock_prefix = 1;
4471			break;
4472		case 0xf2:	/* REPNE/REPNZ */
4473		case 0xf3:	/* REP/REPE/REPZ */
4474			ctxt->rep_prefix = ctxt->b;
4475			break;
4476		default:
4477			goto done_prefixes;
4478		}
4479
4480		/* Any legacy prefix after a REX prefix nullifies its effect. */
4481
4482		ctxt->rex_prefix = 0;
4483	}
4484
4485done_prefixes:
4486
4487	/* REX prefix. */
4488	if (ctxt->rex_prefix & 8)
4489		ctxt->op_bytes = 8;	/* REX.W */
4490
4491	/* Opcode byte(s). */
4492	opcode = opcode_table[ctxt->b];
4493	/* Two-byte opcode? */
4494	if (ctxt->b == 0x0f) {
4495		ctxt->opcode_len = 2;
4496		ctxt->b = insn_fetch(u8, ctxt);
4497		opcode = twobyte_table[ctxt->b];
4498
4499		/* 0F_38 opcode map */
4500		if (ctxt->b == 0x38) {
4501			ctxt->opcode_len = 3;
4502			ctxt->b = insn_fetch(u8, ctxt);
4503			opcode = opcode_map_0f_38[ctxt->b];
4504		}
4505	}
4506	ctxt->d = opcode.flags;
4507
4508	if (ctxt->d & ModRM)
4509		ctxt->modrm = insn_fetch(u8, ctxt);
4510
4511	/* vex-prefix instructions are not implemented */
4512	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4513	    (mode == X86EMUL_MODE_PROT64 ||
4514	    (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
4515		ctxt->d = NotImpl;
4516	}
4517
4518	while (ctxt->d & GroupMask) {
4519		switch (ctxt->d & GroupMask) {
4520		case Group:
4521			goffset = (ctxt->modrm >> 3) & 7;
4522			opcode = opcode.u.group[goffset];
4523			break;
4524		case GroupDual:
4525			goffset = (ctxt->modrm >> 3) & 7;
4526			if ((ctxt->modrm >> 6) == 3)
4527				opcode = opcode.u.gdual->mod3[goffset];
4528			else
4529				opcode = opcode.u.gdual->mod012[goffset];
4530			break;
4531		case RMExt:
4532			goffset = ctxt->modrm & 7;
4533			opcode = opcode.u.group[goffset];
4534			break;
4535		case Prefix:
4536			if (ctxt->rep_prefix && op_prefix)
4537				return EMULATION_FAILED;
4538			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4539			switch (simd_prefix) {
4540			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4541			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4542			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4543			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4544			}
4545			break;
4546		case Escape:
4547			if (ctxt->modrm > 0xbf)
4548				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4549			else
4550				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4551			break;
4552		default:
4553			return EMULATION_FAILED;
4554		}
4555
4556		ctxt->d &= ~(u64)GroupMask;
4557		ctxt->d |= opcode.flags;
4558	}
4559
4560	/* Unrecognised? */
4561	if (ctxt->d == 0)
4562		return EMULATION_FAILED;
4563
4564	ctxt->execute = opcode.u.execute;
4565
4566	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4567		return EMULATION_FAILED;
4568
4569	if (unlikely(ctxt->d &
4570		     (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
4571		/*
4572		 * These are copied unconditionally here, and checked unconditionally
4573		 * in x86_emulate_insn.
4574		 */
4575		ctxt->check_perm = opcode.check_perm;
4576		ctxt->intercept = opcode.intercept;
4577
4578		if (ctxt->d & NotImpl)
4579			return EMULATION_FAILED;
4580
4581		if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
4582			ctxt->op_bytes = 8;
4583
4584		if (ctxt->d & Op3264) {
4585			if (mode == X86EMUL_MODE_PROT64)
4586				ctxt->op_bytes = 8;
4587			else
4588				ctxt->op_bytes = 4;
4589		}
4590
4591		if (ctxt->d & Sse)
4592			ctxt->op_bytes = 16;
4593		else if (ctxt->d & Mmx)
4594			ctxt->op_bytes = 8;
4595	}
4596
4597	/* ModRM and SIB bytes. */
4598	if (ctxt->d & ModRM) {
4599		rc = decode_modrm(ctxt, &ctxt->memop);
4600		if (!has_seg_override) {
4601			has_seg_override = true;
4602			ctxt->seg_override = ctxt->modrm_seg;
4603		}
4604	} else if (ctxt->d & MemAbs)
4605		rc = decode_abs(ctxt, &ctxt->memop);
4606	if (rc != X86EMUL_CONTINUE)
4607		goto done;
4608
4609	if (!has_seg_override)
4610		ctxt->seg_override = VCPU_SREG_DS;
4611
4612	ctxt->memop.addr.mem.seg = ctxt->seg_override;
4613
4614	/*
4615	 * Decode and fetch the source operand: register, memory
4616	 * or immediate.
4617	 */
4618	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4619	if (rc != X86EMUL_CONTINUE)
4620		goto done;
4621
4622	/*
4623	 * Decode and fetch the second source operand: register, memory
4624	 * or immediate.
4625	 */
4626	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4627	if (rc != X86EMUL_CONTINUE)
4628		goto done;
4629
4630	/* Decode and fetch the destination operand: register or memory. */
4631	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4632
4633	if (ctxt->rip_relative)
4634		ctxt->memopp->addr.mem.ea += ctxt->_eip;
4635
4636done:
4637	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4638}
4639
4640bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4641{
4642	return ctxt->d & PageTable;
4643}
4644
4645static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4646{
4647	/* The second termination condition only applies for REPE
4648	 * and REPNE. Test if the repeat string operation prefix is
4649	 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
4650	 * corresponding termination condition according to:
4651	 * 	- if REPE/REPZ and ZF = 0 then done
4652	 * 	- if REPNE/REPNZ and ZF = 1 then done
4653	 */
4654	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4655	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4656	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
4657		 ((ctxt->eflags & EFLG_ZF) == 0))
4658		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
4659		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4660		return true;
4661
4662	return false;
4663}
4664
4665static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4666{
4667	bool fault = false;
4668
4669	ctxt->ops->get_fpu(ctxt);
4670	asm volatile("1: fwait \n\t"
4671		     "2: \n\t"
4672		     ".pushsection .fixup,\"ax\" \n\t"
4673		     "3: \n\t"
4674		     "movb $1, %[fault] \n\t"
4675		     "jmp 2b \n\t"
4676		     ".popsection \n\t"
4677		     _ASM_EXTABLE(1b, 3b)
4678		     : [fault]"+qm"(fault));
4679	ctxt->ops->put_fpu(ctxt);
4680
4681	if (unlikely(fault))
4682		return emulate_exception(ctxt, MF_VECTOR, 0, false);
4683
4684	return X86EMUL_CONTINUE;
4685}
4686
4687static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4688				       struct operand *op)
4689{
4690	if (op->type == OP_MM)
4691		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4692}
4693
4694static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4695{
4696	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4697	if (!(ctxt->d & ByteOp))
4698		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4699	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4700	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4701	      [fastop]"+S"(fop)
4702	    : "c"(ctxt->src2.val));
4703	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4704	if (!fop) /* exception is returned in fop variable */
4705		return emulate_de(ctxt);
4706	return X86EMUL_CONTINUE;
4707}
4708
4709void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4710{
4711	memset(&ctxt->rip_relative, 0,
4712	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
4713
4714	ctxt->io_read.pos = 0;
4715	ctxt->io_read.end = 0;
4716	ctxt->mem_read.end = 0;
4717}
4718
4719int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4720{
4721	const struct x86_emulate_ops *ops = ctxt->ops;
4722	int rc = X86EMUL_CONTINUE;
4723	int saved_dst_type = ctxt->dst.type;
4724
4725	ctxt->mem_read.pos = 0;
4726
4727	/* LOCK prefix is allowed only with some instructions */
4728	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4729		rc = emulate_ud(ctxt);
4730		goto done;
4731	}
4732
4733	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4734		rc = emulate_ud(ctxt);
4735		goto done;
4736	}
4737
4738	if (unlikely(ctxt->d &
4739		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4740		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4741				(ctxt->d & Undefined)) {
4742			rc = emulate_ud(ctxt);
4743			goto done;
4744		}
4745
4746		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4747		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4748			rc = emulate_ud(ctxt);
4749			goto done;
4750		}
4751
4752		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4753			rc = emulate_nm(ctxt);
4754			goto done;
4755		}
4756
4757		if (ctxt->d & Mmx) {
4758			rc = flush_pending_x87_faults(ctxt);
4759			if (rc != X86EMUL_CONTINUE)
4760				goto done;
4761			/*
4762			 * Now that we know the fpu is exception safe, we can fetch
4763			 * operands from it.
4764			 */
4765			fetch_possible_mmx_operand(ctxt, &ctxt->src);
4766			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4767			if (!(ctxt->d & Mov))
4768				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
4769		}
4770
4771		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4772			rc = emulator_check_intercept(ctxt, ctxt->intercept,
4773						      X86_ICPT_PRE_EXCEPT);
4774			if (rc != X86EMUL_CONTINUE)
4775				goto done;
4776		}
4777
4778		/* Privileged instruction can be executed only in CPL=0 */
4779		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
4780			if (ctxt->d & PrivUD)
4781				rc = emulate_ud(ctxt);
4782			else
4783				rc = emulate_gp(ctxt, 0);
4784			goto done;
4785		}
4786
4787		/* Instruction can only be executed in protected mode */
4788		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
4789			rc = emulate_ud(ctxt);
4790			goto done;
4791		}
4792
4793		/* Do instruction specific permission checks */
4794		if (ctxt->d & CheckPerm) {
4795			rc = ctxt->check_perm(ctxt);
4796			if (rc != X86EMUL_CONTINUE)
4797				goto done;
4798		}
4799
4800		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4801			rc = emulator_check_intercept(ctxt, ctxt->intercept,
4802						      X86_ICPT_POST_EXCEPT);
4803			if (rc != X86EMUL_CONTINUE)
4804				goto done;
4805		}
4806
4807		if (ctxt->rep_prefix && (ctxt->d & String)) {
4808			/* All REP prefixes have the same first termination condition */
4809			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
4810				ctxt->eip = ctxt->_eip;
4811				ctxt->eflags &= ~EFLG_RF;
4812				goto done;
4813			}
4814		}
4815	}
4816
4817	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
4818		rc = segmented_read(ctxt, ctxt->src.addr.mem,
4819				    ctxt->src.valptr, ctxt->src.bytes);
4820		if (rc != X86EMUL_CONTINUE)
4821			goto done;
4822		ctxt->src.orig_val64 = ctxt->src.val64;
4823	}
4824
4825	if (ctxt->src2.type == OP_MEM) {
4826		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
4827				    &ctxt->src2.val, ctxt->src2.bytes);
4828		if (rc != X86EMUL_CONTINUE)
4829			goto done;
4830	}
4831
4832	if ((ctxt->d & DstMask) == ImplicitOps)
4833		goto special_insn;
4834
4835
4836	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
4837		/* optimisation - avoid slow emulated read if Mov */
4838		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4839				   &ctxt->dst.val, ctxt->dst.bytes);
4840		if (rc != X86EMUL_CONTINUE)
4841			goto done;
4842	}
4843	ctxt->dst.orig_val = ctxt->dst.val;
4844
4845special_insn:
4846
4847	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4848		rc = emulator_check_intercept(ctxt, ctxt->intercept,
4849					      X86_ICPT_POST_MEMACCESS);
4850		if (rc != X86EMUL_CONTINUE)
4851			goto done;
4852	}
4853
4854	if (ctxt->rep_prefix && (ctxt->d & String))
4855		ctxt->eflags |= EFLG_RF;
4856	else
4857		ctxt->eflags &= ~EFLG_RF;
4858
4859	if (ctxt->execute) {
4860		if (ctxt->d & Fastop) {
4861			void (*fop)(struct fastop *) = (void *)ctxt->execute;
4862			rc = fastop(ctxt, fop);
4863			if (rc != X86EMUL_CONTINUE)
4864				goto done;
4865			goto writeback;
4866		}
4867		rc = ctxt->execute(ctxt);
4868		if (rc != X86EMUL_CONTINUE)
4869			goto done;
4870		goto writeback;
4871	}
4872
4873	if (ctxt->opcode_len == 2)
4874		goto twobyte_insn;
4875	else if (ctxt->opcode_len == 3)
4876		goto threebyte_insn;
4877
4878	switch (ctxt->b) {
4879	case 0x63:		/* movsxd */
4880		if (ctxt->mode != X86EMUL_MODE_PROT64)
4881			goto cannot_emulate;
4882		ctxt->dst.val = (s32) ctxt->src.val;
4883		break;
4884	case 0x70 ... 0x7f: /* jcc (short) */
4885		if (test_cc(ctxt->b, ctxt->eflags))
4886			rc = jmp_rel(ctxt, ctxt->src.val);
4887		break;
4888	case 0x8d: /* lea r16/r32, m */
4889		ctxt->dst.val = ctxt->src.addr.mem.ea;
4890		break;
4891	case 0x90 ... 0x97: /* nop / xchg reg, rax */
4892		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
4893			ctxt->dst.type = OP_NONE;
4894		else
4895			rc = em_xchg(ctxt);
4896		break;
4897	case 0x98: /* cbw/cwde/cdqe */
4898		switch (ctxt->op_bytes) {
4899		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
4900		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
4901		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
4902		}
4903		break;
4904	case 0xcc:		/* int3 */
4905		rc = emulate_int(ctxt, 3);
4906		break;
4907	case 0xcd:		/* int n */
4908		rc = emulate_int(ctxt, ctxt->src.val);
4909		break;
4910	case 0xce:		/* into */
4911		if (ctxt->eflags & EFLG_OF)
4912			rc = emulate_int(ctxt, 4);
4913		break;
4914	case 0xe9: /* jmp rel */
4915	case 0xeb: /* jmp rel short */
4916		rc = jmp_rel(ctxt, ctxt->src.val);
4917		ctxt->dst.type = OP_NONE; /* Disable writeback. */
4918		break;
4919	case 0xf4:              /* hlt */
4920		ctxt->ops->halt(ctxt);
4921		break;
4922	case 0xf5:	/* cmc */
4923		/* complement carry flag from eflags reg */
4924		ctxt->eflags ^= EFLG_CF;
4925		break;
4926	case 0xf8: /* clc */
4927		ctxt->eflags &= ~EFLG_CF;
4928		break;
4929	case 0xf9: /* stc */
4930		ctxt->eflags |= EFLG_CF;
4931		break;
4932	case 0xfc: /* cld */
4933		ctxt->eflags &= ~EFLG_DF;
4934		break;
4935	case 0xfd: /* std */
4936		ctxt->eflags |= EFLG_DF;
4937		break;
4938	default:
4939		goto cannot_emulate;
4940	}
4941
4942	if (rc != X86EMUL_CONTINUE)
4943		goto done;
4944
4945writeback:
4946	if (ctxt->d & SrcWrite) {
4947		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
4948		rc = writeback(ctxt, &ctxt->src);
4949		if (rc != X86EMUL_CONTINUE)
4950			goto done;
4951	}
4952	if (!(ctxt->d & NoWrite)) {
4953		rc = writeback(ctxt, &ctxt->dst);
4954		if (rc != X86EMUL_CONTINUE)
4955			goto done;
4956	}
4957
4958	/*
4959	 * restore dst type in case the decoding will be reused
4960	 * (happens for string instruction )
4961	 */
4962	ctxt->dst.type = saved_dst_type;
4963
4964	if ((ctxt->d & SrcMask) == SrcSI)
4965		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
4966
4967	if ((ctxt->d & DstMask) == DstDI)
4968		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
4969
4970	if (ctxt->rep_prefix && (ctxt->d & String)) {
4971		unsigned int count;
4972		struct read_cache *r = &ctxt->io_read;
4973		if ((ctxt->d & SrcMask) == SrcSI)
4974			count = ctxt->src.count;
4975		else
4976			count = ctxt->dst.count;
4977		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
4978				-count);
4979
4980		if (!string_insn_completed(ctxt)) {
4981			/*
4982			 * Re-enter guest when pio read ahead buffer is empty
4983			 * or, if it is not used, after each 1024 iteration.
4984			 */
4985			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
4986			    (r->end == 0 || r->end != r->pos)) {
4987				/*
4988				 * Reset read cache. Usually happens before
4989				 * decode, but since instruction is restarted
4990				 * we have to do it here.
4991				 */
4992				ctxt->mem_read.end = 0;
4993				writeback_registers(ctxt);
4994				return EMULATION_RESTART;
4995			}
4996			goto done; /* skip rip writeback */
4997		}
4998		ctxt->eflags &= ~EFLG_RF;
4999	}
5000
5001	ctxt->eip = ctxt->_eip;
5002
5003done:
5004	if (rc == X86EMUL_PROPAGATE_FAULT) {
5005		WARN_ON(ctxt->exception.vector > 0x1f);
5006		ctxt->have_exception = true;
5007	}
5008	if (rc == X86EMUL_INTERCEPTED)
5009		return EMULATION_INTERCEPTED;
5010
5011	if (rc == X86EMUL_CONTINUE)
5012		writeback_registers(ctxt);
5013
5014	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5015
5016twobyte_insn:
5017	switch (ctxt->b) {
5018	case 0x09:		/* wbinvd */
5019		(ctxt->ops->wbinvd)(ctxt);
5020		break;
5021	case 0x08:		/* invd */
5022	case 0x0d:		/* GrpP (prefetch) */
5023	case 0x18:		/* Grp16 (prefetch/nop) */
5024	case 0x1f:		/* nop */
5025		break;
5026	case 0x20: /* mov cr, reg */
5027		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5028		break;
5029	case 0x21: /* mov from dr to reg */
5030		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5031		break;
5032	case 0x40 ... 0x4f:	/* cmov */
5033		if (test_cc(ctxt->b, ctxt->eflags))
5034			ctxt->dst.val = ctxt->src.val;
5035		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
5036			 ctxt->op_bytes != 4)
5037			ctxt->dst.type = OP_NONE; /* no writeback */
5038		break;
5039	case 0x80 ... 0x8f: /* jnz rel, etc*/
5040		if (test_cc(ctxt->b, ctxt->eflags))
5041			rc = jmp_rel(ctxt, ctxt->src.val);
5042		break;
5043	case 0x90 ... 0x9f:     /* setcc r/m8 */
5044		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5045		break;
5046	case 0xb6 ... 0xb7:	/* movzx */
5047		ctxt->dst.bytes = ctxt->op_bytes;
5048		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5049						       : (u16) ctxt->src.val;
5050		break;
5051	case 0xbe ... 0xbf:	/* movsx */
5052		ctxt->dst.bytes = ctxt->op_bytes;
5053		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5054							(s16) ctxt->src.val;
5055		break;
5056	case 0xc3:		/* movnti */
5057		ctxt->dst.bytes = ctxt->op_bytes;
5058		ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
5059							(u32) ctxt->src.val;
5060		break;
5061	default:
5062		goto cannot_emulate;
5063	}
5064
5065threebyte_insn:
5066
5067	if (rc != X86EMUL_CONTINUE)
5068		goto done;
5069
5070	goto writeback;
5071
5072cannot_emulate:
5073	return EMULATION_FAILED;
5074}
5075
5076void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5077{
5078	invalidate_registers(ctxt);
5079}
5080
5081void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5082{
5083	writeback_registers(ctxt);
5084}
5085