1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
34 #define OpNone 0ull
35 #define OpImplicit 1ull /* No generic decode */
36 #define OpReg 2ull /* Register */
37 #define OpMem 3ull /* Memory */
38 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
39 #define OpDI 5ull /* ES:DI/EDI/RDI */
40 #define OpMem64 6ull /* Memory, 64-bit */
41 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
42 #define OpDX 8ull /* DX register */
43 #define OpCL 9ull /* CL register (for shifts) */
44 #define OpImmByte 10ull /* 8-bit sign extended immediate */
45 #define OpOne 11ull /* Implied 1 */
46 #define OpImm 12ull /* Sign extended immediate */
47 #define OpMem16 13ull /* Memory operand (16-bit). */
48 #define OpMem32 14ull /* Memory operand (32-bit). */
49 #define OpImmU 15ull /* Immediate operand, zero extended */
50 #define OpSI 16ull /* SI/ESI/RSI */
51 #define OpImmFAddr 17ull /* Immediate far address */
52 #define OpMemFAddr 18ull /* Far address in memory */
53 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
54 #define OpES 20ull /* ES */
55 #define OpCS 21ull /* CS */
56 #define OpSS 22ull /* SS */
57 #define OpDS 23ull /* DS */
58 #define OpFS 24ull /* FS */
59 #define OpGS 25ull /* GS */
60 #define OpMem8 26ull /* 8-bit zero extended memory operand */
62 #define OpBits 5 /* Width of operand field */
63 #define OpMask ((1ull << OpBits) - 1)
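/*
 * Illustrative sketch (not part of the original file): each operand slot
 * in the 64-bit decode flags is an OpBits-wide field holding one of the
 * Op* codes above. The hypothetical helper below shows how such a field
 * would be unpacked; the shift parameter stands for the DstShift,
 * SrcShift and Src2Shift values defined further down.
 */
static inline u64 example_op_field(u64 flags, unsigned int shift)
{
	return (flags >> shift) & OpMask;	/* extract one operand-type field */
}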
66 * Opcode effective-address decode tables.
67 * Note that we only emulate instructions that have at least one memory
68 * operand (excluding implicit stack references). We assume that stack
69 * references and instruction fetches will never occur in special memory
70 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
71 * not be handled.
74 /* Operand sizes: 8-bit operands or specified/overridden size. */
75 #define ByteOp (1<<0) /* 8-bit operands. */
76 /* Destination operand type. */
78 #define ImplicitOps (OpImplicit << DstShift)
79 #define DstReg (OpReg << DstShift)
80 #define DstMem (OpMem << DstShift)
81 #define DstAcc (OpAcc << DstShift)
82 #define DstDI (OpDI << DstShift)
83 #define DstMem64 (OpMem64 << DstShift)
84 #define DstImmUByte (OpImmUByte << DstShift)
85 #define DstDX (OpDX << DstShift)
86 #define DstMask (OpMask << DstShift)
87 /* Source operand type. */
89 #define SrcNone (OpNone << SrcShift)
90 #define SrcReg (OpReg << SrcShift)
91 #define SrcMem (OpMem << SrcShift)
92 #define SrcMem16 (OpMem16 << SrcShift)
93 #define SrcMem32 (OpMem32 << SrcShift)
94 #define SrcImm (OpImm << SrcShift)
95 #define SrcImmByte (OpImmByte << SrcShift)
96 #define SrcOne (OpOne << SrcShift)
97 #define SrcImmUByte (OpImmUByte << SrcShift)
98 #define SrcImmU (OpImmU << SrcShift)
99 #define SrcSI (OpSI << SrcShift)
100 #define SrcImmFAddr (OpImmFAddr << SrcShift)
101 #define SrcMemFAddr (OpMemFAddr << SrcShift)
102 #define SrcAcc (OpAcc << SrcShift)
103 #define SrcImmU16 (OpImmU16 << SrcShift)
104 #define SrcDX (OpDX << SrcShift)
105 #define SrcMem8 (OpMem8 << SrcShift)
106 #define SrcMask (OpMask << SrcShift)
107 #define BitOp (1<<11)
108 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
109 #define String (1<<13) /* String instruction (rep capable) */
110 #define Stack (1<<14) /* Stack instruction (push/pop) */
111 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
112 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
113 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
114 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
115 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
116 #define Sse (1<<18) /* SSE Vector instruction */
117 /* Generic ModRM decode. */
118 #define ModRM (1<<19)
119 /* Destination is only written; never read. */
120 #define Mov (1<<20)
122 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
123 #define VendorSpecific (1<<22) /* Vendor specific instruction */
124 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
125 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
126 #define Undefined (1<<25) /* No Such Instruction */
127 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
128 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
130 #define PageTable (1 << 29) /* instruction used to write page table */
131 /* Source 2 operand type */
132 #define Src2Shift (30)
133 #define Src2None (OpNone << Src2Shift)
134 #define Src2CL (OpCL << Src2Shift)
135 #define Src2ImmByte (OpImmByte << Src2Shift)
136 #define Src2One (OpOne << Src2Shift)
137 #define Src2Imm (OpImm << Src2Shift)
138 #define Src2ES (OpES << Src2Shift)
139 #define Src2CS (OpCS << Src2Shift)
140 #define Src2SS (OpSS << Src2Shift)
141 #define Src2DS (OpDS << Src2Shift)
142 #define Src2FS (OpFS << Src2Shift)
143 #define Src2GS (OpGS << Src2Shift)
144 #define Src2Mask (OpMask << Src2Shift)
145 #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
146 #define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
147 #define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
149 #define X2(x...) x, x
150 #define X3(x...) X2(x), x
151 #define X4(x...) X2(x), X2(x)
152 #define X5(x...) X4(x), x
153 #define X6(x...) X4(x), X2(x)
154 #define X7(x...) X4(x), X3(x)
155 #define X8(x...) X4(x), X4(x)
156 #define X16(x...) X8(x), X8(x)
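/*
 * Illustrative sketch (hypothetical, not compiled into the real file):
 * the X-macros above just replicate their argument, so a run of
 * identical opcode-table entries can be written once.
 */
static const int example_x16_row[16] = { X16(-1) };	/* sixteen -1 entries */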
162 int (*execute)(struct x86_emulate_ctxt *ctxt);
163 struct opcode *group;
164 struct group_dual *gdual;
165 struct gprefix *gprefix;
167 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
171 struct opcode mod012[8];
172 struct opcode mod3[8];
176 struct opcode pfx_no;
177 struct opcode pfx_66;
178 struct opcode pfx_f2;
179 struct opcode pfx_f3;
182 /* EFLAGS bit definitions. */
183 #define EFLG_ID (1<<21)
184 #define EFLG_VIP (1<<20)
185 #define EFLG_VIF (1<<19)
186 #define EFLG_AC (1<<18)
187 #define EFLG_VM (1<<17)
188 #define EFLG_RF (1<<16)
189 #define EFLG_IOPL (3<<12)
190 #define EFLG_NT (1<<14)
191 #define EFLG_OF (1<<11)
192 #define EFLG_DF (1<<10)
193 #define EFLG_IF (1<<9)
194 #define EFLG_TF (1<<8)
195 #define EFLG_SF (1<<7)
196 #define EFLG_ZF (1<<6)
197 #define EFLG_AF (1<<4)
198 #define EFLG_PF (1<<2)
199 #define EFLG_CF (1<<0)
201 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
202 #define EFLG_RESERVED_ONE_MASK 2
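/*
 * Minimal sketch of how the two reserved-bit masks above are meant to be
 * applied when synthesising an EFLAGS image (the IRET emulation further
 * down does the same dance):
 */
static inline unsigned long example_sanitize_eflags(unsigned long eflags)
{
	eflags &= ~EFLG_RESERVED_ZEROS_MASK;	/* reserved bits must read as 0 */
	eflags |= EFLG_RESERVED_ONE_MASK;	/* bit 1 always reads as 1 */
	return eflags;
}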
205 * Instruction emulation:
206 * Most instructions are emulated directly via a fragment of inline assembly
207 * code. This allows us to save/restore EFLAGS and thus very easily pick up
208 * any modified flags.
211 #if defined(CONFIG_X86_64)
212 #define _LO32 "k" /* force 32-bit operand */
213 #define _STK "%%rsp" /* stack pointer */
214 #elif defined(__i386__)
215 #define _LO32 "" /* force 32-bit operand */
216 #define _STK "%%esp" /* stack pointer */
220 * These EFLAGS bits are restored from saved value during emulation, and
221 * any changes are written back to the saved value after emulation.
223 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
225 /* Before executing instruction: restore necessary bits in EFLAGS. */
226 #define _PRE_EFLAGS(_sav, _msk, _tmp) \
227 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
228 "movl %"_sav",%"_LO32 _tmp"; " \
231 "movl %"_msk",%"_LO32 _tmp"; " \
232 "andl %"_LO32 _tmp",("_STK"); " \
234 "notl %"_LO32 _tmp"; " \
235 "andl %"_LO32 _tmp",("_STK"); " \
236 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
238 "orl %"_LO32 _tmp",("_STK"); " \
242 /* After executing instruction: write-back necessary bits in EFLAGS. */
243 #define _POST_EFLAGS(_sav, _msk, _tmp) \
244 /* _sav |= EFLAGS & _msk; */ \
247 "andl %"_msk",%"_LO32 _tmp"; " \
248 "orl %"_LO32 _tmp",%"_sav"; "
256 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
258 __asm__ __volatile__ ( \
259 _PRE_EFLAGS("0", "4", "2") \
260 _op _suffix " %"_x"3,%1; " \
261 _POST_EFLAGS("0", "4", "2") \
262 : "=m" ((ctxt)->eflags), \
263 "+q" (*(_dsttype*)&(ctxt)->dst.val), \
265 : _y ((ctxt)->src.val), "i" (EFLAGS_MASK)); \
269 /* Raw emulation: instruction has two explicit operands. */
270 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
272 unsigned long _tmp; \
274 switch ((ctxt)->dst.bytes) { \
276 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
279 ____emulate_2op(ctxt,_op,_lx,_ly,"l",u32); \
282 ON64(____emulate_2op(ctxt,_op,_qx,_qy,"q",u64)); \
287 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
289 unsigned long _tmp; \
290 switch ((ctxt)->dst.bytes) { \
292 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
295 __emulate_2op_nobyte(ctxt, _op, \
296 _wx, _wy, _lx, _ly, _qx, _qy); \
301 /* Source operand is byte-sized and may be restricted to just %cl. */
302 #define emulate_2op_SrcB(ctxt, _op) \
303 __emulate_2op(ctxt, _op, "b", "c", "b", "c", "b", "c", "b", "c")
305 /* Source operand is byte, word, long or quad sized. */
306 #define emulate_2op_SrcV(ctxt, _op) \
307 __emulate_2op(ctxt, _op, "b", "q", "w", "r", _LO32, "r", "", "r")
309 /* Source operand is word, long or quad sized. */
310 #define emulate_2op_SrcV_nobyte(ctxt, _op) \
311 __emulate_2op_nobyte(ctxt, _op, "w", "r", _LO32, "r", "", "r")
313 /* Instruction has three operands; one operand is stored in the ECX register */
314 #define __emulate_2op_cl(ctxt, _op, _suffix, _type) \
316 unsigned long _tmp; \
317 _type _clv = (ctxt)->src2.val; \
318 _type _srcv = (ctxt)->src.val; \
319 _type _dstv = (ctxt)->dst.val; \
321 __asm__ __volatile__ ( \
322 _PRE_EFLAGS("0", "5", "2") \
323 _op _suffix " %4,%1 \n" \
324 _POST_EFLAGS("0", "5", "2") \
325 : "=m" ((ctxt)->eflags), "+r" (_dstv), "=&r" (_tmp) \
326 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
329 (ctxt)->src2.val = (unsigned long) _clv; \
330 (ctxt)->src2.val = (unsigned long) _srcv; \
331 (ctxt)->dst.val = (unsigned long) _dstv; \
334 #define emulate_2op_cl(ctxt, _op) \
336 switch ((ctxt)->dst.bytes) { \
338 __emulate_2op_cl(ctxt, _op, "w", u16); \
341 __emulate_2op_cl(ctxt, _op, "l", u32); \
344 ON64(__emulate_2op_cl(ctxt, _op, "q", ulong)); \
349 #define __emulate_1op(ctxt, _op, _suffix) \
351 unsigned long _tmp; \
353 __asm__ __volatile__ ( \
354 _PRE_EFLAGS("0", "3", "2") \
355 _op _suffix " %1; " \
356 _POST_EFLAGS("0", "3", "2") \
357 : "=m" ((ctxt)->eflags), "+m" ((ctxt)->dst.val), \
359 : "i" (EFLAGS_MASK)); \
362 /* Instruction has only one explicit operand (no source operand). */
363 #define emulate_1op(ctxt, _op) \
365 switch ((ctxt)->dst.bytes) { \
366 case 1: __emulate_1op(ctxt, _op, "b"); break; \
367 case 2: __emulate_1op(ctxt, _op, "w"); break; \
368 case 4: __emulate_1op(ctxt, _op, "l"); break; \
369 case 8: ON64(__emulate_1op(ctxt, _op, "q")); break; \
373 #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \
375 unsigned long _tmp; \
376 ulong *rax = &(ctxt)->regs[VCPU_REGS_RAX]; \
377 ulong *rdx = &(ctxt)->regs[VCPU_REGS_RDX]; \
379 __asm__ __volatile__ ( \
380 _PRE_EFLAGS("0", "5", "1") \
382 _op _suffix " %6; " \
384 _POST_EFLAGS("0", "5", "1") \
385 ".pushsection .fixup,\"ax\" \n\t" \
386 "3: movb $1, %4 \n\t" \
389 _ASM_EXTABLE(1b, 3b) \
390 : "=m" ((ctxt)->eflags), "=&r" (_tmp), \
391 "+a" (*rax), "+d" (*rdx), "+qm"(_ex) \
392 : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val), \
393 "a" (*rax), "d" (*rdx)); \
396 /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
397 #define emulate_1op_rax_rdx(ctxt, _op, _ex) \
399 switch((ctxt)->src.bytes) { \
401 __emulate_1op_rax_rdx(ctxt, _op, "b", _ex); \
404 __emulate_1op_rax_rdx(ctxt, _op, "w", _ex); \
407 __emulate_1op_rax_rdx(ctxt, _op, "l", _ex); \
410 __emulate_1op_rax_rdx(ctxt, _op, "q", _ex)); \
415 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
416 enum x86_intercept intercept,
417 enum x86_intercept_stage stage)
419 struct x86_instruction_info info = {
420 .intercept = intercept,
421 .rep_prefix = ctxt->rep_prefix,
422 .modrm_mod = ctxt->modrm_mod,
423 .modrm_reg = ctxt->modrm_reg,
424 .modrm_rm = ctxt->modrm_rm,
425 .src_val = ctxt->src.val64,
426 .src_bytes = ctxt->src.bytes,
427 .dst_bytes = ctxt->dst.bytes,
428 .ad_bytes = ctxt->ad_bytes,
429 .next_rip = ctxt->eip,
432 return ctxt->ops->intercept(ctxt, &info, stage);
435 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
437 return (1UL << (ctxt->ad_bytes << 3)) - 1;
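/* Example: ad_bytes == 2 yields 0xffff, ad_bytes == 4 yields 0xffffffff. */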
440 /* Access/update address held in a register, based on addressing mode. */
441 static inline unsigned long
442 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
444 if (ctxt->ad_bytes == sizeof(unsigned long))
447 return reg & ad_mask(ctxt);
450 static inline unsigned long
451 register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
453 return address_mask(ctxt, reg);
457 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
459 if (ctxt->ad_bytes == sizeof(unsigned long))
462 *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
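/*
 * Example: with ad_bytes == 2 and *reg == 0xffff, an increment of 1
 * wraps the low 16 bits to 0 and leaves the upper register bits intact.
 */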
465 static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
467 register_address_increment(ctxt, &ctxt->_eip, rel);
470 static u32 desc_limit_scaled(struct desc_struct *desc)
472 u32 limit = get_desc_limit(desc);
474 return desc->g ? (limit << 12) | 0xfff : limit;
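/* Example: limit 0xfffff with G=1 scales to (0xfffff << 12) | 0xfff == 0xffffffff. */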
477 static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
479 ctxt->has_seg_override = true;
480 ctxt->seg_override = seg;
483 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
485 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
488 return ctxt->ops->get_cached_segment_base(ctxt, seg);
491 static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
493 if (!ctxt->has_seg_override)
496 return ctxt->seg_override;
499 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
500 u32 error, bool valid)
502 ctxt->exception.vector = vec;
503 ctxt->exception.error_code = error;
504 ctxt->exception.error_code_valid = valid;
505 return X86EMUL_PROPAGATE_FAULT;
508 static int emulate_db(struct x86_emulate_ctxt *ctxt)
510 return emulate_exception(ctxt, DB_VECTOR, 0, false);
513 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
515 return emulate_exception(ctxt, GP_VECTOR, err, true);
518 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
520 return emulate_exception(ctxt, SS_VECTOR, err, true);
523 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
525 return emulate_exception(ctxt, UD_VECTOR, 0, false);
528 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
530 return emulate_exception(ctxt, TS_VECTOR, err, true);
533 static int emulate_de(struct x86_emulate_ctxt *ctxt)
535 return emulate_exception(ctxt, DE_VECTOR, 0, false);
538 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
540 return emulate_exception(ctxt, NM_VECTOR, 0, false);
543 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
546 struct desc_struct desc;
548 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
552 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
557 struct desc_struct desc;
559 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
560 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
564 * x86 defines three classes of vector instructions: explicitly
565 * aligned, explicitly unaligned, and the rest, which change behaviour
566 * depending on whether they're AVX encoded or not.
568 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
569 * subject to the same check.
571 static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
573 if (likely(size < 16))
576 if (ctxt->d & Aligned)
578 else if (ctxt->d & Unaligned)
580 else if (ctxt->d & Avx)
586 static int __linearize(struct x86_emulate_ctxt *ctxt,
587 struct segmented_address addr,
588 unsigned size, bool write, bool fetch,
591 struct desc_struct desc;
598 la = seg_base(ctxt, addr.seg) + addr.ea;
599 switch (ctxt->mode) {
600 case X86EMUL_MODE_REAL:
602 case X86EMUL_MODE_PROT64:
603 if (((signed long)la << 16) >> 16 != la)
604 return emulate_gp(ctxt, 0);
607 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
611 /* code segment or read-only data segment */
612 if (((desc.type & 8) || !(desc.type & 2)) && write)
614 /* unreadable code segment */
615 if (!fetch && (desc.type & 8) && !(desc.type & 2))
617 lim = desc_limit_scaled(&desc);
618 if ((desc.type & 8) || !(desc.type & 4)) {
619 /* expand-up segment */
620 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
623 /* expand-down segment */
624 if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
626 lim = desc.d ? 0xffffffff : 0xffff;
627 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
630 cpl = ctxt->ops->cpl(ctxt);
633 if (!(desc.type & 8)) {
637 } else if ((desc.type & 8) && !(desc.type & 4)) {
638 /* nonconforming code segment */
641 } else if ((desc.type & 8) && (desc.type & 4)) {
642 /* conforming code segment */
648 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
650 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
651 return emulate_gp(ctxt, 0);
653 return X86EMUL_CONTINUE;
655 if (addr.seg == VCPU_SREG_SS)
656 return emulate_ss(ctxt, addr.seg);
658 return emulate_gp(ctxt, addr.seg);
661 static int linearize(struct x86_emulate_ctxt *ctxt,
662 struct segmented_address addr,
663 unsigned size, bool write,
666 return __linearize(ctxt, addr, size, write, false, linear);
670 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
671 struct segmented_address addr,
678 rc = linearize(ctxt, addr, size, false, &linear);
679 if (rc != X86EMUL_CONTINUE)
681 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
685 * Fetch the next byte of the instruction being emulated which is pointed to
686 * by ctxt->_eip, then increment ctxt->_eip.
688 * Also prefetch the remaining bytes of the instruction without crossing page
689 * boundary if they are not in fetch_cache yet.
691 static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
693 struct fetch_cache *fc = &ctxt->fetch;
697 if (ctxt->_eip == fc->end) {
698 unsigned long linear;
699 struct segmented_address addr = { .seg = VCPU_SREG_CS,
701 cur_size = fc->end - fc->start;
702 size = min(15UL - cur_size,
703 PAGE_SIZE - offset_in_page(ctxt->_eip));
704 rc = __linearize(ctxt, addr, size, false, true, &linear);
705 if (unlikely(rc != X86EMUL_CONTINUE))
707 rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
708 size, &ctxt->exception);
709 if (unlikely(rc != X86EMUL_CONTINUE))
713 *dest = fc->data[ctxt->_eip - fc->start];
715 return X86EMUL_CONTINUE;
718 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
719 void *dest, unsigned size)
723 /* x86 instructions are limited to 15 bytes. */
724 if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
725 return X86EMUL_UNHANDLEABLE;
727 rc = do_insn_fetch_byte(ctxt, dest++);
728 if (rc != X86EMUL_CONTINUE)
731 return X86EMUL_CONTINUE;
734 /* Fetch next part of the instruction being emulated. */
735 #define insn_fetch(_type, _ctxt) \
736 ({ unsigned long _x; \
737 rc = do_insn_fetch(_ctxt, &_x, sizeof(_type)); \
738 if (rc != X86EMUL_CONTINUE) \
743 #define insn_fetch_arr(_arr, _size, _ctxt) \
744 ({ rc = do_insn_fetch(_ctxt, _arr, (_size)); \
745 if (rc != X86EMUL_CONTINUE) \
750 * Given the 'reg' portion of a ModRM byte, and a register block, return a
751 * pointer into the block that addresses the relevant register.
752 * @highbyte_regs specifies whether to decode AH, CH, DH and BH.
754 static void *decode_register(u8 modrm_reg, unsigned long *regs,
759 p = &regs[modrm_reg];
760 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
761 p = (unsigned char *)&regs[modrm_reg & 3] + 1;
765 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
766 struct segmented_address addr,
767 u16 *size, unsigned long *address, int op_bytes)
774 rc = segmented_read_std(ctxt, addr, size, 2);
775 if (rc != X86EMUL_CONTINUE)
778 rc = segmented_read_std(ctxt, addr, address, op_bytes);
782 static int test_cc(unsigned int condition, unsigned int flags)
786 switch ((condition & 15) >> 1) {
788 rc |= (flags & EFLG_OF);
790 case 1: /* b/c/nae */
791 rc |= (flags & EFLG_CF);
794 rc |= (flags & EFLG_ZF);
797 rc |= (flags & (EFLG_CF|EFLG_ZF));
800 rc |= (flags & EFLG_SF);
803 rc |= (flags & EFLG_PF);
806 rc |= (flags & EFLG_ZF);
809 rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
813 /* Odd condition identifiers (lsb == 1) have inverted sense. */
814 return (!!rc ^ (condition & 1));
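/*
 * Worked example: condition 0x5 (jne) gives (condition & 15) >> 1 == 2,
 * so rc reflects EFLG_ZF; the set lsb then inverts the sense, making the
 * function return true exactly when ZF is clear.
 */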
817 static void fetch_register_operand(struct operand *op)
821 op->val = *(u8 *)op->addr.reg;
824 op->val = *(u16 *)op->addr.reg;
827 op->val = *(u32 *)op->addr.reg;
830 op->val = *(u64 *)op->addr.reg;
835 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
837 ctxt->ops->get_fpu(ctxt);
839 case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
840 case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
841 case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
842 case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
843 case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
844 case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
845 case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
846 case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
848 case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
849 case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
850 case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
851 case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
852 case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
853 case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
854 case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
855 case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
859 ctxt->ops->put_fpu(ctxt);
862 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
865 ctxt->ops->get_fpu(ctxt);
867 case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
868 case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
869 case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
870 case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
871 case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
872 case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
873 case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
874 case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
876 case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
877 case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
878 case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
879 case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
880 case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
881 case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
882 case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
883 case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
887 ctxt->ops->put_fpu(ctxt);
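/*
 * The switch statements above exist because inline asm cannot select an
 * %xmm register operand at run time, so every register needs its own
 * movdqu; get_fpu()/put_fpu() bracket the access so the host FPU state
 * is owned while the XMM registers are read or written.
 */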
890 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
893 unsigned reg = ctxt->modrm_reg;
894 int highbyte_regs = ctxt->rex_prefix == 0;
896 if (!(ctxt->d & ModRM))
897 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
903 read_sse_reg(ctxt, &op->vec_val, reg);
908 if (ctxt->d & ByteOp) {
909 op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
912 op->addr.reg = decode_register(reg, ctxt->regs, 0);
913 op->bytes = ctxt->op_bytes;
915 fetch_register_operand(op);
916 op->orig_val = op->val;
919 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
923 int index_reg = 0, base_reg = 0, scale;
924 int rc = X86EMUL_CONTINUE;
927 if (ctxt->rex_prefix) {
928 ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1; /* REX.R */
929 index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
930 ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
933 ctxt->modrm = insn_fetch(u8, ctxt);
934 ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
935 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
936 ctxt->modrm_rm |= (ctxt->modrm & 0x07);
937 ctxt->modrm_seg = VCPU_SREG_DS;
939 if (ctxt->modrm_mod == 3) {
941 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
942 op->addr.reg = decode_register(ctxt->modrm_rm,
943 ctxt->regs, ctxt->d & ByteOp);
947 op->addr.xmm = ctxt->modrm_rm;
948 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
951 fetch_register_operand(op);
957 if (ctxt->ad_bytes == 2) {
958 unsigned bx = ctxt->regs[VCPU_REGS_RBX];
959 unsigned bp = ctxt->regs[VCPU_REGS_RBP];
960 unsigned si = ctxt->regs[VCPU_REGS_RSI];
961 unsigned di = ctxt->regs[VCPU_REGS_RDI];
963 /* 16-bit ModR/M decode. */
964 switch (ctxt->modrm_mod) {
966 if (ctxt->modrm_rm == 6)
967 modrm_ea += insn_fetch(u16, ctxt);
970 modrm_ea += insn_fetch(s8, ctxt);
973 modrm_ea += insn_fetch(u16, ctxt);
976 switch (ctxt->modrm_rm) {
996 if (ctxt->modrm_mod != 0)
1003 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1004 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1005 ctxt->modrm_seg = VCPU_SREG_SS;
1006 modrm_ea = (u16)modrm_ea;
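/*
 * Worked example: modrm 0x47 (mod=01, rm=7) in 16-bit mode decodes as
 * [BX + disp8]: mod=01 fetched the s8 displacement above, rm=7 selects
 * BX, and the final (u16) cast applies 16-bit wrap-around.
 */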
1008 /* 32/64-bit ModR/M decode. */
1009 if ((ctxt->modrm_rm & 7) == 4) {
1010 sib = insn_fetch(u8, ctxt);
1011 index_reg |= (sib >> 3) & 7;
1012 base_reg |= sib & 7;
1015 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1016 modrm_ea += insn_fetch(s32, ctxt);
1018 modrm_ea += ctxt->regs[base_reg];
1020 modrm_ea += ctxt->regs[index_reg] << scale;
1021 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1022 if (ctxt->mode == X86EMUL_MODE_PROT64)
1023 ctxt->rip_relative = 1;
1025 modrm_ea += ctxt->regs[ctxt->modrm_rm];
1026 switch (ctxt->modrm_mod) {
1028 if (ctxt->modrm_rm == 5)
1029 modrm_ea += insn_fetch(s32, ctxt);
1032 modrm_ea += insn_fetch(s8, ctxt);
1035 modrm_ea += insn_fetch(s32, ctxt);
1039 op->addr.mem.ea = modrm_ea;
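/*
 * Worked example (32-bit): modrm 0x44 with sib 0x98 decodes to
 * [eax + ebx*4 + disp8]: rm=4 pulls in the SIB byte, whose base=0 (eax),
 * index=3 (ebx) and scale=2, and mod=01 adds the s8 displacement.
 */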
1044 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1047 int rc = X86EMUL_CONTINUE;
1050 switch (ctxt->ad_bytes) {
1052 op->addr.mem.ea = insn_fetch(u16, ctxt);
1055 op->addr.mem.ea = insn_fetch(u32, ctxt);
1058 op->addr.mem.ea = insn_fetch(u64, ctxt);
1065 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1069 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1070 mask = ~(ctxt->dst.bytes * 8 - 1);
1072 if (ctxt->src.bytes == 2)
1073 sv = (s16)ctxt->src.val & (s16)mask;
1074 else if (ctxt->src.bytes == 4)
1075 sv = (s32)ctxt->src.val & (s32)mask;
1077 ctxt->dst.addr.mem.ea += (sv >> 3);
1080 /* only subword offset */
1081 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
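/*
 * Example: "bt [mem], reg" with 32-bit operands and a bit index of 100
 * advances the effective address by (100 & ~31) / 8 == 12 bytes and
 * keeps 100 & 31 == 4 as the in-word bit offset.
 */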
1084 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1085 unsigned long addr, void *dest, unsigned size)
1088 struct read_cache *mc = &ctxt->mem_read;
1091 int n = min(size, 8u);
1093 if (mc->pos < mc->end)
1096 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
1098 if (rc != X86EMUL_CONTINUE)
1103 memcpy(dest, mc->data + mc->pos, n);
1108 return X86EMUL_CONTINUE;
1111 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1112 struct segmented_address addr,
1119 rc = linearize(ctxt, addr, size, false, &linear);
1120 if (rc != X86EMUL_CONTINUE)
1122 return read_emulated(ctxt, linear, data, size);
1125 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1126 struct segmented_address addr,
1133 rc = linearize(ctxt, addr, size, true, &linear);
1134 if (rc != X86EMUL_CONTINUE)
1136 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1140 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1141 struct segmented_address addr,
1142 const void *orig_data, const void *data,
1148 rc = linearize(ctxt, addr, size, true, &linear);
1149 if (rc != X86EMUL_CONTINUE)
1151 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1152 size, &ctxt->exception);
1155 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1156 unsigned int size, unsigned short port,
1159 struct read_cache *rc = &ctxt->io_read;
1161 if (rc->pos == rc->end) { /* refill pio read ahead */
1162 unsigned int in_page, n;
1163 unsigned int count = ctxt->rep_prefix ?
1164 address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
1165 in_page = (ctxt->eflags & EFLG_DF) ?
1166 offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
1167 PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
1168 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
1172 rc->pos = rc->end = 0;
1173 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1178 memcpy(dest, rc->data + rc->pos, size);
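/*
 * Example: "rep insb" with RCX == 1000 and 900 bytes left in the page
 * fills rc->data with min(900, sizeof(rc->data), 1000) bytes in a single
 * ->pio_in_emulated() call; later iterations are served from the cache.
 */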
1183 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1184 u16 index, struct desc_struct *desc)
1189 ctxt->ops->get_idt(ctxt, &dt);
1191 if (dt.size < index * 8 + 7)
1192 return emulate_gp(ctxt, index << 3 | 0x2);
1194 addr = dt.address + index * 8;
1195 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1199 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1200 u16 selector, struct desc_ptr *dt)
1202 struct x86_emulate_ops *ops = ctxt->ops;
1204 if (selector & 1 << 2) {
1205 struct desc_struct desc;
1208 memset (dt, 0, sizeof *dt);
1209 if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
1212 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1213 dt->address = get_desc_base(&desc);
1215 ops->get_gdt(ctxt, dt);
1218 /* allowed only for 8-byte segment descriptors */
1219 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1220 u16 selector, struct desc_struct *desc)
1223 u16 index = selector >> 3;
1226 get_descriptor_table_ptr(ctxt, selector, &dt);
1228 if (dt.size < index * 8 + 7)
1229 return emulate_gp(ctxt, selector & 0xfffc);
1231 addr = dt.address + index * 8;
1232 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1236 /* allowed only for 8-byte segment descriptors */
1237 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1238 u16 selector, struct desc_struct *desc)
1241 u16 index = selector >> 3;
1244 get_descriptor_table_ptr(ctxt, selector, &dt);
1246 if (dt.size < index * 8 + 7)
1247 return emulate_gp(ctxt, selector & 0xfffc);
1249 addr = dt.address + index * 8;
1250 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1254 /* Does not support long mode */
1255 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1256 u16 selector, int seg)
1258 struct desc_struct seg_desc;
1260 unsigned err_vec = GP_VECTOR;
1262 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1265 memset(&seg_desc, 0, sizeof seg_desc);
1267 if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
1268 || ctxt->mode == X86EMUL_MODE_REAL) {
1269 /* set real mode segment descriptor */
1270 set_desc_base(&seg_desc, selector << 4);
1271 set_desc_limit(&seg_desc, 0xffff);
1275 if (ctxt->mode == X86EMUL_MODE_VM86)
1280 /* NULL selector is not valid for TR, CS and SS */
1281 if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
1285 /* TR should be in GDT only */
1286 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1289 if (null_selector) /* for NULL selector skip all following checks */
1292 ret = read_segment_descriptor(ctxt, selector, &seg_desc);
1293 if (ret != X86EMUL_CONTINUE)
1296 err_code = selector & 0xfffc;
1297 err_vec = GP_VECTOR;
1299 /* can't load a system descriptor into a segment selector */
1300 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1304 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1310 cpl = ctxt->ops->cpl(ctxt);
1315 * segment is not a writable data segment, or the selector's
1316 * RPL != CPL, or the descriptor's DPL != CPL
1318 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1322 if (!(seg_desc.type & 8))
1325 if (seg_desc.type & 4) {
1331 if (rpl > cpl || dpl != cpl)
1334 /* CS(RPL) <- CPL */
1335 selector = (selector & 0xfffc) | cpl;
1338 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1341 case VCPU_SREG_LDTR:
1342 if (seg_desc.s || seg_desc.type != 2)
1345 default: /* DS, ES, FS, or GS */
1347 * segment is not a data or readable code segment or
1348 * ((segment is a data or nonconforming code segment)
1349 * and (both RPL and CPL > DPL))
1351 if ((seg_desc.type & 0xa) == 0x8 ||
1352 (((seg_desc.type & 0xc) != 0xc) &&
1353 (rpl > dpl && cpl > dpl)))
1359 /* mark segment as accessed */
1361 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1362 if (ret != X86EMUL_CONTINUE)
1366 ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
1367 return X86EMUL_CONTINUE;
1369 emulate_exception(ctxt, err_vec, err_code, true);
1370 return X86EMUL_PROPAGATE_FAULT;
1373 static void write_register_operand(struct operand *op)
1375 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1376 switch (op->bytes) {
1378 *(u8 *)op->addr.reg = (u8)op->val;
1381 *(u16 *)op->addr.reg = (u16)op->val;
1384 *op->addr.reg = (u32)op->val;
1385 break; /* 64b: zero-extend */
1387 *op->addr.reg = op->val;
1392 static int writeback(struct x86_emulate_ctxt *ctxt)
1396 switch (ctxt->dst.type) {
1398 write_register_operand(&ctxt->dst);
1401 if (ctxt->lock_prefix)
1402 rc = segmented_cmpxchg(ctxt,
1404 &ctxt->dst.orig_val,
1408 rc = segmented_write(ctxt,
1412 if (rc != X86EMUL_CONTINUE)
1416 write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
1424 return X86EMUL_CONTINUE;
1427 static int em_push(struct x86_emulate_ctxt *ctxt)
1429 struct segmented_address addr;
1431 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes);
1432 addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
1433 addr.seg = VCPU_SREG_SS;
1435 /* Disable writeback. */
1436 ctxt->dst.type = OP_NONE;
1437 return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
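/*
 * A push therefore decrements RSP by the operand size (wrapped to the
 * current stack address size) first and then stores the operand at
 * SS:RSP; writeback is disabled because the segmented_write() above is
 * the only side effect.
 */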
1440 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1441 void *dest, int len)
1444 struct segmented_address addr;
1446 addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
1447 addr.seg = VCPU_SREG_SS;
1448 rc = segmented_read(ctxt, addr, dest, len);
1449 if (rc != X86EMUL_CONTINUE)
1452 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
1456 static int em_pop(struct x86_emulate_ctxt *ctxt)
1458 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1461 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1462 void *dest, int len)
1465 unsigned long val, change_mask;
1466 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1467 int cpl = ctxt->ops->cpl(ctxt);
1469 rc = emulate_pop(ctxt, &val, len);
1470 if (rc != X86EMUL_CONTINUE)
1473 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1474 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1476 switch(ctxt->mode) {
1477 case X86EMUL_MODE_PROT64:
1478 case X86EMUL_MODE_PROT32:
1479 case X86EMUL_MODE_PROT16:
1480 if (cpl == 0)
1481 change_mask |= EFLG_IOPL;
1482 if (cpl <= iopl)
1483 change_mask |= EFLG_IF;
1485 case X86EMUL_MODE_VM86:
1486 if (iopl < 3)
1487 return emulate_gp(ctxt, 0);
1488 change_mask |= EFLG_IF;
1490 default: /* real mode */
1491 change_mask |= (EFLG_IOPL | EFLG_IF);
1495 *(unsigned long *)dest =
1496 (ctxt->eflags & ~change_mask) | (val & change_mask);
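/*
 * Example: at CPL 3 in protected mode with IOPL 0, neither EFLG_IOPL nor
 * EFLG_IF made it into change_mask, so the popped value can change
 * neither the IOPL field nor the interrupt flag.
 */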
1501 static int em_popf(struct x86_emulate_ctxt *ctxt)
1503 ctxt->dst.type = OP_REG;
1504 ctxt->dst.addr.reg = &ctxt->eflags;
1505 ctxt->dst.bytes = ctxt->op_bytes;
1506 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1509 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1511 int seg = ctxt->src2.val;
1513 ctxt->src.val = get_segment_selector(ctxt, seg);
1515 return em_push(ctxt);
1518 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1520 int seg = ctxt->src2.val;
1521 unsigned long selector;
1524 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1525 if (rc != X86EMUL_CONTINUE)
1528 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1532 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1534 unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
1535 int rc = X86EMUL_CONTINUE;
1536 int reg = VCPU_REGS_RAX;
1538 while (reg <= VCPU_REGS_RDI) {
1539 (reg == VCPU_REGS_RSP) ?
1540 (ctxt->src.val = old_esp) : (ctxt->src.val = ctxt->regs[reg]);
1543 if (rc != X86EMUL_CONTINUE)
1552 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1554 ctxt->src.val = (unsigned long)ctxt->eflags;
1555 return em_push(ctxt);
1558 static int em_popa(struct x86_emulate_ctxt *ctxt)
1560 int rc = X86EMUL_CONTINUE;
1561 int reg = VCPU_REGS_RDI;
1563 while (reg >= VCPU_REGS_RAX) {
1564 if (reg == VCPU_REGS_RSP) {
1565 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
1570 rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
1571 if (rc != X86EMUL_CONTINUE)
1578 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1580 struct x86_emulate_ops *ops = ctxt->ops;
1587 /* TODO: Add limit checks */
1588 ctxt->src.val = ctxt->eflags;
1590 if (rc != X86EMUL_CONTINUE)
1593 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1595 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1597 if (rc != X86EMUL_CONTINUE)
1600 ctxt->src.val = ctxt->_eip;
1602 if (rc != X86EMUL_CONTINUE)
1605 ops->get_idt(ctxt, &dt);
1607 eip_addr = dt.address + (irq << 2);
1608 cs_addr = dt.address + (irq << 2) + 2;
1610 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1611 if (rc != X86EMUL_CONTINUE)
1614 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1615 if (rc != X86EMUL_CONTINUE)
1618 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1619 if (rc != X86EMUL_CONTINUE)
1627 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1629 switch(ctxt->mode) {
1630 case X86EMUL_MODE_REAL:
1631 return emulate_int_real(ctxt, irq);
1632 case X86EMUL_MODE_VM86:
1633 case X86EMUL_MODE_PROT16:
1634 case X86EMUL_MODE_PROT32:
1635 case X86EMUL_MODE_PROT64:
1637 /* Protected mode interrupts are not yet implemented */
1638 return X86EMUL_UNHANDLEABLE;
1642 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1644 int rc = X86EMUL_CONTINUE;
1645 unsigned long temp_eip = 0;
1646 unsigned long temp_eflags = 0;
1647 unsigned long cs = 0;
1648 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1649 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1650 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1651 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1653 /* TODO: Add stack limit check */
1655 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1657 if (rc != X86EMUL_CONTINUE)
1660 if (temp_eip & ~0xffff)
1661 return emulate_gp(ctxt, 0);
1663 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1665 if (rc != X86EMUL_CONTINUE)
1668 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1670 if (rc != X86EMUL_CONTINUE)
1673 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1675 if (rc != X86EMUL_CONTINUE)
1678 ctxt->_eip = temp_eip;
1681 if (ctxt->op_bytes == 4)
1682 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1683 else if (ctxt->op_bytes == 2) {
1684 ctxt->eflags &= ~0xffff;
1685 ctxt->eflags |= temp_eflags;
1688 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1689 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1694 static int em_iret(struct x86_emulate_ctxt *ctxt)
1696 switch(ctxt->mode) {
1697 case X86EMUL_MODE_REAL:
1698 return emulate_iret_real(ctxt);
1699 case X86EMUL_MODE_VM86:
1700 case X86EMUL_MODE_PROT16:
1701 case X86EMUL_MODE_PROT32:
1702 case X86EMUL_MODE_PROT64:
1704 /* iret from protected mode is not yet implemented */
1705 return X86EMUL_UNHANDLEABLE;
1709 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1714 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1716 rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
1717 if (rc != X86EMUL_CONTINUE)
1721 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
1722 return X86EMUL_CONTINUE;
1725 static int em_grp2(struct x86_emulate_ctxt *ctxt)
1727 switch (ctxt->modrm_reg) {
1729 emulate_2op_SrcB(ctxt, "rol");
1732 emulate_2op_SrcB(ctxt, "ror");
1735 emulate_2op_SrcB(ctxt, "rcl");
1738 emulate_2op_SrcB(ctxt, "rcr");
1740 case 4: /* sal/shl */
1741 case 6: /* sal/shl */
1742 emulate_2op_SrcB(ctxt, "sal");
1745 emulate_2op_SrcB(ctxt, "shr");
1748 emulate_2op_SrcB(ctxt, "sar");
1751 return X86EMUL_CONTINUE;
1754 static int em_not(struct x86_emulate_ctxt *ctxt)
1756 ctxt->dst.val = ~ctxt->dst.val;
1757 return X86EMUL_CONTINUE;
1760 static int em_neg(struct x86_emulate_ctxt *ctxt)
1762 emulate_1op(ctxt, "neg");
1763 return X86EMUL_CONTINUE;
1766 static int em_mul_ex(struct x86_emulate_ctxt *ctxt)
1770 emulate_1op_rax_rdx(ctxt, "mul", ex);
1771 return X86EMUL_CONTINUE;
1774 static int em_imul_ex(struct x86_emulate_ctxt *ctxt)
1778 emulate_1op_rax_rdx(ctxt, "imul", ex);
1779 return X86EMUL_CONTINUE;
1782 static int em_div_ex(struct x86_emulate_ctxt *ctxt)
1786 emulate_1op_rax_rdx(ctxt, "div", de);
1788 return emulate_de(ctxt);
1789 return X86EMUL_CONTINUE;
1792 static int em_idiv_ex(struct x86_emulate_ctxt *ctxt)
1796 emulate_1op_rax_rdx(ctxt, "idiv", de);
1798 return emulate_de(ctxt);
1799 return X86EMUL_CONTINUE;
1802 static int em_grp45(struct x86_emulate_ctxt *ctxt)
1804 int rc = X86EMUL_CONTINUE;
1806 switch (ctxt->modrm_reg) {
1808 emulate_1op(ctxt, "inc");
1811 emulate_1op(ctxt, "dec");
1813 case 2: /* call near abs */ {
1815 old_eip = ctxt->_eip;
1816 ctxt->_eip = ctxt->src.val;
1817 ctxt->src.val = old_eip;
1821 case 4: /* jmp abs */
1822 ctxt->_eip = ctxt->src.val;
1824 case 5: /* jmp far */
1825 rc = em_jmp_far(ctxt);
1834 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
1836 u64 old = ctxt->dst.orig_val64;
1838 if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
1839 ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
1840 ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1841 ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1842 ctxt->eflags &= ~EFLG_ZF;
1844 ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
1845 (u32) ctxt->regs[VCPU_REGS_RBX];
1847 ctxt->eflags |= EFLG_ZF;
1849 return X86EMUL_CONTINUE;
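/*
 * Summary of the two branches above: if EDX:EAX matches the 64-bit
 * destination, ECX:EBX is stored and ZF is set; otherwise the old value
 * is returned in EDX:EAX and ZF is cleared.
 */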
1852 static int em_ret(struct x86_emulate_ctxt *ctxt)
1854 ctxt->dst.type = OP_REG;
1855 ctxt->dst.addr.reg = &ctxt->_eip;
1856 ctxt->dst.bytes = ctxt->op_bytes;
1857 return em_pop(ctxt);
1860 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
1865 rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
1866 if (rc != X86EMUL_CONTINUE)
1868 if (ctxt->op_bytes == 4)
1869 ctxt->_eip = (u32)ctxt->_eip;
1870 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1871 if (rc != X86EMUL_CONTINUE)
1873 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1877 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
1879 /* Save real source value, then compare EAX against destination. */
1880 ctxt->src.orig_val = ctxt->src.val;
1881 ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
1882 emulate_2op_SrcV(ctxt, "cmp");
1884 if (ctxt->eflags & EFLG_ZF) {
1885 /* Success: write back to memory. */
1886 ctxt->dst.val = ctxt->src.orig_val;
1888 /* Failure: write the value we saw to EAX. */
1889 ctxt->dst.type = OP_REG;
1890 ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
1892 return X86EMUL_CONTINUE;
1895 static int em_lseg(struct x86_emulate_ctxt *ctxt)
1897 int seg = ctxt->src2.val;
1901 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1903 rc = load_segment_descriptor(ctxt, sel, seg);
1904 if (rc != X86EMUL_CONTINUE)
1907 ctxt->dst.val = ctxt->src.val;
1912 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1913 struct desc_struct *cs, struct desc_struct *ss)
1917 memset(cs, 0, sizeof(struct desc_struct));
1918 ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
1919 memset(ss, 0, sizeof(struct desc_struct));
1921 cs->l = 0; /* will be adjusted later */
1922 set_desc_base(cs, 0); /* flat segment */
1923 cs->g = 1; /* 4kb granularity */
1924 set_desc_limit(cs, 0xfffff); /* 4GB limit */
1925 cs->type = 0x0b; /* Read, Execute, Accessed */
1927 cs->dpl = 0; /* will be adjusted later */
1931 set_desc_base(ss, 0); /* flat segment */
1932 set_desc_limit(ss, 0xfffff); /* 4GB limit */
1933 ss->g = 1; /* 4kb granularity */
1935 ss->type = 0x03; /* Read/Write, Accessed */
1936 ss->d = 1; /* 32bit stack segment */
1941 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
1943 u32 eax, ebx, ecx, edx;
1946 return ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)
1947 && ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
1948 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
1949 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
1952 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
1954 struct x86_emulate_ops *ops = ctxt->ops;
1955 u32 eax, ebx, ecx, edx;
1958 * syscall should always be enabled in long mode, so only check the
1959 * vendor (via cpuid) when another mode is active...
1961 if (ctxt->mode == X86EMUL_MODE_PROT64)
1966 if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
1968 * Intel ("GenuineIntel")
1969 * Note: Intel CPUs only support "syscall" in 64-bit long
1970 * mode, so a 32-bit compat application running in a 64-bit
1971 * guest will #UD. While this behaviour could be fixed by
1972 * emulating the AMD response, AMD CPUs can't be made to
1973 * behave like Intel ones.
1975 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
1976 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
1977 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
1980 /* AMD ("AuthenticAMD") */
1981 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
1982 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
1983 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
1986 /* AMD ("AMDisbetter!") */
1987 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
1988 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
1989 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
1993 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
1997 static int em_syscall(struct x86_emulate_ctxt *ctxt)
1999 struct x86_emulate_ops *ops = ctxt->ops;
2000 struct desc_struct cs, ss;
2005 /* syscall is not available in real mode or VM86 mode */
2006 if (ctxt->mode == X86EMUL_MODE_REAL ||
2007 ctxt->mode == X86EMUL_MODE_VM86)
2008 return emulate_ud(ctxt);
2010 if (!(em_syscall_is_enabled(ctxt)))
2011 return emulate_ud(ctxt);
2013 ops->get_msr(ctxt, MSR_EFER, &efer);
2014 setup_syscalls_segments(ctxt, &cs, &ss);
2016 if (!(efer & EFER_SCE))
2017 return emulate_ud(ctxt);
2019 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2020 msr_data >>= 32;
2021 cs_sel = (u16)(msr_data & 0xfffc);
2022 ss_sel = (u16)(msr_data + 8);
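/*
 * MSR_STAR bits 47:32 hold the SYSCALL CS selector: after the shift
 * above, the 0xfffc mask forces RPL 0, and SS is taken as the next GDT
 * entry, hence selector + 8.
 */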
2024 if (efer & EFER_LMA) {
2028 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2029 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2031 ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
2032 if (efer & EFER_LMA) {
2033 #ifdef CONFIG_X86_64
2034 ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
2037 ctxt->mode == X86EMUL_MODE_PROT64 ?
2038 MSR_LSTAR : MSR_CSTAR, &msr_data);
2039 ctxt->_eip = msr_data;
2041 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2042 ctxt->eflags &= ~(msr_data | EFLG_RF);
2046 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2047 ctxt->_eip = (u32)msr_data;
2049 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2052 return X86EMUL_CONTINUE;
2055 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2057 struct x86_emulate_ops *ops = ctxt->ops;
2058 struct desc_struct cs, ss;
2063 ops->get_msr(ctxt, MSR_EFER, &efer);
2064 /* inject #GP if in real mode */
2065 if (ctxt->mode == X86EMUL_MODE_REAL)
2066 return emulate_gp(ctxt, 0);
2069 * Not recognized on AMD in compat mode (but is recognized in legacy
2070 * mode)
2072 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2073 && !vendor_intel(ctxt))
2074 return emulate_ud(ctxt);
2076 /* XXX sysenter/sysexit have not been tested in 64-bit mode.
2077 * Therefore, we inject a #UD.
2079 if (ctxt->mode == X86EMUL_MODE_PROT64)
2080 return emulate_ud(ctxt);
2082 setup_syscalls_segments(ctxt, &cs, &ss);
2084 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2085 switch (ctxt->mode) {
2086 case X86EMUL_MODE_PROT32:
2087 if ((msr_data & 0xfffc) == 0x0)
2088 return emulate_gp(ctxt, 0);
2090 case X86EMUL_MODE_PROT64:
2091 if (msr_data == 0x0)
2092 return emulate_gp(ctxt, 0);
2096 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2097 cs_sel = (u16)msr_data;
2098 cs_sel &= ~SELECTOR_RPL_MASK;
2099 ss_sel = cs_sel + 8;
2100 ss_sel &= ~SELECTOR_RPL_MASK;
2101 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2106 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2107 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2109 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2110 ctxt->_eip = msr_data;
2112 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2113 ctxt->regs[VCPU_REGS_RSP] = msr_data;
2115 return X86EMUL_CONTINUE;
2118 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2120 struct x86_emulate_ops *ops = ctxt->ops;
2121 struct desc_struct cs, ss;
2124 u16 cs_sel = 0, ss_sel = 0;
2126 /* inject #GP if in real mode or Virtual 8086 mode */
2127 if (ctxt->mode == X86EMUL_MODE_REAL ||
2128 ctxt->mode == X86EMUL_MODE_VM86)
2129 return emulate_gp(ctxt, 0);
2131 setup_syscalls_segments(ctxt, &cs, &ss);
2133 if ((ctxt->rex_prefix & 0x8) != 0x0)
2134 usermode = X86EMUL_MODE_PROT64;
2136 usermode = X86EMUL_MODE_PROT32;
2140 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2142 case X86EMUL_MODE_PROT32:
2143 cs_sel = (u16)(msr_data + 16);
2144 if ((msr_data & 0xfffc) == 0x0)
2145 return emulate_gp(ctxt, 0);
2146 ss_sel = (u16)(msr_data + 24);
2148 case X86EMUL_MODE_PROT64:
2149 cs_sel = (u16)(msr_data + 32);
2150 if (msr_data == 0x0)
2151 return emulate_gp(ctxt, 0);
2152 ss_sel = cs_sel + 8;
2157 cs_sel |= SELECTOR_RPL_MASK;
2158 ss_sel |= SELECTOR_RPL_MASK;
2160 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2161 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2163 ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
2164 ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];
2166 return X86EMUL_CONTINUE;
2169 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2172 if (ctxt->mode == X86EMUL_MODE_REAL)
2174 if (ctxt->mode == X86EMUL_MODE_VM86)
2176 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2177 return ctxt->ops->cpl(ctxt) > iopl;
2180 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2183 struct x86_emulate_ops *ops = ctxt->ops;
2184 struct desc_struct tr_seg;
2187 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2188 unsigned mask = (1 << len) - 1;
2191 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2194 if (desc_limit_scaled(&tr_seg) < 103)
2196 base = get_desc_base(&tr_seg);
2197 #ifdef CONFIG_X86_64
2198 base |= ((u64)base3) << 32;
2200 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2201 if (r != X86EMUL_CONTINUE)
2203 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2205 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2206 if (r != X86EMUL_CONTINUE)
2208 if ((perm >> bit_idx) & mask)
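/*
 * Example: "in ax, 0x71" (len == 2) tests bits 1-2 of the bitmap word
 * that covers port 0x71: bit_idx = 0x71 & 7 == 1 and mask == 0x3, so any
 * set bit in that range denies the access.
 */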
2213 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2219 if (emulator_bad_iopl(ctxt))
2220 if (!emulator_io_port_access_allowed(ctxt, port, len))
2223 ctxt->perm_ok = true;
2228 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2229 struct tss_segment_16 *tss)
2231 tss->ip = ctxt->_eip;
2232 tss->flag = ctxt->eflags;
2233 tss->ax = ctxt->regs[VCPU_REGS_RAX];
2234 tss->cx = ctxt->regs[VCPU_REGS_RCX];
2235 tss->dx = ctxt->regs[VCPU_REGS_RDX];
2236 tss->bx = ctxt->regs[VCPU_REGS_RBX];
2237 tss->sp = ctxt->regs[VCPU_REGS_RSP];
2238 tss->bp = ctxt->regs[VCPU_REGS_RBP];
2239 tss->si = ctxt->regs[VCPU_REGS_RSI];
2240 tss->di = ctxt->regs[VCPU_REGS_RDI];
2242 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2243 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2244 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2245 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2246 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2249 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2250 struct tss_segment_16 *tss)
2254 ctxt->_eip = tss->ip;
2255 ctxt->eflags = tss->flag | 2;
2256 ctxt->regs[VCPU_REGS_RAX] = tss->ax;
2257 ctxt->regs[VCPU_REGS_RCX] = tss->cx;
2258 ctxt->regs[VCPU_REGS_RDX] = tss->dx;
2259 ctxt->regs[VCPU_REGS_RBX] = tss->bx;
2260 ctxt->regs[VCPU_REGS_RSP] = tss->sp;
2261 ctxt->regs[VCPU_REGS_RBP] = tss->bp;
2262 ctxt->regs[VCPU_REGS_RSI] = tss->si;
2263 ctxt->regs[VCPU_REGS_RDI] = tss->di;
2266 * SDM says that segment selectors are loaded before segment
2267 * descriptors
2269 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2270 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2271 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2272 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2273 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2276 * Now load the segment descriptors. If a fault happens at this
2277 * stage, it is handled in the context of the new task.
2279 ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
2280 if (ret != X86EMUL_CONTINUE)
2282 ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2283 if (ret != X86EMUL_CONTINUE)
2285 ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2286 if (ret != X86EMUL_CONTINUE)
2288 ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2289 if (ret != X86EMUL_CONTINUE)
2291 ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2292 if (ret != X86EMUL_CONTINUE)
2295 return X86EMUL_CONTINUE;
2298 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2299 u16 tss_selector, u16 old_tss_sel,
2300 ulong old_tss_base, struct desc_struct *new_desc)
2302 struct x86_emulate_ops *ops = ctxt->ops;
2303 struct tss_segment_16 tss_seg;
2305 u32 new_tss_base = get_desc_base(new_desc);
2307 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2309 if (ret != X86EMUL_CONTINUE)
2310 /* FIXME: need to provide precise fault address */
2313 save_state_to_tss16(ctxt, &tss_seg);
2315 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2317 if (ret != X86EMUL_CONTINUE)
2318 /* FIXME: need to provide precise fault address */
2321 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2323 if (ret != X86EMUL_CONTINUE)
2324 /* FIXME: need to provide precise fault address */
2327 if (old_tss_sel != 0xffff) {
2328 tss_seg.prev_task_link = old_tss_sel;
2330 ret = ops->write_std(ctxt, new_tss_base,
2331 &tss_seg.prev_task_link,
2332 sizeof tss_seg.prev_task_link,
2334 if (ret != X86EMUL_CONTINUE)
2335 /* FIXME: need to provide precise fault address */
2339 return load_state_from_tss16(ctxt, &tss_seg);
2342 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2343 struct tss_segment_32 *tss)
2345 tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
2346 tss->eip = ctxt->_eip;
2347 tss->eflags = ctxt->eflags;
2348 tss->eax = ctxt->regs[VCPU_REGS_RAX];
2349 tss->ecx = ctxt->regs[VCPU_REGS_RCX];
2350 tss->edx = ctxt->regs[VCPU_REGS_RDX];
2351 tss->ebx = ctxt->regs[VCPU_REGS_RBX];
2352 tss->esp = ctxt->regs[VCPU_REGS_RSP];
2353 tss->ebp = ctxt->regs[VCPU_REGS_RBP];
2354 tss->esi = ctxt->regs[VCPU_REGS_RSI];
2355 tss->edi = ctxt->regs[VCPU_REGS_RDI];
2357 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2358 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2359 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2360 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2361 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2362 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2363 tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2366 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2367 struct tss_segment_32 *tss)
2371 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2372 return emulate_gp(ctxt, 0);
2373 ctxt->_eip = tss->eip;
2374 ctxt->eflags = tss->eflags | 2;
2376 /* General purpose registers */
2377 ctxt->regs[VCPU_REGS_RAX] = tss->eax;
2378 ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
2379 ctxt->regs[VCPU_REGS_RDX] = tss->edx;
2380 ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
2381 ctxt->regs[VCPU_REGS_RSP] = tss->esp;
2382 ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
2383 ctxt->regs[VCPU_REGS_RSI] = tss->esi;
2384 ctxt->regs[VCPU_REGS_RDI] = tss->edi;
2387 * SDM says that segment selectors are loaded before segment
2388 * descriptors
2390 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2391 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2392 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2393 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2394 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2395 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2396 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2399 * If we're switching between Protected Mode and VM86, we need to make
2400 * sure to update the mode before loading the segment descriptors so
2401 * that the selectors are interpreted correctly.
2403 * Need to get rflags to the vcpu struct immediately because it
2404 * influences the CPL which is checked at least when loading the segment
2405 * descriptors and when pushing an error code to the new kernel stack.
2407 * TODO Introduce a separate ctxt->ops->set_cpl callback
2409 if (ctxt->eflags & X86_EFLAGS_VM)
2410 ctxt->mode = X86EMUL_MODE_VM86;
2412 ctxt->mode = X86EMUL_MODE_PROT32;
2414 ctxt->ops->set_rflags(ctxt, ctxt->eflags);
2417 * Now load segment descriptors. If fault happenes at this stage
2418 * it is handled in a context of new task
2420 ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2421 if (ret != X86EMUL_CONTINUE)
2423 ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2424 if (ret != X86EMUL_CONTINUE)
2426 ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2427 if (ret != X86EMUL_CONTINUE)
2429 ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2430 if (ret != X86EMUL_CONTINUE)
2432 ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2433 if (ret != X86EMUL_CONTINUE)
2435 ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
2436 if (ret != X86EMUL_CONTINUE)
2438 ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
2439 if (ret != X86EMUL_CONTINUE)
2442 return X86EMUL_CONTINUE;
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

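/*
 * Common task-switch worker.  Validates the new TSS descriptor, performs
 * the privilege checks mandated by the SDM, maintains the busy bit and
 * the NT flag/back-link according to the switch reason, and finally
 * dispatches to the 16-bit or 32-bit routine based on the descriptor
 * type.
 */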
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS: Check against DPL of the TSS
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	} else if (reason != TASK_SWITCH_IRET) {
		int dpl = next_tss_desc.dpl;
		if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
			return emulate_gp(ctxt, tss_selector);
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/*
	 * Set the back link to the previous task only if the NT bit is set
	 * in eflags; note that old_tss_sel is not used after this point.
	 */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE)
		ctxt->eip = ctxt->_eip;

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}

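/*
 * Advance (or, when EFLAGS.DF is set, retreat) a string operand's index
 * register by the operand size and recompute its effective address for
 * the next iteration.
 */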
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
			    int reg, struct operand *op)
{
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
	op->addr.mem.seg = seg;
}

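/*
 * DAS - decimal adjust AL after subtraction.  Per the SDM: if the low
 * nibble of AL is above 9 (or AF is set), subtract 6 from AL; the
 * "al >= 250" test below detects the borrow generated by that
 * subtraction.  If the original AL was above 0x99 (or CF was set),
 * additionally subtract 0x60 and set CF.
 */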
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	emulate_2op_SrcV(ctxt, "or");
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	jmp_rel(ctxt, rel);
	return em_push(ctxt);
}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;

	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	old_eip = ctxt->_eip;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
		return X86EMUL_CONTINUE;

	ctxt->_eip = 0;
	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = old_eip;
	return em_push(ctxt);
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->_eip;
	ctxt->dst.bytes = ctxt->op_bytes;
	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_add(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "add");
	return X86EMUL_CONTINUE;
}

static int em_or(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "or");
	return X86EMUL_CONTINUE;
}

static int em_adc(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "adc");
	return X86EMUL_CONTINUE;
}

static int em_sbb(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "sbb");
	return X86EMUL_CONTINUE;
}

static int em_and(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "and");
	return X86EMUL_CONTINUE;
}

static int em_sub(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "sub");
	return X86EMUL_CONTINUE;
}

static int em_xor(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "xor");
	return X86EMUL_CONTINUE;
}

static int em_cmp(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "cmp");
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_test(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "test");
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV_nobyte(ctxt, "imul");
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return em_imul(ctxt);
}

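/*
 * CWD/CDQ/CQO: replicate the accumulator's sign bit into RDX.  The
 * shift isolates the sign bit, so the expression below yields all ones
 * when the sign bit is set (~(1 - 1) == ~0) and zero otherwise
 * (~(0 - 1) == 0).
 */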
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
	ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, ctxt->regs[VCPU_REGS_RCX], &pmc))
		return emulate_gp(ctxt, 0);
	ctxt->regs[VCPU_REGS_RAX] = (u32)pmc;
	ctxt->regs[VCPU_REGS_RDX] = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src.val;
	return X86EMUL_CONTINUE;
}

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
		| ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
	if (ctxt->ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	if (ctxt->ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data))
		return emulate_gp(ctxt, 0);

	ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
	ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
	return X86EMUL_CONTINUE;
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_movdqu(struct x86_emulate_ctxt *ctxt)
{
	memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
	return X86EMUL_CONTINUE;
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

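/* CLTS clears CR0.TS and leaves every other CR0 bit intact. */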
static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	u32 cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
		return X86EMUL_UNHANDLEABLE;

	rc = ctxt->ops->fix_hypercall(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_gdt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = ctxt->ops->fix_hypercall(ctxt);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return rc;
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
	if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		jmp_rel(ctxt, ctxt->src.val);

	return X86EMUL_CONTINUE;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
		jmp_rel(ctxt, ctxt->src.val);

	return X86EMUL_CONTINUE;
}

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

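/*
 * CLI/STI are permitted only when CPL <= IOPL; emulator_bad_iopl()
 * performs that check.  STI additionally sets the interrupt shadow so
 * the following instruction completes before interrupts are delivered.
 */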
static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_bt(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;

	emulate_2op_SrcV_nobyte(ctxt, "bt");
	return X86EMUL_CONTINUE;
}

static int em_bts(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV_nobyte(ctxt, "bts");
	return X86EMUL_CONTINUE;
}

static int em_btr(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV_nobyte(ctxt, "btr");
	return X86EMUL_CONTINUE;
}

static int em_btc(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV_nobyte(ctxt, "btc");
	return X86EMUL_CONTINUE;
}

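/*
 * BSF/BSR are executed natively via inline asm; hardware leaves the
 * destination undefined when the source is zero (ZF set), so in that
 * case only ZF is reflected back and writeback is suppressed.
 */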
static int em_bsf(struct x86_emulate_ctxt *ctxt)
{
	u8 zf;

	__asm__ ("bsf %2, %0; setz %1"
		 : "=r"(ctxt->dst.val), "=q"(zf)
		 : "r"(ctxt->src.val));

	ctxt->eflags &= ~X86_EFLAGS_ZF;
	if (zf) {
		ctxt->eflags |= X86_EFLAGS_ZF;
		/* Disable writeback. */
		ctxt->dst.type = OP_NONE;
	}
	return X86EMUL_CONTINUE;
}

static int em_bsr(struct x86_emulate_ctxt *ctxt)
{
	u8 zf;

	__asm__ ("bsr %2, %0; setz %1"
		 : "=r"(ctxt->dst.val), "=q"(zf)
		 : "r"(ctxt->src.val));

	ctxt->eflags &= ~X86_EFLAGS_ZF;
	if (zf) {
		ctxt->eflags |= X86_EFLAGS_ZF;
		/* Disable writeback. */
		ctxt->dst.type = OP_NONE;
	}
	return X86EMUL_CONTINUE;
}

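/*
 * Permission-check helpers wired into the decode tables below via the
 * .check_perm hook; they run after the generic privilege checks and
 * before the instruction executes.
 */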
static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS;
		else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
			rsvd = CR3_PAE_RESERVED_BITS;
		else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
			rsvd = CR3_NONPAE_RESERVED_BITS;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt))
		return emulate_db(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = ctxt->regs[VCPU_REGS_RAX];

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = ctxt->regs[VCPU_REGS_RCX];

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    (rcx > 3))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

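/*
 * Shorthands for building the decode tables: D() declares a decode-only
 * entry, I() adds an ->execute handler, DI()/II() add an intercept for
 * nested virtualization, the *P variants attach a ->check_perm hook, and
 * G()/GD()/EXT()/GP() redirect decoding through ModRM-indexed group
 * tables.  N marks an opcode the emulator does not handle.  The 2bv
 * wrappers expand into the byte and word-or-larger encoding variants,
 * and I6ALU additionally covers the reg/mem, mem/reg and accumulator/
 * immediate forms of the classic ALU opcodes.
 */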
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
		      .check_perm = (_p) }
#define N    D(0)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
	  .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \
		I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
		I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)

static struct opcode group7_rm1[] = {
	DI(SrcNone | ModRM | Priv, monitor),
	DI(SrcNone | ModRM | Priv, mwait),
	N, N, N, N, N, N,
};

static struct opcode group7_rm3[] = {
	DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa),
	II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
	DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa),
	DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa),
	DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
};

static struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static struct opcode group1[] = {
	I(Lock, em_add),
	I(Lock | PageTable, em_or),
	I(Lock, em_adc),
	I(Lock, em_sbb),
	I(Lock | PageTable, em_and),
	I(Lock, em_sub),
	I(Lock, em_xor),
	I(0, em_cmp),
};

static struct opcode group1A[] = {
	I(DstMem | SrcNone | ModRM | Mov | Stack, em_pop), N, N, N, N, N, N, N,
};

static struct opcode group3[] = {
	I(DstMem | SrcImm | ModRM, em_test),
	I(DstMem | SrcImm | ModRM, em_test),
	I(DstMem | SrcNone | ModRM | Lock, em_not),
	I(DstMem | SrcNone | ModRM | Lock, em_neg),
	I(SrcMem | ModRM, em_mul_ex),
	I(SrcMem | ModRM, em_imul_ex),
	I(SrcMem | ModRM, em_div_ex),
	I(SrcMem | ModRM, em_idiv_ex),
};

static struct opcode group4[] = {
	I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45),
	I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45),
	N, N, N, N, N, N,
};

static struct opcode group5[] = {
	I(DstMem | SrcNone | ModRM | Lock, em_grp45),
	I(DstMem | SrcNone | ModRM | Lock, em_grp45),
	I(SrcMem | ModRM | Stack, em_grp45),
	I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
	I(SrcMem | ModRM | Stack, em_grp45),
	I(SrcMemFAddr | ModRM | ImplicitOps, em_grp45),
	I(SrcMem | ModRM | Stack, em_grp45), N,
};

static struct opcode group6[] = {
	DI(ModRM | Prot, sldt),
	DI(ModRM | Prot, str),
	DI(ModRM | Prot | Priv, lldt),
	DI(ModRM | Prot | Priv, ltr),
	N, N, N, N,
};

static struct group_dual group7 = { {
	DI(ModRM | Mov | DstMem | Priv, sgdt),
	DI(ModRM | Mov | DstMem | Priv, sidt),
	II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
	II(ModRM | SrcMem | Priv, em_lidt, lidt),
	II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
	II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
	I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
} };

static struct opcode group8[] = {
	N, N, N, N,
	I(DstMem | SrcImmByte | ModRM, em_bt),
	I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_bts),
	I(DstMem | SrcImmByte | ModRM | Lock, em_btr),
	I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_btc),
};

static struct group_dual group9 = { {
	N, I(DstMem64 | ModRM | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static struct opcode group11[] = {
	I(DstMem | SrcImm | ModRM | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static struct gprefix pfx_0f_6f_0f_7f = {
	N, N, N, I(Sse | Unaligned, em_movdqu),
};

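/*
 * The main one-byte opcode map, indexed by the first opcode byte.  The
 * X3/X7/X8/X16 helpers replicate an entry that many times.
 */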
static struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	I6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	I6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	I6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	I6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	I6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	I6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	I6ALU(0, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
	G(DstMem | SrcImm | ModRM | Group, group1),
	G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
	G(DstMem | SrcImmByte | ModRM | Group, group1),
	I2bv(DstMem | SrcReg | ModRM, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf), N, N,
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstDI | String, em_cmp),
	/* 0xA8 - 0xAF */
	I2bv(DstAcc | SrcImm, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	I2bv(SrcAcc | DstDI | String, em_cmp),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	D2bv(DstMem | SrcImmByte | ModRM),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | Stack, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	N, N, N, I(ImplicitOps | Stack, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
	N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte, em_loop)),
	I(SrcImmByte, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
	I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
	I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

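/*
 * The 0x0F-prefixed (two-byte) opcode map, indexed by the second opcode
 * byte.
 */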
static struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | VendorSpecific, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264, em_cr_write, cr_write, check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264, em_dr_write, dr_write, check_dr_write),
	N, N, N, N,
	N, N, N, N, N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | VendorSpecific, em_sysenter),
	I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM | Mov)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	DI(ImplicitOps, cpuid), I(DstMem | SrcReg | ModRM | BitOp, em_bt),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	DI(ImplicitOps, rsm),
	I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM),
	D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	I(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf), I(DstReg | SrcMem | ModRM, em_bsr),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xCF */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	N, D(DstMem | SrcReg | ModRM | Mov),
	N, N, N, GD(0, &group9),
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef I6ALU

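/*
 * Immediate operands never exceed four bytes even with a 64-bit operand
 * size; imm_size() encodes that rule, and decode_imm() fetches the value
 * and sign- or zero-extends it as requested.
 */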
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if ((ctxt->d & BitOp) && op == &ctxt->dst)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = &ctxt->regs[VCPU_REGS_RAX];
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = &ctxt->regs[VCPU_REGS_RDX];
		fetch_register_operand(op);
		break;
	case OpCL:
		op->bytes = 1;
		op->val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
		op->addr.mem.seg = seg_override(ctxt);
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES: op->val = VCPU_SREG_ES; break;
	case OpCS: op->val = VCPU_SREG_CS; break;
	case OpSS: op->val = VCPU_SREG_SS; break;
	case OpDS: op->val = VCPU_SREG_DS; break;
	case OpFS: op->val = VCPU_SREG_FS; break;
	case OpGS: op->val = VCPU_SREG_GS; break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

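/*
 * Decode front end: consume legacy/REX prefixes, look the opcode up in
 * the tables above (following any group redirections), then decode the
 * ModRM/SIB bytes and fetch up to three operands.  Returns EMULATION_OK
 * or EMULATION_FAILED; execution happens later in x86_emulate_insn().
 */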
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.start = ctxt->_eip;
	ctxt->fetch.end = ctxt->fetch.start + insn_len;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(ctxt, (ctxt->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(ctxt, ctxt->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->twobyte = 1;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];
	}
	ctxt->d = opcode.flags;

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			ctxt->modrm = insn_fetch(u8, ctxt);
			--ctxt->_eip;
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			ctxt->modrm = insn_fetch(u8, ctxt);
			--ctxt->_eip;
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	ctxt->execute = opcode.u.execute;
	ctxt->check_perm = opcode.check_perm;
	ctxt->intercept = opcode.intercept;

	/* Unrecognised? */
	if (ctxt->d == 0 || (ctxt->d & Undefined))
		return EMULATION_FAILED;

	if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
		return EMULATION_FAILED;

	if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
		ctxt->op_bytes = 8;

	if (ctxt->d & Op3264) {
		if (mode == X86EMUL_MODE_PROT64)
			ctxt->op_bytes = 8;
		else
			ctxt->op_bytes = 4;
	}

	if (ctxt->d & Sse)
		ctxt->op_bytes = 16;

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!ctxt->has_seg_override)
			set_seg_override(ctxt, ctxt->modrm_seg);
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!ctxt->has_seg_override)
		set_seg_override(ctxt, VCPU_SREG_DS);

	ctxt->memop.addr.mem.seg = seg_override(ctxt);

	if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

done:
	if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
		ctxt->memopp->addr.mem.ea += ctxt->_eip;

	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition only applies for REPE and
	 * REPNE.  If the repeat string operation prefix is REPE/REPZ or
	 * REPNE/REPNZ, test the corresponding termination condition:
	 * - if REPE/REPZ and ZF = 0 then done
	 * - if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}

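/*
 * Execution back end.  The ordering mirrors hardware: mode, privilege
 * and permission checks plus intercept notifications first, then
 * operand memory reads, then the handler itself (either the
 * table-driven ->execute callback or the switch below for legacy
 * cases), and finally writeback and string-instruction bookkeeping.
 */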
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & Sse)
	    && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
		|| !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
		rc = emulate_nm(ctxt);
		goto done;
	}

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_PRE_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/* Privileged instruction can be executed only in CPL=0 */
	if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
		rc = emulate_gp(ctxt, 0);
		goto done;
	}

	/* Instruction can only be executed in protected mode */
	if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* Do instruction specific permission checks */
	if (ctxt->check_perm) {
		rc = ctxt->check_perm(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		/* All REP prefixes have the same first termination condition */
		if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
			ctxt->eip = ctxt->_eip;
			goto done;
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->execute) {
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->twobyte)
		goto twobyte_insn;

	switch (ctxt->b) {
	case 0x40 ... 0x47: /* inc r16/r32 */
		emulate_1op(ctxt, "inc");
		break;
	case 0x48 ... 0x4f: /* dec r16/r32 */
		emulate_1op(ctxt, "dec");
		break;
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
			break;
		rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xc0 ... 0xc1:
		rc = em_grp2(ctxt);
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		rc = em_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
		rc = em_grp2(ctxt);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	rc = writeback(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Restore the dst type in case the decoding is reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override(ctxt),
				VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
				&ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		struct read_cache *r = &ctxt->io_read;
		register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
		if (!test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl(ctxt, "shld");
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl(ctxt, "shrd");
		break;
	case 0xae:              /* clflush */
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
						   : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
						     (s16) ctxt->src.val;
		break;
	case 0xc0 ... 0xc1:	/* xadd */
		emulate_2op_SrcV(ctxt, "add");
		/* Write back the register source. */
		ctxt->src.val = ctxt->dst.orig_val;
		write_register_operand(&ctxt->src);
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
							(u64) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}