/******************************************************************************
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
#define String      (1<<13)	/* String instruction (rep capable) */
#define Stack       (1<<14)	/* Stack instruction (push/pop) */
#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)	/* Escape to coprocessor instruction */
#define InstrDual   (6<<15)	/* Alternate instruction decoding of mod == 3 */
#define Sse         (1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
#define Prot        (1<<21)	/* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)	/* Emulate if unsupported by the host */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)	/* instruction used to write page table */
#define NotImpl     (1 << 30)	/* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
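
/*
 * Illustrative example: an opcode described as DstMem | SrcReg | ModRM
 * packs OpMem into bits 1..5 and OpReg into bits 6..10 of the 64-bit
 * descriptor; the decoder recovers each operand type with
 * (ctxt->d >> DstShift) & OpMask and (ctxt->d >> SrcShift) & OpMask.
 */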
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
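
/*
 * Illustrative sketch: because every stub is FASTOP_SIZE bytes, the
 * dispatcher can pick the handler for the current operand size
 * arithmetically, roughly:
 *
 *	fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * so a 4-byte operation lands two stubs past the byte-sized entry point.
 */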
struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
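
/*
 * Illustrative note: a fastop caller is expected to merge only these bits
 * back, e.g.
 *
 *	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
 *
 * so control bits such as IF and TF are never taken from the emulated
 * computation.
 */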
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET
#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END
/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg_rmw(ctxt, reg), mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
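
/*
 * Illustrative example: with desc->g set, a raw limit of 0xfffff scales to
 * (0xfffff << 12) | 0xfff = 0xffffffff, i.e. a 4GiB segment; with desc->g
 * clear the limit is used as-is.
 */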
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
		     || !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		if (size > *max_size)
			goto bad;
		la &= (u32)-1;
		break;
	}
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;

	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}
static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			mode = X86EMUL_MODE_PROT64;
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	return assign_eip(ctxt, dst, mode);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
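
	/* 15UL ^ cur_size equals "15 - cur_size" for any cur_size in [0, 15] */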
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes are
	 * available, we must have hit the 15-byte limit.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})
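
/*
 * Illustrative usage: the decode loop pulls successive instruction bytes
 * through these macros, e.g.
 *
 *	ctxt->b = insn_fetch(u8, ctxt);
 *
 * each fetch advances both _eip and the fetch-cache pointer, and bails
 * out to the local "done" label if prefetching fails.
 */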
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP2R(cmp, cmp_r);
static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
					       ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}
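
/*
 * Illustrative example: for an operand like 0x10(%rbx,%rcx,4),
 * modrm_rm & 7 == 4 selects a SIB byte giving base_reg = RBX,
 * index_reg = RCX and scale = 2, so modrm_ea becomes
 * RBX + (RCX << 2) + 0x10, the disp8 fetched because modrm_mod == 1.
 */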
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
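
/*
 * Illustrative example: for BT m32, r32 with bit index 100, mask = ~31 so
 * sv = 96; the effective address advances by sv >> 3 = 12 bytes and
 * src.val keeps 100 & 31 = 4, the bit position within the addressed dword.
 */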
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}
static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}
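
/*
 * Illustrative example: for "rep insb" with RCX = 300 and a whole page
 * left at ES:RDI, one host round trip reads up to
 * min3(in_page, sizeof(rc->data), 300) bytes into the cache; later
 * iterations are then served from rc->data without exiting again.
 */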
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}
/* allowed only for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

/* allowed only for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     bool in_task_switch,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
					  sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					    ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
}

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}
static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}
static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}
static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not yet implemented. */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not yet implemented. */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
		return rc;
	}
	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
				  (u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}
static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in long mode, so only become
	 * vendor specific (via cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit long mode,
	 * so a 64-bit guest running a 32-bit compat app will #UD.  While
	 * this behaviour could be fixed (by emulating the AMD response),
	 * AMD CPUs cannot be made to behave like Intel ones.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	}

	return X86EMUL_CONTINUE;
}
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in
	 * legacy mode).
	 */
	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64-bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	if ((msr_data & 0xfffc) == 0x0)
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
							      (u32)msr_data;

	return X86EMUL_CONTINUE;
}
2386 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2388 const struct x86_emulate_ops *ops = ctxt->ops;
2389 struct desc_struct cs, ss;
2390 u64 msr_data, rcx, rdx;
2392 u16 cs_sel = 0, ss_sel = 0;
2394 /* inject #GP if in real mode or Virtual 8086 mode */
2395 if (ctxt->mode == X86EMUL_MODE_REAL ||
2396 ctxt->mode == X86EMUL_MODE_VM86)
2397 return emulate_gp(ctxt, 0);
2399 setup_syscalls_segments(ctxt, &cs, &ss);
2401 if ((ctxt->rex_prefix & 0x8) != 0x0)
2402 usermode = X86EMUL_MODE_PROT64;
2404 usermode = X86EMUL_MODE_PROT32;
2406 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2407 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2411 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2413 case X86EMUL_MODE_PROT32:
2414 cs_sel = (u16)(msr_data + 16);
2415 if ((msr_data & 0xfffc) == 0x0)
2416 return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		break;
2421 case X86EMUL_MODE_PROT64:
2422 cs_sel = (u16)(msr_data + 32);
2423 if (msr_data == 0x0)
2424 return emulate_gp(ctxt, 0);
2425 ss_sel = cs_sel + 8;
2428 if (is_noncanonical_address(rcx) ||
2429 is_noncanonical_address(rdx))
			return emulate_gp(ctxt, 0);
		break;
	}
2433 cs_sel |= SELECTOR_RPL_MASK;
2434 ss_sel |= SELECTOR_RPL_MASK;
2436 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2437 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2442 return X86EMUL_CONTINUE;
2445 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
2452 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2453 return ctxt->ops->cpl(ctxt) > iopl;
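/*
 * I/O permission is checked against the I/O bitmap in the TSS: the segment
 * must at least cover the I/O map base field at byte 102, each port is one
 * bit in the bitmap, and a multi-byte access is allowed only if every one
 * of its bits is clear.
 */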
2456 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2459 const struct x86_emulate_ops *ops = ctxt->ops;
2460 struct desc_struct tr_seg;
2463 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2464 unsigned mask = (1 << len) - 1;
2467 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2470 if (desc_limit_scaled(&tr_seg) < 103)
2472 base = get_desc_base(&tr_seg);
2473 #ifdef CONFIG_X86_64
2474 base |= ((u64)base3) << 32;
2476 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2477 if (r != X86EMUL_CONTINUE)
2479 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2481 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2482 if (r != X86EMUL_CONTINUE)
2484 if ((perm >> bit_idx) & mask)
2489 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2495 if (emulator_bad_iopl(ctxt))
2496 if (!emulator_io_port_access_allowed(ctxt, port, len))
2499 ctxt->perm_ok = true;
2504 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2505 struct tss_segment_16 *tss)
2507 tss->ip = ctxt->_eip;
2508 tss->flag = ctxt->eflags;
2509 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2510 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2511 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2512 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2513 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2514 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2515 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2516 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2518 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2519 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2520 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2521 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2522 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2525 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2526 struct tss_segment_16 *tss)
2531 ctxt->_eip = tss->ip;
2532 ctxt->eflags = tss->flag | 2;
2533 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2534 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2535 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2536 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2537 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2538 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2539 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2540 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
2546 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2547 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2548 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2549 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2550 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
2558 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2560 if (ret != X86EMUL_CONTINUE)
2562 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2564 if (ret != X86EMUL_CONTINUE)
2566 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2568 if (ret != X86EMUL_CONTINUE)
2570 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2572 if (ret != X86EMUL_CONTINUE)
2574 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2576 if (ret != X86EMUL_CONTINUE)
2579 return X86EMUL_CONTINUE;
2582 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2583 u16 tss_selector, u16 old_tss_sel,
2584 ulong old_tss_base, struct desc_struct *new_desc)
2586 const struct x86_emulate_ops *ops = ctxt->ops;
2587 struct tss_segment_16 tss_seg;
2589 u32 new_tss_base = get_desc_base(new_desc);
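	/*
	 * A 16-bit task switch: read the outgoing TSS, refresh it with the
	 * current register state, read the incoming TSS, optionally chain
	 * the back link, then load CPU state from the new image.
	 */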
2591 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2593 if (ret != X86EMUL_CONTINUE)
2596 save_state_to_tss16(ctxt, &tss_seg);
2598 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2600 if (ret != X86EMUL_CONTINUE)
2603 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2605 if (ret != X86EMUL_CONTINUE)
2608 if (old_tss_sel != 0xffff) {
2609 tss_seg.prev_task_link = old_tss_sel;
2611 ret = ops->write_std(ctxt, new_tss_base,
2612 &tss_seg.prev_task_link,
2613 sizeof tss_seg.prev_task_link,
2615 if (ret != X86EMUL_CONTINUE)
2619 return load_state_from_tss16(ctxt, &tss_seg);
2622 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2623 struct tss_segment_32 *tss)
	/* CR3 and the LDT selector are intentionally not saved */
2626 tss->eip = ctxt->_eip;
2627 tss->eflags = ctxt->eflags;
2628 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2629 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2630 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2631 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2632 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2633 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2634 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2635 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2637 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2638 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2639 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2640 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2641 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2642 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2645 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2646 struct tss_segment_32 *tss)
2651 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2652 return emulate_gp(ctxt, 0);
2653 ctxt->_eip = tss->eip;
2654 ctxt->eflags = tss->eflags | 2;
2656 /* General purpose registers */
2657 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2658 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2659 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2660 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2661 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2662 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2663 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2664 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
	 * SDM says that segment selectors are loaded before segment
	 * descriptors. This is important because CPL checks will
	 * use CS.RPL.
2671 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2672 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2673 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2674 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2675 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2676 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2677 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2680 * If we're switching between Protected Mode and VM86, we need to make
2681 * sure to update the mode before loading the segment descriptors so
2682 * that the selectors are interpreted correctly.
2684 if (ctxt->eflags & X86_EFLAGS_VM) {
2685 ctxt->mode = X86EMUL_MODE_VM86;
2688 ctxt->mode = X86EMUL_MODE_PROT32;
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
2696 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2698 if (ret != X86EMUL_CONTINUE)
2700 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2702 if (ret != X86EMUL_CONTINUE)
2704 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2706 if (ret != X86EMUL_CONTINUE)
2708 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2710 if (ret != X86EMUL_CONTINUE)
2712 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2714 if (ret != X86EMUL_CONTINUE)
2716 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2718 if (ret != X86EMUL_CONTINUE)
2720 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2722 if (ret != X86EMUL_CONTINUE)
2725 return X86EMUL_CONTINUE;
2728 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2729 u16 tss_selector, u16 old_tss_sel,
2730 ulong old_tss_base, struct desc_struct *new_desc)
2732 const struct x86_emulate_ops *ops = ctxt->ops;
2733 struct tss_segment_32 tss_seg;
2735 u32 new_tss_base = get_desc_base(new_desc);
2736 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2737 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2739 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2741 if (ret != X86EMUL_CONTINUE)
2742 /* FIXME: need to provide precise fault address */
2745 save_state_to_tss32(ctxt, &tss_seg);
2747 /* Only GP registers and segment selectors are saved */
2748 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2749 ldt_sel_offset - eip_offset, &ctxt->exception);
2750 if (ret != X86EMUL_CONTINUE)
2751 /* FIXME: need to provide precise fault address */
2754 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2756 if (ret != X86EMUL_CONTINUE)
2757 /* FIXME: need to provide precise fault address */
2760 if (old_tss_sel != 0xffff) {
2761 tss_seg.prev_task_link = old_tss_sel;
2763 ret = ops->write_std(ctxt, new_tss_base,
2764 &tss_seg.prev_task_link,
2765 sizeof tss_seg.prev_task_link,
2767 if (ret != X86EMUL_CONTINUE)
2768 /* FIXME: need to provide precise fault address */
2772 return load_state_from_tss32(ctxt, &tss_seg);
2775 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2776 u16 tss_selector, int idt_index, int reason,
2777 bool has_error_code, u32 error_code)
2779 const struct x86_emulate_ops *ops = ctxt->ops;
2780 struct desc_struct curr_tss_desc, next_tss_desc;
2782 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2783 ulong old_tss_base =
2784 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2788 /* FIXME: old_tss_base == ~0 ? */
2790 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2791 if (ret != X86EMUL_CONTINUE)
2793 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2794 if (ret != X86EMUL_CONTINUE)
2797 /* FIXME: check that next_tss_desc is tss */
2800 * Check privileges. The three cases are task switch caused by...
2802 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2803 * 2. Exception/IRQ/iret: No check is performed
2804 * 3. jmp/call to TSS/task-gate: No check is performed since the
2805 * hardware checks it before exiting.
2807 if (reason == TASK_SWITCH_GATE) {
2808 if (idt_index != -1) {
2809 /* Software interrupts */
2810 struct desc_struct task_gate_desc;
2813 ret = read_interrupt_descriptor(ctxt, idt_index,
2815 if (ret != X86EMUL_CONTINUE)
2818 dpl = task_gate_desc.dpl;
2819 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2820 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2824 desc_limit = desc_limit_scaled(&next_tss_desc);
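	/*
	 * The minimal TSS limit is 0x67 for a 32-bit TSS (type & 8) and
	 * 0x2b for a 16-bit one; anything smaller raises #TS.
	 */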
2825 if (!next_tss_desc.p ||
2826 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2827 desc_limit < 0x2b)) {
2828 return emulate_ts(ctxt, tss_selector & 0xfffc);
2831 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2832 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2833 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2836 if (reason == TASK_SWITCH_IRET)
2837 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
	/* Set the back link to the previous task only if the NT bit is set
	   in eflags; note that old_tss_sel is not used after this point. */
2841 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2842 old_tss_sel = 0xffff;
2844 if (next_tss_desc.type & 8)
2845 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2846 old_tss_base, &next_tss_desc);
2848 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2849 old_tss_base, &next_tss_desc);
2850 if (ret != X86EMUL_CONTINUE)
2853 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2854 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2856 if (reason != TASK_SWITCH_IRET) {
2857 next_tss_desc.type |= (1 << 1); /* set busy flag */
2858 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2861 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2862 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2864 if (has_error_code) {
2865 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2866 ctxt->lock_prefix = 0;
2867 ctxt->src.val = (unsigned long) error_code;
2868 ret = em_push(ctxt);
2874 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2875 u16 tss_selector, int idt_index, int reason,
2876 bool has_error_code, u32 error_code)
2880 invalidate_registers(ctxt);
2881 ctxt->_eip = ctxt->eip;
2882 ctxt->dst.type = OP_NONE;
2884 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2885 has_error_code, error_code);
2887 if (rc == X86EMUL_CONTINUE) {
2888 ctxt->eip = ctxt->_eip;
2889 writeback_registers(ctxt);
2892 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2895 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2898 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2900 register_address_increment(ctxt, reg, df * op->bytes);
2901 op->addr.mem.ea = register_address(ctxt, reg);
2904 static int em_das(struct x86_emulate_ctxt *ctxt)
2907 bool af, cf, old_cf;
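	/*
	 * DAS: if the low nibble of AL exceeds 9 (or AF is set), subtract 6
	 * from AL; if AL was above 0x99 (or CF was set), subtract another
	 * 0x60, updating AF and CF to match.
	 */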
2909 cf = ctxt->eflags & X86_EFLAGS_CF;
2915 af = ctxt->eflags & X86_EFLAGS_AF;
2916 if ((al & 0x0f) > 9 || af) {
2918 cf = old_cf | (al >= 250);
2923 if (old_al > 0x99 || old_cf) {
2929 /* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
2933 fastop(ctxt, em_or);
2934 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2936 ctxt->eflags |= X86_EFLAGS_CF;
2938 ctxt->eflags |= X86_EFLAGS_AF;
2939 return X86EMUL_CONTINUE;
2942 static int em_aam(struct x86_emulate_ctxt *ctxt)
2946 if (ctxt->src.val == 0)
2947 return emulate_de(ctxt);
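	/* AAM splits AL by the immediate base: AH = AL / base, AL = AL % base. */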
2949 al = ctxt->dst.val & 0xff;
2950 ah = al / ctxt->src.val;
2951 al %= ctxt->src.val;
2953 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2955 /* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
2959 fastop(ctxt, em_or);
2961 return X86EMUL_CONTINUE;
2964 static int em_aad(struct x86_emulate_ctxt *ctxt)
2966 u8 al = ctxt->dst.val & 0xff;
2967 u8 ah = (ctxt->dst.val >> 8) & 0xff;
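	/* AAD folds AH into AL (AL = AL + AH * base) and clears AH. */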
2969 al = (al + (ah * ctxt->src.val)) & 0xff;
2971 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2973 /* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
2977 fastop(ctxt, em_or);
2979 return X86EMUL_CONTINUE;
2982 static int em_call(struct x86_emulate_ctxt *ctxt)
2985 long rel = ctxt->src.val;
2987 ctxt->src.val = (unsigned long)ctxt->_eip;
2988 rc = jmp_rel(ctxt, rel);
2989 if (rc != X86EMUL_CONTINUE)
2991 return em_push(ctxt);
2994 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2999 struct desc_struct old_desc, new_desc;
3000 const struct x86_emulate_ops *ops = ctxt->ops;
3001 int cpl = ctxt->ops->cpl(ctxt);
3003 old_eip = ctxt->_eip;
3004 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3006 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3007 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3009 if (rc != X86EMUL_CONTINUE)
3010 return X86EMUL_CONTINUE;
3012 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3013 if (rc != X86EMUL_CONTINUE)
3016 ctxt->src.val = old_cs;
3018 if (rc != X86EMUL_CONTINUE)
3021 ctxt->src.val = old_eip;
	/* If we failed, we tainted the memory, but the very least we should
	   restore is the old eip. */
3025 if (rc != X86EMUL_CONTINUE)
3029 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3034 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3039 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3040 if (rc != X86EMUL_CONTINUE)
3042 rc = assign_eip_near(ctxt, eip);
3043 if (rc != X86EMUL_CONTINUE)
3045 rsp_increment(ctxt, ctxt->src.val);
3046 return X86EMUL_CONTINUE;
3049 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3051 /* Write back the register source. */
3052 ctxt->src.val = ctxt->dst.val;
3053 write_register_operand(&ctxt->src);
3055 /* Write back the memory destination with implicit LOCK prefix. */
3056 ctxt->dst.val = ctxt->src.orig_val;
3057 ctxt->lock_prefix = 1;
3058 return X86EMUL_CONTINUE;
3061 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3063 ctxt->dst.val = ctxt->src2.val;
3064 return fastop(ctxt, em_imul);
3067 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3069 ctxt->dst.type = OP_REG;
3070 ctxt->dst.bytes = ctxt->src.bytes;
3071 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
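	/*
	 * Broadcast the sign bit of the source: a sign of 0 yields
	 * ~(0 - 1) = 0, a sign of 1 yields ~(1 - 1) = all ones, i.e.
	 * DX/EDX/RDX becomes the sign extension of AX/EAX/RAX.
	 */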
3072 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3074 return X86EMUL_CONTINUE;
3077 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3081 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3082 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3083 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3084 return X86EMUL_CONTINUE;
3087 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3091 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3092 return emulate_gp(ctxt, 0);
3093 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3094 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3095 return X86EMUL_CONTINUE;
3098 static int em_mov(struct x86_emulate_ctxt *ctxt)
3100 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3101 return X86EMUL_CONTINUE;
3104 #define FFL(x) bit(X86_FEATURE_##x)
3106 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3108 u32 ebx, ecx, edx, eax = 1;
3112 * Check MOVBE is set in the guest-visible CPUID leaf.
3114 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3115 if (!(ecx & FFL(MOVBE)))
3116 return emulate_ud(ctxt);
3118 switch (ctxt->op_bytes) {
3121 * From MOVBE definition: "...When the operand size is 16 bits,
3122 * the upper word of the destination register remains unchanged
	 * Casting either ->valptr or ->val to u16 breaks strict aliasing
	 * rules, so we have to do the operation almost by hand.
3128 tmp = (u16)ctxt->src.val;
3129 ctxt->dst.val &= ~0xffffUL;
3130 ctxt->dst.val |= (unsigned long)swab16(tmp);
3133 ctxt->dst.val = swab32((u32)ctxt->src.val);
3136 ctxt->dst.val = swab64(ctxt->src.val);
3141 return X86EMUL_CONTINUE;
3144 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3146 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3147 return emulate_gp(ctxt, 0);
3149 /* Disable writeback. */
3150 ctxt->dst.type = OP_NONE;
3151 return X86EMUL_CONTINUE;
3154 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3158 if (ctxt->mode == X86EMUL_MODE_PROT64)
3159 val = ctxt->src.val & ~0ULL;
3161 val = ctxt->src.val & ~0U;
3163 /* #UD condition is already handled. */
3164 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3165 return emulate_gp(ctxt, 0);
3167 /* Disable writeback. */
3168 ctxt->dst.type = OP_NONE;
3169 return X86EMUL_CONTINUE;
3172 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3176 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3177 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3178 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3179 return emulate_gp(ctxt, 0);
3181 return X86EMUL_CONTINUE;
3184 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3188 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3189 return emulate_gp(ctxt, 0);
3191 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3192 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3193 return X86EMUL_CONTINUE;
3196 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3198 if (ctxt->modrm_reg > VCPU_SREG_GS)
3199 return emulate_ud(ctxt);
3201 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3202 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3203 ctxt->dst.bytes = 2;
3204 return X86EMUL_CONTINUE;
3207 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3209 u16 sel = ctxt->src.val;
3211 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3212 return emulate_ud(ctxt);
3214 if (ctxt->modrm_reg == VCPU_SREG_SS)
3215 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3217 /* Disable writeback. */
3218 ctxt->dst.type = OP_NONE;
3219 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3222 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3224 u16 sel = ctxt->src.val;
3226 /* Disable writeback. */
3227 ctxt->dst.type = OP_NONE;
3228 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3231 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3233 u16 sel = ctxt->src.val;
3235 /* Disable writeback. */
3236 ctxt->dst.type = OP_NONE;
3237 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3240 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3245 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3246 if (rc == X86EMUL_CONTINUE)
3247 ctxt->ops->invlpg(ctxt, linear);
3248 /* Disable writeback. */
3249 ctxt->dst.type = OP_NONE;
3250 return X86EMUL_CONTINUE;
3253 static int em_clts(struct x86_emulate_ctxt *ctxt)
3257 cr0 = ctxt->ops->get_cr(ctxt, 0);
3259 ctxt->ops->set_cr(ctxt, 0, cr0);
3260 return X86EMUL_CONTINUE;
3263 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3265 int rc = ctxt->ops->fix_hypercall(ctxt);
3267 if (rc != X86EMUL_CONTINUE)
3270 /* Let the processor re-execute the fixed hypercall */
3271 ctxt->_eip = ctxt->eip;
3272 /* Disable writeback. */
3273 ctxt->dst.type = OP_NONE;
3274 return X86EMUL_CONTINUE;
3277 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3278 void (*get)(struct x86_emulate_ctxt *ctxt,
3279 struct desc_ptr *ptr))
3281 struct desc_ptr desc_ptr;
3283 if (ctxt->mode == X86EMUL_MODE_PROT64)
3285 get(ctxt, &desc_ptr);
3286 if (ctxt->op_bytes == 2) {
3288 desc_ptr.address &= 0x00ffffff;
3290 /* Disable writeback. */
3291 ctxt->dst.type = OP_NONE;
3292 return segmented_write(ctxt, ctxt->dst.addr.mem,
3293 &desc_ptr, 2 + ctxt->op_bytes);
3296 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3298 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3301 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3303 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3306 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3308 struct desc_ptr desc_ptr;
3311 if (ctxt->mode == X86EMUL_MODE_PROT64)
3313 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3314 &desc_ptr.size, &desc_ptr.address,
3316 if (rc != X86EMUL_CONTINUE)
3318 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3319 is_noncanonical_address(desc_ptr.address))
3320 return emulate_gp(ctxt, 0);
3322 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3324 ctxt->ops->set_idt(ctxt, &desc_ptr);
3325 /* Disable writeback. */
3326 ctxt->dst.type = OP_NONE;
3327 return X86EMUL_CONTINUE;
3330 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3332 return em_lgdt_lidt(ctxt, true);
3335 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3339 rc = ctxt->ops->fix_hypercall(ctxt);
3341 /* Disable writeback. */
3342 ctxt->dst.type = OP_NONE;
3346 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3348 return em_lgdt_lidt(ctxt, false);
3351 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3353 if (ctxt->dst.type == OP_MEM)
3354 ctxt->dst.bytes = 2;
3355 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3356 return X86EMUL_CONTINUE;
3359 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3361 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3362 | (ctxt->src.val & 0x0f));
3363 ctxt->dst.type = OP_NONE;
3364 return X86EMUL_CONTINUE;
3367 static int em_loop(struct x86_emulate_ctxt *ctxt)
3369 int rc = X86EMUL_CONTINUE;
3371 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
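	/*
	 * 0xe2 is plain LOOP; 0xe0/0xe1 are LOOPNE/LOOPE, and XORing the
	 * opcode with 5 maps them onto the matching Jcc condition for
	 * test_cc().
	 */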
3372 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3373 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3374 rc = jmp_rel(ctxt, ctxt->src.val);
3379 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3381 int rc = X86EMUL_CONTINUE;
3383 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3384 rc = jmp_rel(ctxt, ctxt->src.val);
3389 static int em_in(struct x86_emulate_ctxt *ctxt)
3391 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3393 return X86EMUL_IO_NEEDED;
3395 return X86EMUL_CONTINUE;
3398 static int em_out(struct x86_emulate_ctxt *ctxt)
3400 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3402 /* Disable writeback. */
3403 ctxt->dst.type = OP_NONE;
3404 return X86EMUL_CONTINUE;
3407 static int em_cli(struct x86_emulate_ctxt *ctxt)
3409 if (emulator_bad_iopl(ctxt))
3410 return emulate_gp(ctxt, 0);
3412 ctxt->eflags &= ~X86_EFLAGS_IF;
3413 return X86EMUL_CONTINUE;
3416 static int em_sti(struct x86_emulate_ctxt *ctxt)
3418 if (emulator_bad_iopl(ctxt))
3419 return emulate_gp(ctxt, 0);
3421 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3422 ctxt->eflags |= X86_EFLAGS_IF;
3423 return X86EMUL_CONTINUE;
3426 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3428 u32 eax, ebx, ecx, edx;
3430 eax = reg_read(ctxt, VCPU_REGS_RAX);
3431 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3432 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3433 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3434 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3435 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3436 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3437 return X86EMUL_CONTINUE;
3440 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3444 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3445 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3447 ctxt->eflags &= ~0xffUL;
3448 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3449 return X86EMUL_CONTINUE;
3452 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3454 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3455 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3456 return X86EMUL_CONTINUE;
3459 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3461 switch (ctxt->op_bytes) {
3462 #ifdef CONFIG_X86_64
3464 asm("bswap %0" : "+r"(ctxt->dst.val));
3468 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3471 return X86EMUL_CONTINUE;
3474 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3476 /* emulating clflush regardless of cpuid */
3477 return X86EMUL_CONTINUE;
3480 static bool valid_cr(int nr)
3492 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3494 if (!valid_cr(ctxt->modrm_reg))
3495 return emulate_ud(ctxt);
3497 return X86EMUL_CONTINUE;
3500 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3502 u64 new_val = ctxt->src.val64;
3503 int cr = ctxt->modrm_reg;
3506 static u64 cr_reserved_bits[] = {
3507 0xffffffff00000000ULL,
3508 0, 0, 0, /* CR3 checked later */
3515 return emulate_ud(ctxt);
3517 if (new_val & cr_reserved_bits[cr])
3518 return emulate_gp(ctxt, 0);
3523 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3524 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3525 return emulate_gp(ctxt, 0);
3527 cr4 = ctxt->ops->get_cr(ctxt, 4);
3528 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3530 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3531 !(cr4 & X86_CR4_PAE))
3532 return emulate_gp(ctxt, 0);
3539 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3540 if (efer & EFER_LMA)
3541 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
3544 return emulate_gp(ctxt, 0);
3549 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3551 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3552 return emulate_gp(ctxt, 0);
3558 return X86EMUL_CONTINUE;
3561 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3565 ctxt->ops->get_dr(ctxt, 7, &dr7);
3567 /* Check if DR7.Global_Enable is set */
3568 return dr7 & (1 << 13);
3571 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3573 int dr = ctxt->modrm_reg;
3577 return emulate_ud(ctxt);
3579 cr4 = ctxt->ops->get_cr(ctxt, 4);
3580 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3581 return emulate_ud(ctxt);
3583 if (check_dr7_gd(ctxt)) {
3586 ctxt->ops->get_dr(ctxt, 6, &dr6);
3588 dr6 |= DR6_BD | DR6_RTM;
3589 ctxt->ops->set_dr(ctxt, 6, dr6);
3590 return emulate_db(ctxt);
3593 return X86EMUL_CONTINUE;
3596 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3598 u64 new_val = ctxt->src.val64;
3599 int dr = ctxt->modrm_reg;
3601 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3602 return emulate_gp(ctxt, 0);
3604 return check_dr_read(ctxt);
3607 static int check_svme(struct x86_emulate_ctxt *ctxt)
3611 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3613 if (!(efer & EFER_SVME))
3614 return emulate_ud(ctxt);
3616 return X86EMUL_CONTINUE;
3619 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3621 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3623 /* Valid physical address? */
3624 if (rax & 0xffff000000000000ULL)
3625 return emulate_gp(ctxt, 0);
3627 return check_svme(ctxt);
3630 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3632 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3634 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3635 return emulate_ud(ctxt);
3637 return X86EMUL_CONTINUE;
3640 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3642 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3643 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3645 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3646 ctxt->ops->check_pmc(ctxt, rcx))
3647 return emulate_gp(ctxt, 0);
3649 return X86EMUL_CONTINUE;
3652 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3654 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3655 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3656 return emulate_gp(ctxt, 0);
3658 return X86EMUL_CONTINUE;
3661 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3663 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3664 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3665 return emulate_gp(ctxt, 0);
3667 return X86EMUL_CONTINUE;
3670 #define D(_y) { .flags = (_y) }
3671 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3672 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3673 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3674 #define N D(NotImpl)
3675 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3676 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3677 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3678 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
3679 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3680 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3681 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3682 #define II(_f, _e, _i) \
3683 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3684 #define IIP(_f, _e, _i, _p) \
3685 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3686 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3687 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3689 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3690 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3691 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3692 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3693 #define I2bvIP(_f, _e, _i, _p) \
3694 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3696 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3697 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3698 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
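/*
 * F6ALU covers the six classic ALU encodings: r/m,reg and reg,r/m in both
 * byte and word/long widths, plus the AL,imm8 and eAX,imm forms.
 */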
3700 static const struct opcode group7_rm0[] = {
3702 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3706 static const struct opcode group7_rm1[] = {
3707 DI(SrcNone | Priv, monitor),
3708 DI(SrcNone | Priv, mwait),
3712 static const struct opcode group7_rm3[] = {
3713 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3714 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3715 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3716 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3717 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3718 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3719 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3720 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3723 static const struct opcode group7_rm7[] = {
3725 DIP(SrcNone, rdtscp, check_rdtsc),
3729 static const struct opcode group1[] = {
3731 F(Lock | PageTable, em_or),
3734 F(Lock | PageTable, em_and),
3740 static const struct opcode group1A[] = {
3741 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3744 static const struct opcode group2[] = {
3745 F(DstMem | ModRM, em_rol),
3746 F(DstMem | ModRM, em_ror),
3747 F(DstMem | ModRM, em_rcl),
3748 F(DstMem | ModRM, em_rcr),
3749 F(DstMem | ModRM, em_shl),
3750 F(DstMem | ModRM, em_shr),
3751 F(DstMem | ModRM, em_shl),
3752 F(DstMem | ModRM, em_sar),
3755 static const struct opcode group3[] = {
3756 F(DstMem | SrcImm | NoWrite, em_test),
3757 F(DstMem | SrcImm | NoWrite, em_test),
3758 F(DstMem | SrcNone | Lock, em_not),
3759 F(DstMem | SrcNone | Lock, em_neg),
3760 F(DstXacc | Src2Mem, em_mul_ex),
3761 F(DstXacc | Src2Mem, em_imul_ex),
3762 F(DstXacc | Src2Mem, em_div_ex),
3763 F(DstXacc | Src2Mem, em_idiv_ex),
3766 static const struct opcode group4[] = {
3767 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3768 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3772 static const struct opcode group5[] = {
3773 F(DstMem | SrcNone | Lock, em_inc),
3774 F(DstMem | SrcNone | Lock, em_dec),
3775 I(SrcMem | NearBranch, em_call_near_abs),
3776 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3777 I(SrcMem | NearBranch, em_jmp_abs),
3778 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3779 I(SrcMem | Stack, em_push), D(Undefined),
3782 static const struct opcode group6[] = {
3783 DI(Prot | DstMem, sldt),
3784 DI(Prot | DstMem, str),
3785 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3786 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3790 static const struct group_dual group7 = { {
3791 II(Mov | DstMem, em_sgdt, sgdt),
3792 II(Mov | DstMem, em_sidt, sidt),
3793 II(SrcMem | Priv, em_lgdt, lgdt),
3794 II(SrcMem | Priv, em_lidt, lidt),
3795 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3796 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3797 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3801 N, EXT(0, group7_rm3),
3802 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3803 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3807 static const struct opcode group8[] = {
3809 F(DstMem | SrcImmByte | NoWrite, em_bt),
3810 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3811 F(DstMem | SrcImmByte | Lock, em_btr),
3812 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3815 static const struct group_dual group9 = { {
3816 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3818 N, N, N, N, N, N, N, N,
3821 static const struct opcode group11[] = {
3822 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3826 static const struct gprefix pfx_0f_ae_7 = {
3827 I(SrcMem | ByteOp, em_clflush), N, N, N,
3830 static const struct group_dual group15 = { {
3831 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3833 N, N, N, N, N, N, N, N,
3836 static const struct gprefix pfx_0f_6f_0f_7f = {
3837 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3840 static const struct instr_dual instr_dual_0f_2b = {
3844 static const struct gprefix pfx_0f_2b = {
3845 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
3848 static const struct gprefix pfx_0f_28_0f_29 = {
3849 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3852 static const struct gprefix pfx_0f_e7 = {
3853 N, I(Sse, em_mov), N, N,
3856 static const struct escape escape_d9 = { {
3857 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3860 N, N, N, N, N, N, N, N,
3862 N, N, N, N, N, N, N, N,
3864 N, N, N, N, N, N, N, N,
3866 N, N, N, N, N, N, N, N,
3868 N, N, N, N, N, N, N, N,
3870 N, N, N, N, N, N, N, N,
3872 N, N, N, N, N, N, N, N,
3874 N, N, N, N, N, N, N, N,
3877 static const struct escape escape_db = { {
3878 N, N, N, N, N, N, N, N,
3881 N, N, N, N, N, N, N, N,
3883 N, N, N, N, N, N, N, N,
3885 N, N, N, N, N, N, N, N,
3887 N, N, N, N, N, N, N, N,
3889 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3891 N, N, N, N, N, N, N, N,
3893 N, N, N, N, N, N, N, N,
3895 N, N, N, N, N, N, N, N,
3898 static const struct escape escape_dd = { {
3899 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3902 N, N, N, N, N, N, N, N,
3904 N, N, N, N, N, N, N, N,
3906 N, N, N, N, N, N, N, N,
3908 N, N, N, N, N, N, N, N,
3910 N, N, N, N, N, N, N, N,
3912 N, N, N, N, N, N, N, N,
3914 N, N, N, N, N, N, N, N,
3916 N, N, N, N, N, N, N, N,
3919 static const struct instr_dual instr_dual_0f_c3 = {
3920 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
3923 static const struct opcode opcode_table[256] = {
3925 F6ALU(Lock, em_add),
3926 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3927 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3929 F6ALU(Lock | PageTable, em_or),
3930 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3933 F6ALU(Lock, em_adc),
3934 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3935 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3937 F6ALU(Lock, em_sbb),
3938 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3939 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3941 F6ALU(Lock | PageTable, em_and), N, N,
3943 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3945 F6ALU(Lock, em_xor), N, N,
3947 F6ALU(NoWrite, em_cmp), N, N,
3949 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3951 X8(I(SrcReg | Stack, em_push)),
3953 X8(I(DstReg | Stack, em_pop)),
3955 I(ImplicitOps | Stack | No64, em_pusha),
3956 I(ImplicitOps | Stack | No64, em_popa),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */,
3960 I(SrcImm | Mov | Stack, em_push),
3961 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3962 I(SrcImmByte | Mov | Stack, em_push),
3963 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3964 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3965 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3967 X16(D(SrcImmByte | NearBranch)),
3969 G(ByteOp | DstMem | SrcImm, group1),
3970 G(DstMem | SrcImm, group1),
3971 G(ByteOp | DstMem | SrcImm | No64, group1),
3972 G(DstMem | SrcImmByte, group1),
3973 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3974 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3976 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3977 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3978 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3979 D(ModRM | SrcMem | NoAccess | DstReg),
3980 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3983 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3985 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3986 I(SrcImmFAddr | No64, em_call_far), N,
3987 II(ImplicitOps | Stack, em_pushf, pushf),
3988 II(ImplicitOps | Stack, em_popf, popf),
3989 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
3991 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3992 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
3993 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3994 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
3996 F2bv(DstAcc | SrcImm | NoWrite, em_test),
3997 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3998 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3999 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4001 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4003 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4005 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4006 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4007 I(ImplicitOps | NearBranch, em_ret),
4008 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4009 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4010 G(ByteOp, group11), G(0, group11),
4012 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4013 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4014 I(ImplicitOps | Stack, em_ret_far),
4015 D(ImplicitOps), DI(SrcImmByte, intn),
4016 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4018 G(Src2One | ByteOp, group2), G(Src2One, group2),
4019 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4020 I(DstAcc | SrcImmUByte | No64, em_aam),
4021 I(DstAcc | SrcImmUByte | No64, em_aad),
4022 F(DstAcc | ByteOp | No64, em_salc),
4023 I(DstAcc | SrcXLat | ByteOp, em_mov),
4025 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4027 X3(I(SrcImmByte | NearBranch, em_loop)),
4028 I(SrcImmByte | NearBranch, em_jcxz),
4029 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4030 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4032 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4033 I(SrcImmFAddr | No64, em_jmp_far),
4034 D(SrcImmByte | ImplicitOps | NearBranch),
4035 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4036 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4038 N, DI(ImplicitOps, icebp), N, N,
4039 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4040 G(ByteOp, group3), G(0, group3),
4042 D(ImplicitOps), D(ImplicitOps),
4043 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4044 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4047 static const struct opcode twobyte_table[256] = {
4049 G(0, group6), GD(0, &group7), N, N,
4050 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4051 II(ImplicitOps | Priv, em_clts, clts), N,
4052 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4053 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4055 N, N, N, N, N, N, N, N,
4056 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4057 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4059 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4060 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4061 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4063 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4066 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4067 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4068 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4071 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4072 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4073 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4074 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4075 I(ImplicitOps | EmulateOnUD, em_sysenter),
4076 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4078 N, N, N, N, N, N, N, N,
4080 X16(D(DstReg | SrcMem | ModRM)),
4082 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4087 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4092 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4094 X16(D(SrcImm | NearBranch)),
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4098 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4099 II(ImplicitOps, em_cpuid, cpuid),
4100 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4101 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4102 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4104 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4105 DI(ImplicitOps, rsm),
4106 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4107 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4108 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4109 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4111 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4112 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4113 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4114 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4115 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4116 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4120 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4121 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4122 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4124 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4125 N, ID(0, &instr_dual_0f_c3),
4126 N, N, N, GD(0, &group9),
4128 X8(I(DstReg, em_bswap)),
4130 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4132 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4133 N, N, N, N, N, N, N, N,
4135 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4138 static const struct instr_dual instr_dual_0f_38_f0 = {
4139 I(DstReg | SrcMem | Mov, em_movbe), N
4142 static const struct instr_dual instr_dual_0f_38_f1 = {
4143 I(DstMem | SrcReg | Mov, em_movbe), N
4146 static const struct gprefix three_byte_0f_38_f0 = {
4147 ID(0, &instr_dual_0f_38_f0), N, N, N
4150 static const struct gprefix three_byte_0f_38_f1 = {
4151 ID(0, &instr_dual_0f_38_f1), N, N, N
 * Insns below are selected by the mandatory prefix and indexed by the
 * third opcode byte.
4158 static const struct opcode opcode_map_0f_38[256] = {
4160 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4162 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4164 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4165 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4184 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4188 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4194 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4195 unsigned size, bool sign_extension)
4197 int rc = X86EMUL_CONTINUE;
4201 op->addr.mem.ea = ctxt->_eip;
4202 /* NB. Immediates are sign-extended as necessary. */
4203 switch (op->bytes) {
4205 op->val = insn_fetch(s8, ctxt);
4208 op->val = insn_fetch(s16, ctxt);
4211 op->val = insn_fetch(s32, ctxt);
4214 op->val = insn_fetch(s64, ctxt);
4217 if (!sign_extension) {
4218 switch (op->bytes) {
4226 op->val &= 0xffffffff;
4234 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4237 int rc = X86EMUL_CONTINUE;
4241 decode_register_operand(ctxt, op);
4244 rc = decode_imm(ctxt, op, 1, false);
4247 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4251 if (ctxt->d & BitOp)
4252 fetch_bit_operand(ctxt);
4253 op->orig_val = op->val;
4256 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4260 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4261 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4262 fetch_register_operand(op);
4263 op->orig_val = op->val;
4267 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4268 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4269 fetch_register_operand(op);
4270 op->orig_val = op->val;
4273 if (ctxt->d & ByteOp) {
4278 op->bytes = ctxt->op_bytes;
4279 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4280 fetch_register_operand(op);
4281 op->orig_val = op->val;
4285 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4287 register_address(ctxt, VCPU_REGS_RDI);
4288 op->addr.mem.seg = VCPU_SREG_ES;
4295 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4296 fetch_register_operand(op);
4301 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4304 rc = decode_imm(ctxt, op, 1, true);
4312 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4315 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4318 ctxt->memop.bytes = 1;
4319 if (ctxt->memop.type == OP_REG) {
4320 ctxt->memop.addr.reg = decode_register(ctxt,
4321 ctxt->modrm_rm, true);
4322 fetch_register_operand(&ctxt->memop);
4326 ctxt->memop.bytes = 2;
4329 ctxt->memop.bytes = 4;
4332 rc = decode_imm(ctxt, op, 2, false);
4335 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4339 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4341 register_address(ctxt, VCPU_REGS_RSI);
4342 op->addr.mem.seg = ctxt->seg_override;
4348 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4351 reg_read(ctxt, VCPU_REGS_RBX) +
4352 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4353 op->addr.mem.seg = ctxt->seg_override;
4358 op->addr.mem.ea = ctxt->_eip;
4359 op->bytes = ctxt->op_bytes + 2;
4360 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4363 ctxt->memop.bytes = ctxt->op_bytes + 2;
4367 op->val = VCPU_SREG_ES;
4371 op->val = VCPU_SREG_CS;
4375 op->val = VCPU_SREG_SS;
4379 op->val = VCPU_SREG_DS;
4383 op->val = VCPU_SREG_FS;
4387 op->val = VCPU_SREG_GS;
4390 /* Special instructions do their own operand decoding. */
4392 op->type = OP_NONE; /* Disable writeback. */
4400 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4402 int rc = X86EMUL_CONTINUE;
4403 int mode = ctxt->mode;
4404 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4405 bool op_prefix = false;
4406 bool has_seg_override = false;
4407 struct opcode opcode;
4409 ctxt->memop.type = OP_NONE;
4410 ctxt->memopp = NULL;
4411 ctxt->_eip = ctxt->eip;
4412 ctxt->fetch.ptr = ctxt->fetch.data;
4413 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4414 ctxt->opcode_len = 1;
4416 memcpy(ctxt->fetch.data, insn, insn_len);
4418 rc = __do_insn_fetch_bytes(ctxt, 1);
4419 if (rc != X86EMUL_CONTINUE)
4424 case X86EMUL_MODE_REAL:
4425 case X86EMUL_MODE_VM86:
4426 case X86EMUL_MODE_PROT16:
4427 def_op_bytes = def_ad_bytes = 2;
4429 case X86EMUL_MODE_PROT32:
4430 def_op_bytes = def_ad_bytes = 4;
4432 #ifdef CONFIG_X86_64
4433 case X86EMUL_MODE_PROT64:
4439 return EMULATION_FAILED;
4442 ctxt->op_bytes = def_op_bytes;
4443 ctxt->ad_bytes = def_ad_bytes;
4445 /* Legacy prefixes. */
4447 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4448 case 0x66: /* operand-size override */
4450 /* switch between 2/4 bytes */
4451 ctxt->op_bytes = def_op_bytes ^ 6;
4453 case 0x67: /* address-size override */
4454 if (mode == X86EMUL_MODE_PROT64)
4455 /* switch between 4/8 bytes */
4456 ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
4461 case 0x26: /* ES override */
4462 case 0x2e: /* CS override */
4463 case 0x36: /* SS override */
4464 case 0x3e: /* DS override */
4465 has_seg_override = true;
4466 ctxt->seg_override = (ctxt->b >> 3) & 3;
4468 case 0x64: /* FS override */
4469 case 0x65: /* GS override */
4470 has_seg_override = true;
4471 ctxt->seg_override = ctxt->b & 7;
4473 case 0x40 ... 0x4f: /* REX */
4474 if (mode != X86EMUL_MODE_PROT64)
4476 ctxt->rex_prefix = ctxt->b;
4478 case 0xf0: /* LOCK */
4479 ctxt->lock_prefix = 1;
4481 case 0xf2: /* REPNE/REPNZ */
4482 case 0xf3: /* REP/REPE/REPZ */
4483 ctxt->rep_prefix = ctxt->b;
4489 /* Any legacy prefix after a REX prefix nullifies its effect. */
4491 ctxt->rex_prefix = 0;
4497 if (ctxt->rex_prefix & 8)
4498 ctxt->op_bytes = 8; /* REX.W */
4500 /* Opcode byte(s). */
4501 opcode = opcode_table[ctxt->b];
4502 /* Two-byte opcode? */
4503 if (ctxt->b == 0x0f) {
4504 ctxt->opcode_len = 2;
4505 ctxt->b = insn_fetch(u8, ctxt);
4506 opcode = twobyte_table[ctxt->b];
4508 /* 0F_38 opcode map */
4509 if (ctxt->b == 0x38) {
4510 ctxt->opcode_len = 3;
4511 ctxt->b = insn_fetch(u8, ctxt);
4512 opcode = opcode_map_0f_38[ctxt->b];
4515 ctxt->d = opcode.flags;
4517 if (ctxt->d & ModRM)
4518 ctxt->modrm = insn_fetch(u8, ctxt);
4520 /* vex-prefix instructions are not implemented */
4521 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}
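	/*
	 * Resolve indirect table entries: a Group redirects through
	 * ModRM.reg, GroupDual/InstrDual through ModRM.mod, RMExt through
	 * ModRM.rm, Prefix through the mandatory SIMD prefix, and Escape
	 * through the x87 opcode map, until a concrete opcode remains.
	 */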
4526 while (ctxt->d & GroupMask) {
4527 switch (ctxt->d & GroupMask) {
4529 goffset = (ctxt->modrm >> 3) & 7;
4530 opcode = opcode.u.group[goffset];
4533 goffset = (ctxt->modrm >> 3) & 7;
4534 if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
4540 goffset = ctxt->modrm & 7;
4541 opcode = opcode.u.group[goffset];
4544 if (ctxt->rep_prefix && op_prefix)
4545 return EMULATION_FAILED;
4546 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4547 switch (simd_prefix) {
4548 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4549 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4550 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4551 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4555 if (ctxt->modrm > 0xbf)
			opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
		else
			opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4561 if ((ctxt->modrm >> 6) == 3)
			opcode = opcode.u.idual->mod3;
		else
			opcode = opcode.u.idual->mod012;
4567 return EMULATION_FAILED;
4570 ctxt->d &= ~(u64)GroupMask;
4571 ctxt->d |= opcode.flags;
4576 return EMULATION_FAILED;
4578 ctxt->execute = opcode.u.execute;
4580 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4581 return EMULATION_FAILED;
4583 if (unlikely(ctxt->d &
4584 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4587 * These are copied unconditionally here, and checked unconditionally
4588 * in x86_emulate_insn.
4590 ctxt->check_perm = opcode.check_perm;
4591 ctxt->intercept = opcode.intercept;
4593 if (ctxt->d & NotImpl)
4594 return EMULATION_FAILED;
4596 if (mode == X86EMUL_MODE_PROT64) {
		if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
			ctxt->op_bytes = 8;
		else if (ctxt->d & NearBranch)
			ctxt->op_bytes = 8;
	}
4603 if (ctxt->d & Op3264) {
		if (mode == X86EMUL_MODE_PROT64)
			ctxt->op_bytes = 8;
		else
			ctxt->op_bytes = 4;
	}
	if ((ctxt->d & No16) && ctxt->op_bytes == 2)
		ctxt->op_bytes = 4;

	if (ctxt->d & Sse)
		ctxt->op_bytes = 16;
	else if (ctxt->d & Mmx)
		ctxt->op_bytes = 8;
4619 /* ModRM and SIB bytes. */
4620 if (ctxt->d & ModRM) {
4621 rc = decode_modrm(ctxt, &ctxt->memop);
4622 if (!has_seg_override) {
4623 has_seg_override = true;
4624 ctxt->seg_override = ctxt->modrm_seg;
4626 } else if (ctxt->d & MemAbs)
4627 rc = decode_abs(ctxt, &ctxt->memop);
4628 if (rc != X86EMUL_CONTINUE)
4631 if (!has_seg_override)
4632 ctxt->seg_override = VCPU_SREG_DS;
4634 ctxt->memop.addr.mem.seg = ctxt->seg_override;
4637 * Decode and fetch the source operand: register, memory
4640 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4641 if (rc != X86EMUL_CONTINUE)
4645 * Decode and fetch the second source operand: register, memory
4648 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4649 if (rc != X86EMUL_CONTINUE)
4652 /* Decode and fetch the destination operand: register or memory. */
4653 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4655 if (ctxt->rip_relative)
4656 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
4657 ctxt->memopp->addr.mem.ea + ctxt->_eip);
4660 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4663 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4665 return ctxt->d & PageTable;
4668 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
	/* The second termination condition only applies to REPE
	 * and REPNE. If the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ, test the corresponding
	 * termination condition according to:
4674 * - if REPE/REPZ and ZF = 0 then done
4675 * - if REPNE/REPNZ and ZF = 1 then done
4677 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4678 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4679 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4680 ((ctxt->eflags & EFLG_ZF) == 0))
4681 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4682 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4688 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4692 ctxt->ops->get_fpu(ctxt);
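	/*
	 * fwait raises any pending x87 exception now; the exception-table
	 * fixup turns the resulting fault into a flag so we can inject #MF
	 * into the guest instead.
	 */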
4693 asm volatile("1: fwait \n\t"
4695 ".pushsection .fixup,\"ax\" \n\t"
4697 "movb $1, %[fault] \n\t"
4700 _ASM_EXTABLE(1b, 3b)
4701 : [fault]"+qm"(fault));
4702 ctxt->ops->put_fpu(ctxt);
4704 if (unlikely(fault))
4705 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4707 return X86EMUL_CONTINUE;
4710 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4713 if (op->type == OP_MM)
4714 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4717 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4719 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
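	/*
	 * Fastop stubs for one operation are laid out in an array indexed
	 * by log2 of the operand size; the emulated flags are loaded into
	 * the host EFLAGS around the call and read back afterwards.
	 */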
4720 if (!(ctxt->d & ByteOp))
4721 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4722 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4723 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4725 : "c"(ctxt->src2.val));
4726 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4727 if (!fop) /* exception is returned in fop variable */
4728 return emulate_de(ctxt);
4729 return X86EMUL_CONTINUE;
4732 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4734 memset(&ctxt->rip_relative, 0,
4735 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
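	/*
	 * Everything from rip_relative up to (but not including) modrm is
	 * laid out contiguously in the context so the whole decode state
	 * can be cleared with one memset.
	 */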
4737 ctxt->io_read.pos = 0;
4738 ctxt->io_read.end = 0;
4739 ctxt->mem_read.end = 0;
4742 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4744 const struct x86_emulate_ops *ops = ctxt->ops;
4745 int rc = X86EMUL_CONTINUE;
4746 int saved_dst_type = ctxt->dst.type;
4748 ctxt->mem_read.pos = 0;
4750 /* LOCK prefix is allowed only with some instructions */
4751 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4752 rc = emulate_ud(ctxt);
4756 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4757 rc = emulate_ud(ctxt);

	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
		    (ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the FPU is exception-safe, we can
			 * fetch operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}
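
	/*
	 * The slow-path checks passed; now fetch from memory the operands
	 * for which decode computed only addresses.
	 */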
	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;
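
	/*
	 * Execution proper: most opcodes are implemented via an ->execute
	 * callback or a fastop stub; only a few simple opcodes are still
	 * handled by the switch statements below.
	 */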
	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x63: /* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc: /* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd: /* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache.  Usually happens before
				 * decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
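
	/*
	 * Two-byte (0x0f xx) opcodes that have no ->execute callback are
	 * handled by this switch.
	 */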
twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val
						       : (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}
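
/*
 * Thin wrappers letting callers outside the emulator drop or flush the
 * cached copies of the guest's general-purpose registers.
 */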
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}