VirtualBox

source: vbox/trunk/src/recompiler_new/target-i386/op_helper.c@16499

Last change on this file since 16499 was 16499, checked in by vboxsync, 16 years ago

REM: fixed 3588, segment selector wasn't loaded sometimes

File size: 201.9 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "host-utils.h"
32
33#ifdef VBOX
34# ifdef VBOX_WITH_VMI
35# include <VBox/parav.h>
36# endif
37#include "qemu-common.h"
38#include <math.h>
39#include "tcg.h"
40#endif
41//#define DEBUG_PCALL
42
43#if 0
44#define raise_exception_err(a, b)\
45do {\
46 if (logfile)\
47 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
48 (raise_exception_err)(a, b);\
49} while (0)
50#endif
51
52const uint8_t parity_table[256] = {
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85};
86
87/* modulo 17 table */
88const uint8_t rclw_table[32] = {
89 0, 1, 2, 3, 4, 5, 6, 7,
90 8, 9,10,11,12,13,14,15,
91 16, 0, 1, 2, 3, 4, 5, 6,
92 7, 8, 9,10,11,12,13,14,
93};
94
95/* modulo 9 table */
96const uint8_t rclb_table[32] = {
97 0, 1, 2, 3, 4, 5, 6, 7,
98 8, 0, 1, 2, 3, 4, 5, 6,
99 7, 8, 0, 1, 2, 3, 4, 5,
100 6, 7, 8, 0, 1, 2, 3, 4,
101};
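/* Note (illustrative): these tables presumably reduce the masked 5-bit shift
   count to the effective rotate count for RCL through CF, where the carry
   flag makes the rotation 17 bits wide (word) or 9 bits wide (byte);
   e.g. rclw_table[20] == 3 == 20 % 17 and rclb_table[20] == 2 == 20 % 9. */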
102
103const CPU86_LDouble f15rk[7] =
104{
105 0.00000000000000000000L,
106 1.00000000000000000000L,
107 3.14159265358979323851L, /*pi*/
108 0.30102999566398119523L, /*lg2*/
109 0.69314718055994530943L, /*ln2*/
110 1.44269504088896340739L, /*l2e*/
111 3.32192809488736234781L, /*l2t*/
112};
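/* Note: this constant table presumably backs the x87 load-constant helpers
   (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T) defined later in this
   file; the ordering matches the comments above. */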
113
114/* broken thread support */
115
116spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
117
118void helper_lock(void)
119{
120 spin_lock(&global_cpu_lock);
121}
122
123void helper_unlock(void)
124{
125 spin_unlock(&global_cpu_lock);
126}
127
128void helper_write_eflags(target_ulong t0, uint32_t update_mask)
129{
130 load_eflags(t0, update_mask);
131}
132
133target_ulong helper_read_eflags(void)
134{
135 uint32_t eflags;
136 eflags = cc_table[CC_OP].compute_all();
137 eflags |= (DF & DF_MASK);
138 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
139 return eflags;
140}
141
142#ifdef VBOX
143void helper_write_eflags_vme(target_ulong t0)
144{
145 unsigned int new_eflags = t0;
146
147 assert(env->eflags & (1<<VM_SHIFT));
148
149 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
150 /* if TF will be set -> #GP */
151 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
152 || (new_eflags & TF_MASK)) {
153 raise_exception(EXCP0D_GPF);
154 } else {
155 load_eflags(new_eflags,
156 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
157
158 if (new_eflags & IF_MASK) {
159 env->eflags |= VIF_MASK;
160 } else {
161 env->eflags &= ~VIF_MASK;
162 }
163 }
164}
165
166target_ulong helper_read_eflags_vme(void)
167{
168 uint32_t eflags;
169 eflags = cc_table[CC_OP].compute_all();
170 eflags |= (DF & DF_MASK);
171 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
172 if (env->eflags & VIF_MASK)
173 eflags |= IF_MASK;
174 else
175 eflags &= ~IF_MASK;
176
177 /* According to the AMD manual, this should be read with IOPL == 3 */
178 eflags |= (3 << IOPL_SHIFT);
179
180 /* We only use helper_read_eflags_vme() in 16-bit mode */
181 return eflags & 0xffff;
182}
183
184void helper_dump_state()
185{
186 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
187 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
188 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
189 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
190 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
191 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
192 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
193}
194#endif
195
196/* return non zero if error */
197#ifndef VBOX
198static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
199#else /* VBOX */
200DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
201#endif /* VBOX */
202 int selector)
203{
204 SegmentCache *dt;
205 int index;
206 target_ulong ptr;
207
208#ifdef VBOX
209 /* Trying to load a selector with RPL=1 while the guest is in ring 0 (raw mode)? */
210 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
211 {
212 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
213 selector = selector & 0xfffc;
214 }
215#endif
216
217 if (selector & 0x4)
218 dt = &env->ldt;
219 else
220 dt = &env->gdt;
221 index = selector & ~7;
222 if ((index + 7) > dt->limit)
223 return -1;
224 ptr = dt->base + index;
225 *e1_ptr = ldl_kernel(ptr);
226 *e2_ptr = ldl_kernel(ptr + 4);
227 return 0;
228}
229
230#ifndef VBOX
231static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
232#else /* VBOX */
233DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
234#endif /* VBOX */
235{
236 unsigned int limit;
237 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
238 if (e2 & DESC_G_MASK)
239 limit = (limit << 12) | 0xfff;
240 return limit;
241}
242
243#ifndef VBOX
244static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
245#else /* VBOX */
246DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
247#endif /* VBOX */
248{
249 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
250}
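/* Illustrative example of the descriptor decoding above: for the flat 4GB
   code descriptor 0x00cf9a000000ffff, e1 = 0x0000ffff and e2 = 0x00cf9a00,
   so get_seg_base() yields 0x00000000 and get_seg_limit() yields
   (0x000fffff << 12) | 0xfff = 0xffffffff because DESC_G_MASK is set. */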
251
252#ifndef VBOX
253static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
254#else /* VBOX */
255DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
256#endif /* VBOX */
257{
258 sc->base = get_seg_base(e1, e2);
259 sc->limit = get_seg_limit(e1, e2);
260 sc->flags = e2;
261}
262
263/* init the segment cache in vm86 mode. */
264#ifndef VBOX
265static inline void load_seg_vm(int seg, int selector)
266#else /* VBOX */
267DECLINLINE(void) load_seg_vm(int seg, int selector)
268#endif /* VBOX */
269{
270 selector &= 0xffff;
271#ifdef VBOX
272 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK;
273
274 if (seg == R_CS)
275 flags |= DESC_CS_MASK;
276
277 cpu_x86_load_seg_cache(env, seg, selector,
278 (selector << 4), 0xffff, flags);
279#else
280 cpu_x86_load_seg_cache(env, seg, selector,
281 (selector << 4), 0xffff, 0);
282#endif
283}
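/* Example of the vm86 segment load above: real-mode/vm86 segments have
   base = selector << 4 and a fixed 64KB limit, so loading 0xb800 gives a
   segment cache with base 0xb8000 and limit 0xffff. */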
284
285#ifndef VBOX
286static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
287#else /* VBOX */
288DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
289#endif /* VBOX */
290 uint32_t *esp_ptr, int dpl)
291{
292#ifndef VBOX
293 int type, index, shift;
294#else
295 unsigned int type, index, shift;
296#endif
297
298#if 0
299 {
300 int i;
301 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
302 for(i=0;i<env->tr.limit;i++) {
303 printf("%02x ", env->tr.base[i]);
304 if ((i & 7) == 7) printf("\n");
305 }
306 printf("\n");
307 }
308#endif
309
310 if (!(env->tr.flags & DESC_P_MASK))
311 cpu_abort(env, "invalid tss");
312 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
313 if ((type & 7) != 1)
314 cpu_abort(env, "invalid tss type");
315 shift = type >> 3;
316 index = (dpl * 4 + 2) << shift;
317 if (index + (4 << shift) - 1 > env->tr.limit)
318 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
319 if (shift == 0) {
320 *esp_ptr = lduw_kernel(env->tr.base + index);
321 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
322 } else {
323 *esp_ptr = ldl_kernel(env->tr.base + index);
324 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
325 }
326}
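/* Worked example for the TSS lookup above: for a 32-bit TSS (shift == 1) and
   dpl == 0, index = (0 * 4 + 2) << 1 = 4, so ESP0 is read from tr.base + 4
   and SS0 from tr.base + 8, matching the architectural TSS layout. */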
327
328/* XXX: merge with load_seg() */
329static void tss_load_seg(int seg_reg, int selector)
330{
331 uint32_t e1, e2;
332 int rpl, dpl, cpl;
333
334#ifdef VBOX
335 e1 = e2 = 0;
336 cpl = env->hflags & HF_CPL_MASK;
337 /* Trying to load a selector with RPL=1 while the guest is in ring 0 (raw mode)? */
338 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
339 {
340 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
341 selector = selector & 0xfffc;
342 }
343#endif
344
345 if ((selector & 0xfffc) != 0) {
346 if (load_segment(&e1, &e2, selector) != 0)
347 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
348 if (!(e2 & DESC_S_MASK))
349 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
350 rpl = selector & 3;
351 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
352 cpl = env->hflags & HF_CPL_MASK;
353 if (seg_reg == R_CS) {
354 if (!(e2 & DESC_CS_MASK))
355 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
356 /* XXX: is it correct ? */
357 if (dpl != rpl)
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359 if ((e2 & DESC_C_MASK) && dpl > rpl)
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 } else if (seg_reg == R_SS) {
362 /* SS must be writable data */
363 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 if (dpl != cpl || dpl != rpl)
366 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
367 } else {
368 /* not readable code */
369 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
371 /* if data or non-conforming code, check the rights */
372 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
373 if (dpl < cpl || dpl < rpl)
374 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
375 }
376 }
377 if (!(e2 & DESC_P_MASK))
378 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
379 cpu_x86_load_seg_cache(env, seg_reg, selector,
380 get_seg_base(e1, e2),
381 get_seg_limit(e1, e2),
382 e2);
383 } else {
384 if (seg_reg == R_SS || seg_reg == R_CS)
385 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
386#ifdef VBOX
387 cpu_x86_load_seg_cache(env, seg_reg, selector,
388 0, 0, 0);
389#endif
390 }
391}
392
393#define SWITCH_TSS_JMP 0
394#define SWITCH_TSS_IRET 1
395#define SWITCH_TSS_CALL 2
396
397/* XXX: restore CPU state in registers (PowerPC case) */
398static void switch_tss(int tss_selector,
399 uint32_t e1, uint32_t e2, int source,
400 uint32_t next_eip)
401{
402 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
403 target_ulong tss_base;
404 uint32_t new_regs[8], new_segs[6];
405 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
406 uint32_t old_eflags, eflags_mask;
407 SegmentCache *dt;
408#ifndef VBOX
409 int index;
410#else
411 unsigned int index;
412#endif
413 target_ulong ptr;
414
415 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
416#ifdef DEBUG_PCALL
417 if (loglevel & CPU_LOG_PCALL)
418 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
419#endif
420
421#if defined(VBOX) && defined(DEBUG)
422 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
423#endif
424
425 /* if task gate, we read the TSS segment and we load it */
426 if (type == 5) {
427 if (!(e2 & DESC_P_MASK))
428 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
429 tss_selector = e1 >> 16;
430 if (tss_selector & 4)
431 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
432 if (load_segment(&e1, &e2, tss_selector) != 0)
433 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
434 if (e2 & DESC_S_MASK)
435 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
436 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
437 if ((type & 7) != 1)
438 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
439 }
440
441 if (!(e2 & DESC_P_MASK))
442 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
443
444 if (type & 8)
445 tss_limit_max = 103;
446 else
447 tss_limit_max = 43;
448 tss_limit = get_seg_limit(e1, e2);
449 tss_base = get_seg_base(e1, e2);
450 if ((tss_selector & 4) != 0 ||
451 tss_limit < tss_limit_max)
452 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
453 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
454 if (old_type & 8)
455 old_tss_limit_max = 103;
456 else
457 old_tss_limit_max = 43;
458
459 /* read all the registers from the new TSS */
460 if (type & 8) {
461 /* 32 bit */
462 new_cr3 = ldl_kernel(tss_base + 0x1c);
463 new_eip = ldl_kernel(tss_base + 0x20);
464 new_eflags = ldl_kernel(tss_base + 0x24);
465 for(i = 0; i < 8; i++)
466 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
467 for(i = 0; i < 6; i++)
468 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
469 new_ldt = lduw_kernel(tss_base + 0x60);
470 new_trap = ldl_kernel(tss_base + 0x64);
471 } else {
472 /* 16 bit */
473 new_cr3 = 0;
474 new_eip = lduw_kernel(tss_base + 0x0e);
475 new_eflags = lduw_kernel(tss_base + 0x10);
476 for(i = 0; i < 8; i++)
477 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
478 for(i = 0; i < 4; i++)
479 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
480 new_ldt = lduw_kernel(tss_base + 0x2a);
481 new_segs[R_FS] = 0;
482 new_segs[R_GS] = 0;
483 new_trap = 0;
484 }
485
486 /* NOTE: we must avoid memory exceptions during the task switch,
487 so we make dummy accesses beforehand */
488 /* XXX: it can still fail in some cases, so a bigger hack is
489 necessary to validate the TLB after having done the accesses */
490
491 v1 = ldub_kernel(env->tr.base);
492 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
493 stb_kernel(env->tr.base, v1);
494 stb_kernel(env->tr.base + old_tss_limit_max, v2);
495
496 /* clear busy bit (it is restartable) */
497 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
498 target_ulong ptr;
499 uint32_t e2;
500 ptr = env->gdt.base + (env->tr.selector & ~7);
501 e2 = ldl_kernel(ptr + 4);
502 e2 &= ~DESC_TSS_BUSY_MASK;
503 stl_kernel(ptr + 4, e2);
504 }
505 old_eflags = compute_eflags();
506 if (source == SWITCH_TSS_IRET)
507 old_eflags &= ~NT_MASK;
508
509 /* save the current state in the old TSS */
510 if (type & 8) {
511 /* 32 bit */
512 stl_kernel(env->tr.base + 0x20, next_eip);
513 stl_kernel(env->tr.base + 0x24, old_eflags);
514 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
515 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
516 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
517 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
518 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
519 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
520 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
521 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
522 for(i = 0; i < 6; i++)
523 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
524#if defined(VBOX) && defined(DEBUG)
525 printf("TSS 32 bits switch\n");
526 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
527#endif
528 } else {
529 /* 16 bit */
530 stw_kernel(env->tr.base + 0x0e, next_eip);
531 stw_kernel(env->tr.base + 0x10, old_eflags);
532 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
533 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
534 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
535 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
536 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
537 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
538 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
539 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
540 for(i = 0; i < 4; i++)
541 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
542 }
543
544 /* now if an exception occurs, it will occur in the next task
545 context */
546
547 if (source == SWITCH_TSS_CALL) {
548 stw_kernel(tss_base, env->tr.selector);
549 new_eflags |= NT_MASK;
550 }
551
552 /* set busy bit */
553 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
554 target_ulong ptr;
555 uint32_t e2;
556 ptr = env->gdt.base + (tss_selector & ~7);
557 e2 = ldl_kernel(ptr + 4);
558 e2 |= DESC_TSS_BUSY_MASK;
559 stl_kernel(ptr + 4, e2);
560 }
561
562 /* set the new CPU state */
563 /* from this point, any exception which occurs can give problems */
564 env->cr[0] |= CR0_TS_MASK;
565 env->hflags |= HF_TS_MASK;
566 env->tr.selector = tss_selector;
567 env->tr.base = tss_base;
568 env->tr.limit = tss_limit;
569 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
570
571 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
572 cpu_x86_update_cr3(env, new_cr3);
573 }
574
575 /* load all registers without an exception, then reload them with
576 possible exception */
577 env->eip = new_eip;
578 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
579 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
580 if (!(type & 8))
581 eflags_mask &= 0xffff;
582 load_eflags(new_eflags, eflags_mask);
583 /* XXX: what to do in 16 bit case ? */
584 EAX = new_regs[0];
585 ECX = new_regs[1];
586 EDX = new_regs[2];
587 EBX = new_regs[3];
588 ESP = new_regs[4];
589 EBP = new_regs[5];
590 ESI = new_regs[6];
591 EDI = new_regs[7];
592 if (new_eflags & VM_MASK) {
593 for(i = 0; i < 6; i++)
594 load_seg_vm(i, new_segs[i]);
595 /* in vm86, CPL is always 3 */
596 cpu_x86_set_cpl(env, 3);
597 } else {
598 /* CPL is set to the RPL of CS */
599 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
600 /* load just the selectors first, as the rest may trigger exceptions */
601 for(i = 0; i < 6; i++)
602 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
603 }
604
605 env->ldt.selector = new_ldt & ~4;
606 env->ldt.base = 0;
607 env->ldt.limit = 0;
608 env->ldt.flags = 0;
609
610 /* load the LDT */
611 if (new_ldt & 4)
612 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
613
614 if ((new_ldt & 0xfffc) != 0) {
615 dt = &env->gdt;
616 index = new_ldt & ~7;
617 if ((index + 7) > dt->limit)
618 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
619 ptr = dt->base + index;
620 e1 = ldl_kernel(ptr);
621 e2 = ldl_kernel(ptr + 4);
622 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
623 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
624 if (!(e2 & DESC_P_MASK))
625 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
626 load_seg_cache_raw_dt(&env->ldt, e1, e2);
627 }
628
629 /* load the segments */
630 if (!(new_eflags & VM_MASK)) {
631 tss_load_seg(R_CS, new_segs[R_CS]);
632 tss_load_seg(R_SS, new_segs[R_SS]);
633 tss_load_seg(R_ES, new_segs[R_ES]);
634 tss_load_seg(R_DS, new_segs[R_DS]);
635 tss_load_seg(R_FS, new_segs[R_FS]);
636 tss_load_seg(R_GS, new_segs[R_GS]);
637 }
638
639 /* check that EIP is in the CS segment limits */
640 if (new_eip > env->segs[R_CS].limit) {
641 /* XXX: different exception if CALL ? */
642 raise_exception_err(EXCP0D_GPF, 0);
643 }
644}
645
646/* check if Port I/O is allowed in TSS */
647#ifndef VBOX
648static inline void check_io(int addr, int size)
649{
650 int io_offset, val, mask;
651
652#else /* VBOX */
653DECLINLINE(void) check_io(int addr, int size)
654{
655 int val, mask;
656 unsigned int io_offset;
657#endif /* VBOX */
658 /* TSS must be a valid 32 bit one */
659 if (!(env->tr.flags & DESC_P_MASK) ||
660 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
661 env->tr.limit < 103)
662 goto fail;
663 io_offset = lduw_kernel(env->tr.base + 0x66);
664 io_offset += (addr >> 3);
665 /* Note: the check needs two bytes */
666 if ((io_offset + 1) > env->tr.limit)
667 goto fail;
668 val = lduw_kernel(env->tr.base + io_offset);
669 val >>= (addr & 7);
670 mask = (1 << size) - 1;
671 /* all bits must be zero to allow the I/O */
672 if ((val & mask) != 0) {
673 fail:
674 raise_exception_err(EXCP0D_GPF, 0);
675 }
676}
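/* Example of the permission-bitmap check above: for an outw to port 0x3fa
   (addr = 0x3fa, size = 2), io_offset is the bitmap base read from TSS+0x66
   plus 0x7f, the word read there is shifted right by 2 (addr & 7) and masked
   with 3; the access is allowed only if both bits are clear. */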
677
678#ifdef VBOX
679/* Keep in sync with gen_check_external_event() */
680void helper_check_external_event()
681{
682 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
683 | CPU_INTERRUPT_EXTERNAL_TIMER
684 | CPU_INTERRUPT_EXTERNAL_DMA))
685 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
686 && (env->eflags & IF_MASK)
687 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
688 {
689 helper_external_event();
690 }
691
692}
693
694void helper_sync_seg(uint32_t reg)
695{
696 if (env->segs[reg].newselector)
697 sync_seg(env, reg, env->segs[reg].newselector);
698}
699#endif
700
701void helper_check_iob(uint32_t t0)
702{
703 check_io(t0, 1);
704}
705
706void helper_check_iow(uint32_t t0)
707{
708 check_io(t0, 2);
709}
710
711void helper_check_iol(uint32_t t0)
712{
713 check_io(t0, 4);
714}
715
716void helper_outb(uint32_t port, uint32_t data)
717{
718 cpu_outb(env, port, data & 0xff);
719}
720
721target_ulong helper_inb(uint32_t port)
722{
723 return cpu_inb(env, port);
724}
725
726void helper_outw(uint32_t port, uint32_t data)
727{
728 cpu_outw(env, port, data & 0xffff);
729}
730
731target_ulong helper_inw(uint32_t port)
732{
733 return cpu_inw(env, port);
734}
735
736void helper_outl(uint32_t port, uint32_t data)
737{
738 cpu_outl(env, port, data);
739}
740
741target_ulong helper_inl(uint32_t port)
742{
743 return cpu_inl(env, port);
744}
745
746#ifndef VBOX
747static inline unsigned int get_sp_mask(unsigned int e2)
748#else /* VBOX */
749DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
750#endif /* VBOX */
751{
752 if (e2 & DESC_B_MASK)
753 return 0xffffffff;
754 else
755 return 0xffff;
756}
757
758#ifdef TARGET_X86_64
759#define SET_ESP(val, sp_mask)\
760do {\
761 if ((sp_mask) == 0xffff)\
762 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
763 else if ((sp_mask) == 0xffffffffLL)\
764 ESP = (uint32_t)(val);\
765 else\
766 ESP = (val);\
767} while (0)
768#else
769#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
770#endif
771
772/* On 64-bit machines this can overflow, so this segment addition macro
773 * can be used to trim the value to 32 bits whenever needed */
774#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
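/* Illustrative case for SEG_ADDL: with ssp = 0xffff0000, sp = 0x00012000 and
   sp_mask = 0xffffffff, the 64-bit sum 0x100002000 is truncated back to
   0x00002000, mimicking the 4GB wrap-around of a real 32-bit CPU. */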
775
776/* XXX: add a is_user flag to have proper security support */
777#define PUSHW(ssp, sp, sp_mask, val)\
778{\
779 sp -= 2;\
780 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
781}
782
783#define PUSHL(ssp, sp, sp_mask, val)\
784{\
785 sp -= 4;\
786 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
787}
788
789#define POPW(ssp, sp, sp_mask, val)\
790{\
791 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
792 sp += 2;\
793}
794
795#define POPL(ssp, sp, sp_mask, val)\
796{\
797 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
798 sp += 4;\
799}
800
801/* protected mode interrupt */
802static void do_interrupt_protected(int intno, int is_int, int error_code,
803 unsigned int next_eip, int is_hw)
804{
805 SegmentCache *dt;
806 target_ulong ptr, ssp;
807 int type, dpl, selector, ss_dpl, cpl;
808 int has_error_code, new_stack, shift;
809 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
810 uint32_t old_eip, sp_mask;
811
812#ifdef VBOX
813 ss = ss_e1 = ss_e2 = 0;
814# ifdef VBOX_WITH_VMI
815 if ( intno == 6
816 && PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
817 {
818 env->exception_index = EXCP_PARAV_CALL;
819 cpu_loop_exit();
820 }
821# endif
822 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
823 cpu_loop_exit();
824#endif
825
826 has_error_code = 0;
827 if (!is_int && !is_hw) {
828 switch(intno) {
829 case 8:
830 case 10:
831 case 11:
832 case 12:
833 case 13:
834 case 14:
835 case 17:
836 has_error_code = 1;
837 break;
838 }
839 }
840 if (is_int)
841 old_eip = next_eip;
842 else
843 old_eip = env->eip;
844
845 dt = &env->idt;
846#ifndef VBOX
847 if (intno * 8 + 7 > dt->limit)
848#else
849 if ((unsigned)intno * 8 + 7 > dt->limit)
850#endif
851 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
852 ptr = dt->base + intno * 8;
853 e1 = ldl_kernel(ptr);
854 e2 = ldl_kernel(ptr + 4);
855 /* check gate type */
856 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
857 switch(type) {
858 case 5: /* task gate */
859 /* must do that check here to return the correct error code */
860 if (!(e2 & DESC_P_MASK))
861 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
862 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
863 if (has_error_code) {
864 int type;
865 uint32_t mask;
866 /* push the error code */
867 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
868 shift = type >> 3;
869 if (env->segs[R_SS].flags & DESC_B_MASK)
870 mask = 0xffffffff;
871 else
872 mask = 0xffff;
873 esp = (ESP - (2 << shift)) & mask;
874 ssp = env->segs[R_SS].base + esp;
875 if (shift)
876 stl_kernel(ssp, error_code);
877 else
878 stw_kernel(ssp, error_code);
879 SET_ESP(esp, mask);
880 }
881 return;
882 case 6: /* 286 interrupt gate */
883 case 7: /* 286 trap gate */
884 case 14: /* 386 interrupt gate */
885 case 15: /* 386 trap gate */
886 break;
887 default:
888 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
889 break;
890 }
891 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
892 cpl = env->hflags & HF_CPL_MASK;
893 /* check privilege if software int */
894 if (is_int && dpl < cpl)
895 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
896 /* check valid bit */
897 if (!(e2 & DESC_P_MASK))
898 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
899 selector = e1 >> 16;
900 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
901 if ((selector & 0xfffc) == 0)
902 raise_exception_err(EXCP0D_GPF, 0);
903
904 if (load_segment(&e1, &e2, selector) != 0)
905 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
906 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
907 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
908 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
909 if (dpl > cpl)
910 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
911 if (!(e2 & DESC_P_MASK))
912 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
913 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
914 /* to inner privilege */
915 get_ss_esp_from_tss(&ss, &esp, dpl);
916 if ((ss & 0xfffc) == 0)
917 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
918 if ((ss & 3) != dpl)
919 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
920 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
921 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
922 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
923 if (ss_dpl != dpl)
924 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
925 if (!(ss_e2 & DESC_S_MASK) ||
926 (ss_e2 & DESC_CS_MASK) ||
927 !(ss_e2 & DESC_W_MASK))
928 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
929 if (!(ss_e2 & DESC_P_MASK))
930#ifdef VBOX /* See page 3-477 of 253666.pdf */
931 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
932#else
933 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
934#endif
935 new_stack = 1;
936 sp_mask = get_sp_mask(ss_e2);
937 ssp = get_seg_base(ss_e1, ss_e2);
938#if defined(VBOX) && defined(DEBUG)
939 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
940#endif
941 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
942 /* to same privilege */
943 if (env->eflags & VM_MASK)
944 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
945 new_stack = 0;
946 sp_mask = get_sp_mask(env->segs[R_SS].flags);
947 ssp = env->segs[R_SS].base;
948 esp = ESP;
949 dpl = cpl;
950 } else {
951 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
952 new_stack = 0; /* avoid warning */
953 sp_mask = 0; /* avoid warning */
954 ssp = 0; /* avoid warning */
955 esp = 0; /* avoid warning */
956 }
957
958 shift = type >> 3;
959
960#if 0
961 /* XXX: check that enough room is available */
962 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
963 if (env->eflags & VM_MASK)
964 push_size += 8;
965 push_size <<= shift;
966#endif
967 if (shift == 1) {
968 if (new_stack) {
969 if (env->eflags & VM_MASK) {
970 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
971 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
972 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
973 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
974 }
975 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
976 PUSHL(ssp, esp, sp_mask, ESP);
977 }
978 PUSHL(ssp, esp, sp_mask, compute_eflags());
979 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
980 PUSHL(ssp, esp, sp_mask, old_eip);
981 if (has_error_code) {
982 PUSHL(ssp, esp, sp_mask, error_code);
983 }
984 } else {
985 if (new_stack) {
986 if (env->eflags & VM_MASK) {
987 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
988 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
989 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
990 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
991 }
992 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
993 PUSHW(ssp, esp, sp_mask, ESP);
994 }
995 PUSHW(ssp, esp, sp_mask, compute_eflags());
996 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
997 PUSHW(ssp, esp, sp_mask, old_eip);
998 if (has_error_code) {
999 PUSHW(ssp, esp, sp_mask, error_code);
1000 }
1001 }
1002
1003 if (new_stack) {
1004 if (env->eflags & VM_MASK) {
1005 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1006 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1007 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1008 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1009 }
1010 ss = (ss & ~3) | dpl;
1011 cpu_x86_load_seg_cache(env, R_SS, ss,
1012 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1013 }
1014 SET_ESP(esp, sp_mask);
1015
1016 selector = (selector & ~3) | dpl;
1017 cpu_x86_load_seg_cache(env, R_CS, selector,
1018 get_seg_base(e1, e2),
1019 get_seg_limit(e1, e2),
1020 e2);
1021 cpu_x86_set_cpl(env, dpl);
1022 env->eip = offset;
1023
1024 /* an interrupt gate clears the IF flag */
1025 if ((type & 1) == 0) {
1026 env->eflags &= ~IF_MASK;
1027 }
1028 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1029}
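/* Summary of the frame built above (32-bit gate, no privilege change, with
   an error code): the handler's stack holds EFLAGS, CS, EIP, error code from
   higher to lower addresses; on a privilege change the old SS:ESP pair is
   pushed first, and in vm86 mode GS/FS/DS/ES are pushed before that. */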
1030#ifdef VBOX
1031
1032/* check if VME interrupt redirection is enabled in TSS */
1033DECLINLINE(bool) is_vme_irq_redirected(int intno)
1034{
1035 unsigned int io_offset, intredir_offset;
1036 unsigned char val, mask;
1037
1038 /* TSS must be a valid 32 bit one */
1039 if (!(env->tr.flags & DESC_P_MASK) ||
1040 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1041 env->tr.limit < 103)
1042 goto fail;
1043 io_offset = lduw_kernel(env->tr.base + 0x66);
1044 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1045 if (io_offset < 0x68 + 0x20)
1046 io_offset = 0x68 + 0x20;
1047 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1048 intredir_offset = io_offset - 0x20;
1049
1050 intredir_offset += (intno >> 3);
1051 if ((intredir_offset) > env->tr.limit)
1052 goto fail;
1053
1054 val = ldub_kernel(env->tr.base + intredir_offset);
1055 mask = 1 << (unsigned char)(intno & 7);
1056
1057 /* bit set means no redirection. */
1058 if ((val & mask) != 0) {
1059 return false;
1060 }
1061 return true;
1062
1063fail:
1064 raise_exception_err(EXCP0D_GPF, 0);
1065 return true;
1066}
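/* Example of the redirection-bitmap lookup above: the 32-byte bitmap sits
   just below the I/O bitmap, so for intno 0x21 the byte at
   intredir_offset + 4 is read and bit 1 is tested; a set bit means the
   interrupt is NOT redirected to the vm86 handler. */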
1067
1068/* V86 mode software interrupt with CR4.VME=1 */
1069static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1070{
1071 target_ulong ptr, ssp;
1072 int selector;
1073 uint32_t offset, esp;
1074 uint32_t old_cs, old_eflags;
1075 uint32_t iopl;
1076
1077 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1078
1079 if (!is_vme_irq_redirected(intno))
1080 {
1081 if (iopl == 3)
1082 {
1083 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1084 return;
1085 }
1086 else
1087 raise_exception_err(EXCP0D_GPF, 0);
1088 }
1089
1090 /* virtual mode idt is at linear address 0 */
1091 ptr = 0 + intno * 4;
1092 offset = lduw_kernel(ptr);
1093 selector = lduw_kernel(ptr + 2);
1094 esp = ESP;
1095 ssp = env->segs[R_SS].base;
1096 old_cs = env->segs[R_CS].selector;
1097
1098 old_eflags = compute_eflags();
1099 if (iopl < 3)
1100 {
1101 /* copy VIF into IF and set IOPL to 3 */
1102 if (env->eflags & VIF_MASK)
1103 old_eflags |= IF_MASK;
1104 else
1105 old_eflags &= ~IF_MASK;
1106
1107 old_eflags |= (3 << IOPL_SHIFT);
1108 }
1109
1110 /* XXX: use SS segment size ? */
1111 PUSHW(ssp, esp, 0xffff, old_eflags);
1112 PUSHW(ssp, esp, 0xffff, old_cs);
1113 PUSHW(ssp, esp, 0xffff, next_eip);
1114
1115 /* update processor state */
1116 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1117 env->eip = offset;
1118 env->segs[R_CS].selector = selector;
1119 env->segs[R_CS].base = (selector << 4);
1120 env->eflags &= ~(TF_MASK | RF_MASK);
1121
1122 if (iopl < 3)
1123 env->eflags &= ~VIF_MASK;
1124 else
1125 env->eflags &= ~IF_MASK;
1126}
1127#endif /* VBOX */
1128
1129#ifdef TARGET_X86_64
1130
1131#define PUSHQ(sp, val)\
1132{\
1133 sp -= 8;\
1134 stq_kernel(sp, (val));\
1135}
1136
1137#define POPQ(sp, val)\
1138{\
1139 val = ldq_kernel(sp);\
1140 sp += 8;\
1141}
1142
1143#ifndef VBOX
1144static inline target_ulong get_rsp_from_tss(int level)
1145#else /* VBOX */
1146DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1147#endif /* VBOX */
1148{
1149 int index;
1150
1151#if 0
1152 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1153 env->tr.base, env->tr.limit);
1154#endif
1155
1156 if (!(env->tr.flags & DESC_P_MASK))
1157 cpu_abort(env, "invalid tss");
1158 index = 8 * level + 4;
1159 if ((index + 7) > env->tr.limit)
1160 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1161 return ldq_kernel(env->tr.base + index);
1162}
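/* Layout assumed by the lookup above (64-bit TSS): RSP0..RSP2 live at
   offsets 4, 12 and 20 (index = 8 * level + 4) and the IST entries follow,
   so get_rsp_from_tss(ist + 3) with ist == 1 reads IST1 at offset 36. */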
1163
1164/* 64 bit interrupt */
1165static void do_interrupt64(int intno, int is_int, int error_code,
1166 target_ulong next_eip, int is_hw)
1167{
1168 SegmentCache *dt;
1169 target_ulong ptr;
1170 int type, dpl, selector, cpl, ist;
1171 int has_error_code, new_stack;
1172 uint32_t e1, e2, e3, ss;
1173 target_ulong old_eip, esp, offset;
1174
1175#ifdef VBOX
1176 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1177 cpu_loop_exit();
1178#endif
1179
1180 has_error_code = 0;
1181 if (!is_int && !is_hw) {
1182 switch(intno) {
1183 case 8:
1184 case 10:
1185 case 11:
1186 case 12:
1187 case 13:
1188 case 14:
1189 case 17:
1190 has_error_code = 1;
1191 break;
1192 }
1193 }
1194 if (is_int)
1195 old_eip = next_eip;
1196 else
1197 old_eip = env->eip;
1198
1199 dt = &env->idt;
1200 if (intno * 16 + 15 > dt->limit)
1201 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1202 ptr = dt->base + intno * 16;
1203 e1 = ldl_kernel(ptr);
1204 e2 = ldl_kernel(ptr + 4);
1205 e3 = ldl_kernel(ptr + 8);
1206 /* check gate type */
1207 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1208 switch(type) {
1209 case 14: /* 386 interrupt gate */
1210 case 15: /* 386 trap gate */
1211 break;
1212 default:
1213 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1214 break;
1215 }
1216 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1217 cpl = env->hflags & HF_CPL_MASK;
1218 /* check privilege if software int */
1219 if (is_int && dpl < cpl)
1220 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1221 /* check valid bit */
1222 if (!(e2 & DESC_P_MASK))
1223 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1224 selector = e1 >> 16;
1225 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1226 ist = e2 & 7;
1227 if ((selector & 0xfffc) == 0)
1228 raise_exception_err(EXCP0D_GPF, 0);
1229
1230 if (load_segment(&e1, &e2, selector) != 0)
1231 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1232 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1233 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1234 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1235 if (dpl > cpl)
1236 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1237 if (!(e2 & DESC_P_MASK))
1238 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1239 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1240 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1241 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1242 /* to inner privilege */
1243 if (ist != 0)
1244 esp = get_rsp_from_tss(ist + 3);
1245 else
1246 esp = get_rsp_from_tss(dpl);
1247 esp &= ~0xfLL; /* align stack */
1248 ss = 0;
1249 new_stack = 1;
1250 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1251 /* to same privilege */
1252 if (env->eflags & VM_MASK)
1253 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1254 new_stack = 0;
1255 if (ist != 0)
1256 esp = get_rsp_from_tss(ist + 3);
1257 else
1258 esp = ESP;
1259 esp &= ~0xfLL; /* align stack */
1260 dpl = cpl;
1261 } else {
1262 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1263 new_stack = 0; /* avoid warning */
1264 esp = 0; /* avoid warning */
1265 }
1266
1267 PUSHQ(esp, env->segs[R_SS].selector);
1268 PUSHQ(esp, ESP);
1269 PUSHQ(esp, compute_eflags());
1270 PUSHQ(esp, env->segs[R_CS].selector);
1271 PUSHQ(esp, old_eip);
1272 if (has_error_code) {
1273 PUSHQ(esp, error_code);
1274 }
1275
1276 if (new_stack) {
1277 ss = 0 | dpl;
1278 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1279 }
1280 ESP = esp;
1281
1282 selector = (selector & ~3) | dpl;
1283 cpu_x86_load_seg_cache(env, R_CS, selector,
1284 get_seg_base(e1, e2),
1285 get_seg_limit(e1, e2),
1286 e2);
1287 cpu_x86_set_cpl(env, dpl);
1288 env->eip = offset;
1289
1290 /* an interrupt gate clears the IF flag */
1291 if ((type & 1) == 0) {
1292 env->eflags &= ~IF_MASK;
1293 }
1294 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1295}
1296#endif
1297
1298#if defined(CONFIG_USER_ONLY)
1299void helper_syscall(int next_eip_addend)
1300{
1301 env->exception_index = EXCP_SYSCALL;
1302 env->exception_next_eip = env->eip + next_eip_addend;
1303 cpu_loop_exit();
1304}
1305#else
1306void helper_syscall(int next_eip_addend)
1307{
1308 int selector;
1309
1310 if (!(env->efer & MSR_EFER_SCE)) {
1311 raise_exception_err(EXCP06_ILLOP, 0);
1312 }
1313 selector = (env->star >> 32) & 0xffff;
1314#ifdef TARGET_X86_64
1315 if (env->hflags & HF_LMA_MASK) {
1316 int code64;
1317
1318 ECX = env->eip + next_eip_addend;
1319 env->regs[11] = compute_eflags();
1320
1321 code64 = env->hflags & HF_CS64_MASK;
1322
1323 cpu_x86_set_cpl(env, 0);
1324 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1325 0, 0xffffffff,
1326 DESC_G_MASK | DESC_P_MASK |
1327 DESC_S_MASK |
1328 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1329 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1330 0, 0xffffffff,
1331 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1332 DESC_S_MASK |
1333 DESC_W_MASK | DESC_A_MASK);
1334 env->eflags &= ~env->fmask;
1335 load_eflags(env->eflags, 0);
1336 if (code64)
1337 env->eip = env->lstar;
1338 else
1339 env->eip = env->cstar;
1340 } else
1341#endif
1342 {
1343 ECX = (uint32_t)(env->eip + next_eip_addend);
1344
1345 cpu_x86_set_cpl(env, 0);
1346 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1347 0, 0xffffffff,
1348 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1349 DESC_S_MASK |
1350 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1351 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1352 0, 0xffffffff,
1353 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1354 DESC_S_MASK |
1355 DESC_W_MASK | DESC_A_MASK);
1356 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1357 env->eip = (uint32_t)env->star;
1358 }
1359}
1360#endif
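/* Note on the MSR layout used above: SYSCALL takes the new CS selector from
   STAR[47:32] (SS is that selector + 8), RIP from LSTAR (64-bit callers) or
   CSTAR (compatibility mode), and clears the RFLAGS bits set in FMASK; in
   legacy mode EIP comes from the low 32 bits of STAR. */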
1361
1362void helper_sysret(int dflag)
1363{
1364 int cpl, selector;
1365
1366 if (!(env->efer & MSR_EFER_SCE)) {
1367 raise_exception_err(EXCP06_ILLOP, 0);
1368 }
1369 cpl = env->hflags & HF_CPL_MASK;
1370 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1371 raise_exception_err(EXCP0D_GPF, 0);
1372 }
1373 selector = (env->star >> 48) & 0xffff;
1374#ifdef TARGET_X86_64
1375 if (env->hflags & HF_LMA_MASK) {
1376 if (dflag == 2) {
1377 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1378 0, 0xffffffff,
1379 DESC_G_MASK | DESC_P_MASK |
1380 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1381 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1382 DESC_L_MASK);
1383 env->eip = ECX;
1384 } else {
1385 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1386 0, 0xffffffff,
1387 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1388 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1389 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1390 env->eip = (uint32_t)ECX;
1391 }
1392 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1393 0, 0xffffffff,
1394 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1395 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1396 DESC_W_MASK | DESC_A_MASK);
1397 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1398 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1399 cpu_x86_set_cpl(env, 3);
1400 } else
1401#endif
1402 {
1403 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1404 0, 0xffffffff,
1405 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1406 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1407 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1408 env->eip = (uint32_t)ECX;
1409 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1410 0, 0xffffffff,
1411 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1412 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1413 DESC_W_MASK | DESC_A_MASK);
1414 env->eflags |= IF_MASK;
1415 cpu_x86_set_cpl(env, 3);
1416 }
1417#ifdef USE_KQEMU
1418 if (kqemu_is_ok(env)) {
1419 if (env->hflags & HF_LMA_MASK)
1420 CC_OP = CC_OP_EFLAGS;
1421 env->exception_index = -1;
1422 cpu_loop_exit();
1423 }
1424#endif
1425}
1426
1427#ifdef VBOX
1428/**
1429 * Checks and processes external VMM events.
1430 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1431 */
1432void helper_external_event(void)
1433{
1434#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1435 uintptr_t uSP;
1436# ifdef RT_ARCH_AMD64
1437 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1438# else
1439 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1440# endif
1441 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1442#endif
1443 /* Keep in sync with flags checked by gen_check_external_event() */
1444 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1445 {
1446 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1447 ~CPU_INTERRUPT_EXTERNAL_HARD);
1448 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1449 }
1450 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1451 {
1452 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1453 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1454 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1455 }
1456 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1457 {
1458 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1459 ~CPU_INTERRUPT_EXTERNAL_DMA);
1460 remR3DmaRun(env);
1461 }
1462 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1463 {
1464 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1465 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1466 remR3TimersRun(env);
1467 }
1468}
1469/* helper for recording call instruction addresses for later scanning */
1470void helper_record_call()
1471{
1472 if ( !(env->state & CPU_RAW_RING0)
1473 && (env->cr[0] & CR0_PG_MASK)
1474 && !(env->eflags & X86_EFL_IF))
1475 remR3RecordCall(env);
1476}
1477#endif /* VBOX */
1478
1479/* real mode interrupt */
1480static void do_interrupt_real(int intno, int is_int, int error_code,
1481 unsigned int next_eip)
1482{
1483 SegmentCache *dt;
1484 target_ulong ptr, ssp;
1485 int selector;
1486 uint32_t offset, esp;
1487 uint32_t old_cs, old_eip;
1488
1489 /* real mode (simpler !) */
1490 dt = &env->idt;
1491#ifndef VBOX
1492 if (intno * 4 + 3 > dt->limit)
1493#else
1494 if ((unsigned)intno * 4 + 3 > dt->limit)
1495#endif
1496 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1497 ptr = dt->base + intno * 4;
1498 offset = lduw_kernel(ptr);
1499 selector = lduw_kernel(ptr + 2);
1500 esp = ESP;
1501 ssp = env->segs[R_SS].base;
1502 if (is_int)
1503 old_eip = next_eip;
1504 else
1505 old_eip = env->eip;
1506 old_cs = env->segs[R_CS].selector;
1507 /* XXX: use SS segment size ? */
1508 PUSHW(ssp, esp, 0xffff, compute_eflags());
1509 PUSHW(ssp, esp, 0xffff, old_cs);
1510 PUSHW(ssp, esp, 0xffff, old_eip);
1511
1512 /* update processor state */
1513 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1514 env->eip = offset;
1515 env->segs[R_CS].selector = selector;
1516 env->segs[R_CS].base = (selector << 4);
1517 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1518}
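/* Example of the real-mode dispatch above: with the IDT base at its reset
   value of 0, the IVT has 4-byte entries, so for int 0x10 the new IP is read
   from linear address 0x40 and the new CS from 0x42, after FLAGS/CS/IP have
   been pushed on the old stack. */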
1519
1520/* fake user mode interrupt */
1521void do_interrupt_user(int intno, int is_int, int error_code,
1522 target_ulong next_eip)
1523{
1524 SegmentCache *dt;
1525 target_ulong ptr;
1526 int dpl, cpl, shift;
1527 uint32_t e2;
1528
1529 dt = &env->idt;
1530 if (env->hflags & HF_LMA_MASK) {
1531 shift = 4;
1532 } else {
1533 shift = 3;
1534 }
1535 ptr = dt->base + (intno << shift);
1536 e2 = ldl_kernel(ptr + 4);
1537
1538 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1539 cpl = env->hflags & HF_CPL_MASK;
1540 /* check privilege if software int */
1541 if (is_int && dpl < cpl)
1542 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1543
1544 /* Since we emulate only user space, we cannot do more than
1545 exit the emulation with the suitable exception and error
1546 code */
1547 if (is_int)
1548 EIP = next_eip;
1549}
1550
1551/*
1552 * Begin execution of an interrupt. is_int is TRUE if coming from
1553 * the int instruction. next_eip is the EIP value AFTER the interrupt
1554 * instruction. It is only relevant if is_int is TRUE.
1555 */
1556void do_interrupt(int intno, int is_int, int error_code,
1557 target_ulong next_eip, int is_hw)
1558{
1559 if (loglevel & CPU_LOG_INT) {
1560 if ((env->cr[0] & CR0_PE_MASK)) {
1561 static int count;
1562 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1563 count, intno, error_code, is_int,
1564 env->hflags & HF_CPL_MASK,
1565 env->segs[R_CS].selector, EIP,
1566 (int)env->segs[R_CS].base + EIP,
1567 env->segs[R_SS].selector, ESP);
1568 if (intno == 0x0e) {
1569 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1570 } else {
1571 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1572 }
1573 fprintf(logfile, "\n");
1574 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1575#if 0
1576 {
1577 int i;
1578 uint8_t *ptr;
1579 fprintf(logfile, " code=");
1580 ptr = env->segs[R_CS].base + env->eip;
1581 for(i = 0; i < 16; i++) {
1582 fprintf(logfile, " %02x", ldub(ptr + i));
1583 }
1584 fprintf(logfile, "\n");
1585 }
1586#endif
1587 count++;
1588 }
1589 }
1590 if (env->cr[0] & CR0_PE_MASK) {
1591#ifdef TARGET_X86_64
1592 if (env->hflags & HF_LMA_MASK) {
1593 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1594 } else
1595#endif
1596 {
1597#ifdef VBOX
1598 /* int xx *, v86 code and VME enabled? */
1599 if ( (env->eflags & VM_MASK)
1600 && (env->cr[4] & CR4_VME_MASK)
1601 && is_int
1602 && !is_hw
1603 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1604 )
1605 do_soft_interrupt_vme(intno, error_code, next_eip);
1606 else
1607#endif /* VBOX */
1608 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1609 }
1610 } else {
1611 do_interrupt_real(intno, is_int, error_code, next_eip);
1612 }
1613}
1614
1615/*
1616 * Check nested exceptions and change to double or triple fault if
1617 * needed. It should only be called if this is not an interrupt.
1618 * Returns the new exception number.
1619 */
1620static int check_exception(int intno, int *error_code)
1621{
1622 int first_contributory = env->old_exception == 0 ||
1623 (env->old_exception >= 10 &&
1624 env->old_exception <= 13);
1625 int second_contributory = intno == 0 ||
1626 (intno >= 10 && intno <= 13);
1627
1628 if (loglevel & CPU_LOG_INT)
1629 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1630 env->old_exception, intno);
1631
1632 if (env->old_exception == EXCP08_DBLE)
1633 cpu_abort(env, "triple fault");
1634
1635 if ((first_contributory && second_contributory)
1636 || (env->old_exception == EXCP0E_PAGE &&
1637 (second_contributory || (intno == EXCP0E_PAGE)))) {
1638 intno = EXCP08_DBLE;
1639 *error_code = 0;
1640 }
1641
1642 if (second_contributory || (intno == EXCP0E_PAGE) ||
1643 (intno == EXCP08_DBLE))
1644 env->old_exception = intno;
1645
1646 return intno;
1647}
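/* Worked example for the logic above: a #GP (13) raised while still handling
   an earlier #NP (11) is two contributory exceptions, so the function returns
   #DF (8) with error code 0; a further fault while old_exception is already
   #DF aborts with a triple fault. */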
1648
1649/*
1650 * Signal an interruption. It is executed in the main CPU loop.
1651 * is_int is TRUE if coming from the int instruction. next_eip is the
1652 * EIP value AFTER the interrupt instruction. It is only relevant if
1653 * is_int is TRUE.
1654 */
1655void raise_interrupt(int intno, int is_int, int error_code,
1656 int next_eip_addend)
1657{
1658#if defined(VBOX) && defined(DEBUG)
1659 NOT_DMIK(Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend)));
1660#endif
1661 if (!is_int) {
1662 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1663 intno = check_exception(intno, &error_code);
1664 } else {
1665 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1666 }
1667
1668 env->exception_index = intno;
1669 env->error_code = error_code;
1670 env->exception_is_int = is_int;
1671 env->exception_next_eip = env->eip + next_eip_addend;
1672 cpu_loop_exit();
1673}
1674
1675/* shortcuts to generate exceptions */
1676
1677void (raise_exception_err)(int exception_index, int error_code)
1678{
1679 raise_interrupt(exception_index, 0, error_code, 0);
1680}
1681
1682void raise_exception(int exception_index)
1683{
1684 raise_interrupt(exception_index, 0, 0, 0);
1685}
1686
1687/* SMM support */
1688
1689#if defined(CONFIG_USER_ONLY)
1690
1691void do_smm_enter(void)
1692{
1693}
1694
1695void helper_rsm(void)
1696{
1697}
1698
1699#else
1700
1701#ifdef TARGET_X86_64
1702#define SMM_REVISION_ID 0x00020064
1703#else
1704#define SMM_REVISION_ID 0x00020000
1705#endif
1706
1707void do_smm_enter(void)
1708{
1709 target_ulong sm_state;
1710 SegmentCache *dt;
1711 int i, offset;
1712
1713 if (loglevel & CPU_LOG_INT) {
1714 fprintf(logfile, "SMM: enter\n");
1715 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1716 }
1717
1718 env->hflags |= HF_SMM_MASK;
1719 cpu_smm_update(env);
1720
1721 sm_state = env->smbase + 0x8000;
1722
1723#ifdef TARGET_X86_64
1724 for(i = 0; i < 6; i++) {
1725 dt = &env->segs[i];
1726 offset = 0x7e00 + i * 16;
1727 stw_phys(sm_state + offset, dt->selector);
1728 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1729 stl_phys(sm_state + offset + 4, dt->limit);
1730 stq_phys(sm_state + offset + 8, dt->base);
1731 }
1732
1733 stq_phys(sm_state + 0x7e68, env->gdt.base);
1734 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1735
1736 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1737 stq_phys(sm_state + 0x7e78, env->ldt.base);
1738 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1739 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1740
1741 stq_phys(sm_state + 0x7e88, env->idt.base);
1742 stl_phys(sm_state + 0x7e84, env->idt.limit);
1743
1744 stw_phys(sm_state + 0x7e90, env->tr.selector);
1745 stq_phys(sm_state + 0x7e98, env->tr.base);
1746 stl_phys(sm_state + 0x7e94, env->tr.limit);
1747 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1748
1749 stq_phys(sm_state + 0x7ed0, env->efer);
1750
1751 stq_phys(sm_state + 0x7ff8, EAX);
1752 stq_phys(sm_state + 0x7ff0, ECX);
1753 stq_phys(sm_state + 0x7fe8, EDX);
1754 stq_phys(sm_state + 0x7fe0, EBX);
1755 stq_phys(sm_state + 0x7fd8, ESP);
1756 stq_phys(sm_state + 0x7fd0, EBP);
1757 stq_phys(sm_state + 0x7fc8, ESI);
1758 stq_phys(sm_state + 0x7fc0, EDI);
1759 for(i = 8; i < 16; i++)
1760 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1761 stq_phys(sm_state + 0x7f78, env->eip);
1762 stl_phys(sm_state + 0x7f70, compute_eflags());
1763 stl_phys(sm_state + 0x7f68, env->dr[6]);
1764 stl_phys(sm_state + 0x7f60, env->dr[7]);
1765
1766 stl_phys(sm_state + 0x7f48, env->cr[4]);
1767 stl_phys(sm_state + 0x7f50, env->cr[3]);
1768 stl_phys(sm_state + 0x7f58, env->cr[0]);
1769
1770 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1771 stl_phys(sm_state + 0x7f00, env->smbase);
1772#else
1773 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1774 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1775 stl_phys(sm_state + 0x7ff4, compute_eflags());
1776 stl_phys(sm_state + 0x7ff0, env->eip);
1777 stl_phys(sm_state + 0x7fec, EDI);
1778 stl_phys(sm_state + 0x7fe8, ESI);
1779 stl_phys(sm_state + 0x7fe4, EBP);
1780 stl_phys(sm_state + 0x7fe0, ESP);
1781 stl_phys(sm_state + 0x7fdc, EBX);
1782 stl_phys(sm_state + 0x7fd8, EDX);
1783 stl_phys(sm_state + 0x7fd4, ECX);
1784 stl_phys(sm_state + 0x7fd0, EAX);
1785 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1786 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1787
1788 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1789 stl_phys(sm_state + 0x7f64, env->tr.base);
1790 stl_phys(sm_state + 0x7f60, env->tr.limit);
1791 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1792
1793 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1794 stl_phys(sm_state + 0x7f80, env->ldt.base);
1795 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1796 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1797
1798 stl_phys(sm_state + 0x7f74, env->gdt.base);
1799 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1800
1801 stl_phys(sm_state + 0x7f58, env->idt.base);
1802 stl_phys(sm_state + 0x7f54, env->idt.limit);
1803
1804 for(i = 0; i < 6; i++) {
1805 dt = &env->segs[i];
1806 if (i < 3)
1807 offset = 0x7f84 + i * 12;
1808 else
1809 offset = 0x7f2c + (i - 3) * 12;
1810 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1811 stl_phys(sm_state + offset + 8, dt->base);
1812 stl_phys(sm_state + offset + 4, dt->limit);
1813 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1814 }
1815 stl_phys(sm_state + 0x7f14, env->cr[4]);
1816
1817 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1818 stl_phys(sm_state + 0x7ef8, env->smbase);
1819#endif
1820 /* init SMM cpu state */
1821
1822#ifdef TARGET_X86_64
1823 cpu_load_efer(env, 0);
1824#endif
1825 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1826 env->eip = 0x00008000;
1827 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1828 0xffffffff, 0);
1829 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1830 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1831 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1832 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1833 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1834
1835 cpu_x86_update_cr0(env,
1836 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1837 cpu_x86_update_cr4(env, 0);
1838 env->dr[7] = 0x00000400;
1839 CC_OP = CC_OP_EFLAGS;
1840}
1841
1842void helper_rsm(void)
1843{
1844#ifdef VBOX
1845 cpu_abort(env, "helper_rsm");
1846#else /* !VBOX */
1849 target_ulong sm_state;
1850 int i, offset;
1851 uint32_t val;
1852
1853 sm_state = env->smbase + 0x8000;
1854#ifdef TARGET_X86_64
1855 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1856
1857 for(i = 0; i < 6; i++) {
1858 offset = 0x7e00 + i * 16;
1859 cpu_x86_load_seg_cache(env, i,
1860 lduw_phys(sm_state + offset),
1861 ldq_phys(sm_state + offset + 8),
1862 ldl_phys(sm_state + offset + 4),
1863 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1864 }
1865
1866 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1867 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1868
1869 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1870 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1871 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1872 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1873
1874 env->idt.base = ldq_phys(sm_state + 0x7e88);
1875 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1876
1877 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1878 env->tr.base = ldq_phys(sm_state + 0x7e98);
1879 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1880 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1881
1882 EAX = ldq_phys(sm_state + 0x7ff8);
1883 ECX = ldq_phys(sm_state + 0x7ff0);
1884 EDX = ldq_phys(sm_state + 0x7fe8);
1885 EBX = ldq_phys(sm_state + 0x7fe0);
1886 ESP = ldq_phys(sm_state + 0x7fd8);
1887 EBP = ldq_phys(sm_state + 0x7fd0);
1888 ESI = ldq_phys(sm_state + 0x7fc8);
1889 EDI = ldq_phys(sm_state + 0x7fc0);
1890 for(i = 8; i < 16; i++)
1891 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1892 env->eip = ldq_phys(sm_state + 0x7f78);
1893 load_eflags(ldl_phys(sm_state + 0x7f70),
1894 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1895 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1896 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1897
1898 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1899 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1900 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1901
1902 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1903 if (val & 0x20000) {
1904 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1905 }
1906#else
1907 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1908 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1909 load_eflags(ldl_phys(sm_state + 0x7ff4),
1910 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1911 env->eip = ldl_phys(sm_state + 0x7ff0);
1912 EDI = ldl_phys(sm_state + 0x7fec);
1913 ESI = ldl_phys(sm_state + 0x7fe8);
1914 EBP = ldl_phys(sm_state + 0x7fe4);
1915 ESP = ldl_phys(sm_state + 0x7fe0);
1916 EBX = ldl_phys(sm_state + 0x7fdc);
1917 EDX = ldl_phys(sm_state + 0x7fd8);
1918 ECX = ldl_phys(sm_state + 0x7fd4);
1919 EAX = ldl_phys(sm_state + 0x7fd0);
1920 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1921 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1922
1923 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1924 env->tr.base = ldl_phys(sm_state + 0x7f64);
1925 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1926 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1927
1928 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1929 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1930 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1931 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1932
1933 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1934 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1935
1936 env->idt.base = ldl_phys(sm_state + 0x7f58);
1937 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1938
1939 for(i = 0; i < 6; i++) {
1940 if (i < 3)
1941 offset = 0x7f84 + i * 12;
1942 else
1943 offset = 0x7f2c + (i - 3) * 12;
1944 cpu_x86_load_seg_cache(env, i,
1945 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1946 ldl_phys(sm_state + offset + 8),
1947 ldl_phys(sm_state + offset + 4),
1948 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1949 }
1950 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1951
1952 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1953 if (val & 0x20000) {
1954 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1955 }
1956#endif
1957 CC_OP = CC_OP_EFLAGS;
1958 env->hflags &= ~HF_SMM_MASK;
1959 cpu_smm_update(env);
1960
1961 if (loglevel & CPU_LOG_INT) {
1962 fprintf(logfile, "SMM: after RSM\n");
1963 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1964 }
1965#endif /* !VBOX */
1966}
1967
1968#endif /* !CONFIG_USER_ONLY */
1969
1970
1971/* division, flags are undefined */
1972
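/* DIV r/m8: the dividend is AX; AL receives the quotient and AH the
   remainder. Both a zero divisor and a quotient that does not fit the
   destination raise #DE (EXCP00_DIVZ), as on real hardware. The wider
   helpers below follow the same pattern with DX:AX and EDX:EAX. */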
1973void helper_divb_AL(target_ulong t0)
1974{
1975 unsigned int num, den, q, r;
1976
1977 num = (EAX & 0xffff);
1978 den = (t0 & 0xff);
1979 if (den == 0) {
1980 raise_exception(EXCP00_DIVZ);
1981 }
1982 q = (num / den);
1983 if (q > 0xff)
1984 raise_exception(EXCP00_DIVZ);
1985 q &= 0xff;
1986 r = (num % den) & 0xff;
1987 EAX = (EAX & ~0xffff) | (r << 8) | q;
1988}
1989
1990void helper_idivb_AL(target_ulong t0)
1991{
1992 int num, den, q, r;
1993
1994 num = (int16_t)EAX;
1995 den = (int8_t)t0;
1996 if (den == 0) {
1997 raise_exception(EXCP00_DIVZ);
1998 }
1999 q = (num / den);
2000 if (q != (int8_t)q)
2001 raise_exception(EXCP00_DIVZ);
2002 q &= 0xff;
2003 r = (num % den) & 0xff;
2004 EAX = (EAX & ~0xffff) | (r << 8) | q;
2005}
2006
2007void helper_divw_AX(target_ulong t0)
2008{
2009 unsigned int num, den, q, r;
2010
2011 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2012 den = (t0 & 0xffff);
2013 if (den == 0) {
2014 raise_exception(EXCP00_DIVZ);
2015 }
2016 q = (num / den);
2017 if (q > 0xffff)
2018 raise_exception(EXCP00_DIVZ);
2019 q &= 0xffff;
2020 r = (num % den) & 0xffff;
2021 EAX = (EAX & ~0xffff) | q;
2022 EDX = (EDX & ~0xffff) | r;
2023}
2024
2025void helper_idivw_AX(target_ulong t0)
2026{
2027 int num, den, q, r;
2028
2029 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2030 den = (int16_t)t0;
2031 if (den == 0) {
2032 raise_exception(EXCP00_DIVZ);
2033 }
2034 q = (num / den);
2035 if (q != (int16_t)q)
2036 raise_exception(EXCP00_DIVZ);
2037 q &= 0xffff;
2038 r = (num % den) & 0xffff;
2039 EAX = (EAX & ~0xffff) | q;
2040 EDX = (EDX & ~0xffff) | r;
2041}
2042
2043void helper_divl_EAX(target_ulong t0)
2044{
2045 unsigned int den, r;
2046 uint64_t num, q;
2047
2048 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2049 den = t0;
2050 if (den == 0) {
2051 raise_exception(EXCP00_DIVZ);
2052 }
2053 q = (num / den);
2054 r = (num % den);
2055 if (q > 0xffffffff)
2056 raise_exception(EXCP00_DIVZ);
2057 EAX = (uint32_t)q;
2058 EDX = (uint32_t)r;
2059}
2060
2061void helper_idivl_EAX(target_ulong t0)
2062{
2063 int den, r;
2064 int64_t num, q;
2065
2066 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2067 den = t0;
2068 if (den == 0) {
2069 raise_exception(EXCP00_DIVZ);
2070 }
2071 q = (num / den);
2072 r = (num % den);
2073 if (q != (int32_t)q)
2074 raise_exception(EXCP00_DIVZ);
2075 EAX = (uint32_t)q;
2076 EDX = (uint32_t)r;
2077}
2078
2079/* bcd */
2080
2081/* XXX: aam with an immediate operand of 0 should raise #DE; not handled here */
2082void helper_aam(int base)
2083{
2084 int al, ah;
2085 al = EAX & 0xff;
2086 ah = al / base;
2087 al = al % base;
2088 EAX = (EAX & ~0xffff) | al | (ah << 8);
2089 CC_DST = al;
2090}
2091
2092void helper_aad(int base)
2093{
2094 int al, ah;
2095 al = EAX & 0xff;
2096 ah = (EAX >> 8) & 0xff;
2097 al = ((ah * base) + al) & 0xff;
2098 EAX = (EAX & ~0xffff) | al;
2099 CC_DST = al;
2100}
2101
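/* AAA adjusts AL after adding two unpacked BCD digits: if the low nibble
   is above 9 or AF is set, 6 is added to AL, AH is incremented and AF/CF
   are set, otherwise AF/CF are cleared; AL is then truncated to its low
   nibble. AAS, DAA and DAS below are the matching subtract/packed
   variants. */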
2102void helper_aaa(void)
2103{
2104 int icarry;
2105 int al, ah, af;
2106 int eflags;
2107
2108 eflags = cc_table[CC_OP].compute_all();
2109 af = eflags & CC_A;
2110 al = EAX & 0xff;
2111 ah = (EAX >> 8) & 0xff;
2112
2113 icarry = (al > 0xf9);
2114 if (((al & 0x0f) > 9 ) || af) {
2115 al = (al + 6) & 0x0f;
2116 ah = (ah + 1 + icarry) & 0xff;
2117 eflags |= CC_C | CC_A;
2118 } else {
2119 eflags &= ~(CC_C | CC_A);
2120 al &= 0x0f;
2121 }
2122 EAX = (EAX & ~0xffff) | al | (ah << 8);
2123 CC_SRC = eflags;
2124 FORCE_RET();
2125}
2126
2127void helper_aas(void)
2128{
2129 int icarry;
2130 int al, ah, af;
2131 int eflags;
2132
2133 eflags = cc_table[CC_OP].compute_all();
2134 af = eflags & CC_A;
2135 al = EAX & 0xff;
2136 ah = (EAX >> 8) & 0xff;
2137
2138 icarry = (al < 6);
2139 if (((al & 0x0f) > 9 ) || af) {
2140 al = (al - 6) & 0x0f;
2141 ah = (ah - 1 - icarry) & 0xff;
2142 eflags |= CC_C | CC_A;
2143 } else {
2144 eflags &= ~(CC_C | CC_A);
2145 al &= 0x0f;
2146 }
2147 EAX = (EAX & ~0xffff) | al | (ah << 8);
2148 CC_SRC = eflags;
2149 FORCE_RET();
2150}
2151
2152void helper_daa(void)
2153{
2154 int al, af, cf;
2155 int eflags;
2156
2157 eflags = cc_table[CC_OP].compute_all();
2158 cf = eflags & CC_C;
2159 af = eflags & CC_A;
2160 al = EAX & 0xff;
2161
2162 eflags = 0;
2163 if (((al & 0x0f) > 9 ) || af) {
2164 al = (al + 6) & 0xff;
2165 eflags |= CC_A;
2166 }
2167 if ((al > 0x9f) || cf) {
2168 al = (al + 0x60) & 0xff;
2169 eflags |= CC_C;
2170 }
2171 EAX = (EAX & ~0xff) | al;
2172 /* well, speed is not an issue here, so we compute the flags by hand */
2173 eflags |= (al == 0) << 6; /* zf */
2174 eflags |= parity_table[al]; /* pf */
2175 eflags |= (al & 0x80); /* sf */
2176 CC_SRC = eflags;
2177 FORCE_RET();
2178}
2179
2180void helper_das(void)
2181{
2182 int al, al1, af, cf;
2183 int eflags;
2184
2185 eflags = cc_table[CC_OP].compute_all();
2186 cf = eflags & CC_C;
2187 af = eflags & CC_A;
2188 al = EAX & 0xff;
2189
2190 eflags = 0;
2191 al1 = al;
2192 if (((al & 0x0f) > 9 ) || af) {
2193 eflags |= CC_A;
2194 if (al < 6 || cf)
2195 eflags |= CC_C;
2196 al = (al - 6) & 0xff;
2197 }
2198 if ((al1 > 0x99) || cf) {
2199 al = (al - 0x60) & 0xff;
2200 eflags |= CC_C;
2201 }
2202 EAX = (EAX & ~0xff) | al;
2203 /* well, speed is not an issue here, so we compute the flags by hand */
2204 eflags |= (al == 0) << 6; /* zf */
2205 eflags |= parity_table[al]; /* pf */
2206 eflags |= (al & 0x80); /* sf */
2207 CC_SRC = eflags;
2208 FORCE_RET();
2209}
2210
2211void helper_into(int next_eip_addend)
2212{
2213 int eflags;
2214 eflags = cc_table[CC_OP].compute_all();
2215 if (eflags & CC_O) {
2216 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2217 }
2218}
2219
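/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a match
   store ECX:EBX and set ZF, otherwise load the operand into EDX:EAX and
   clear ZF. The store is performed in both cases so that the write access
   (and any fault it may raise) matches the locked-write behaviour of real
   CPUs. */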
2220void helper_cmpxchg8b(target_ulong a0)
2221{
2222 uint64_t d;
2223 int eflags;
2224
2225 eflags = cc_table[CC_OP].compute_all();
2226 d = ldq(a0);
2227 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2228 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2229 eflags |= CC_Z;
2230 } else {
2231 /* always do the store */
2232 stq(a0, d);
2233 EDX = (uint32_t)(d >> 32);
2234 EAX = (uint32_t)d;
2235 eflags &= ~CC_Z;
2236 }
2237 CC_SRC = eflags;
2238}
2239
2240#ifdef TARGET_X86_64
2241void helper_cmpxchg16b(target_ulong a0)
2242{
2243 uint64_t d0, d1;
2244 int eflags;
2245
2246 if ((a0 & 0xf) != 0)
2247 raise_exception(EXCP0D_GPF);
2248 eflags = cc_table[CC_OP].compute_all();
2249 d0 = ldq(a0);
2250 d1 = ldq(a0 + 8);
2251 if (d0 == EAX && d1 == EDX) {
2252 stq(a0, EBX);
2253 stq(a0 + 8, ECX);
2254 eflags |= CC_Z;
2255 } else {
2256 /* always do the store */
2257 stq(a0, d0);
2258 stq(a0 + 8, d1);
2259 EDX = d1;
2260 EAX = d0;
2261 eflags &= ~CC_Z;
2262 }
2263 CC_SRC = eflags;
2264}
2265#endif
2266
2267void helper_single_step(void)
2268{
2269 env->dr[6] |= 0x4000;
2270 raise_exception(EXCP01_SSTP);
2271}
2272
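/* The non-VBOX path below implements a static CPUID table; the VBox build
   delegates to remR3CpuId so that the recompiler reports the same CPUID
   values as the rest of the VMM. */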
2273void helper_cpuid(void)
2274{
2275#ifndef VBOX
2276 uint32_t index;
2277
2278 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2279
2280 index = (uint32_t)EAX;
2281 /* test if maximum index reached */
2282 if (index & 0x80000000) {
2283 if (index > env->cpuid_xlevel)
2284 index = env->cpuid_level;
2285 } else {
2286 if (index > env->cpuid_level)
2287 index = env->cpuid_level;
2288 }
2289
2290 switch(index) {
2291 case 0:
2292 EAX = env->cpuid_level;
2293 EBX = env->cpuid_vendor1;
2294 EDX = env->cpuid_vendor2;
2295 ECX = env->cpuid_vendor3;
2296 break;
2297 case 1:
2298 EAX = env->cpuid_version;
2299 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2300 ECX = env->cpuid_ext_features;
2301 EDX = env->cpuid_features;
2302 break;
2303 case 2:
2304 /* cache info: needed for Pentium Pro compatibility */
2305 EAX = 1;
2306 EBX = 0;
2307 ECX = 0;
2308 EDX = 0x2c307d;
2309 break;
2310 case 4:
2311 /* cache info: needed for Core compatibility */
2312 switch (ECX) {
2313 case 0: /* L1 dcache info */
2314 EAX = 0x0000121;
2315 EBX = 0x1c0003f;
2316 ECX = 0x000003f;
2317 EDX = 0x0000001;
2318 break;
2319 case 1: /* L1 icache info */
2320 EAX = 0x0000122;
2321 EBX = 0x1c0003f;
2322 ECX = 0x000003f;
2323 EDX = 0x0000001;
2324 break;
2325 case 2: /* L2 cache info */
2326 EAX = 0x0000143;
2327 EBX = 0x3c0003f;
2328 ECX = 0x0000fff;
2329 EDX = 0x0000001;
2330 break;
2331 default: /* end of info */
2332 EAX = 0;
2333 EBX = 0;
2334 ECX = 0;
2335 EDX = 0;
2336 break;
2337 }
2338
2339 break;
2340 case 5:
2341 /* mwait info: needed for Core compatibility */
2342 EAX = 0; /* Smallest monitor-line size in bytes */
2343 EBX = 0; /* Largest monitor-line size in bytes */
2344 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2345 EDX = 0;
2346 break;
2347 case 6:
2348 /* Thermal and Power Leaf */
2349 EAX = 0;
2350 EBX = 0;
2351 ECX = 0;
2352 EDX = 0;
2353 break;
2354 case 9:
2355 /* Direct Cache Access Information Leaf */
2356 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2357 EBX = 0;
2358 ECX = 0;
2359 EDX = 0;
2360 break;
2361 case 0xA:
2362 /* Architectural Performance Monitoring Leaf */
2363 EAX = 0;
2364 EBX = 0;
2365 ECX = 0;
2366 EDX = 0;
2367 break;
2368 case 0x80000000:
2369 EAX = env->cpuid_xlevel;
2370 EBX = env->cpuid_vendor1;
2371 EDX = env->cpuid_vendor2;
2372 ECX = env->cpuid_vendor3;
2373 break;
2374 case 0x80000001:
2375 EAX = env->cpuid_features;
2376 EBX = 0;
2377 ECX = env->cpuid_ext3_features;
2378 EDX = env->cpuid_ext2_features;
2379 break;
2380 case 0x80000002:
2381 case 0x80000003:
2382 case 0x80000004:
2383 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2384 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2385 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2386 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2387 break;
2388 case 0x80000005:
2389 /* cache info (L1 cache) */
2390 EAX = 0x01ff01ff;
2391 EBX = 0x01ff01ff;
2392 ECX = 0x40020140;
2393 EDX = 0x40020140;
2394 break;
2395 case 0x80000006:
2396 /* cache info (L2 cache) */
2397 EAX = 0;
2398 EBX = 0x42004200;
2399 ECX = 0x02008140;
2400 EDX = 0;
2401 break;
2402 case 0x80000008:
2403 /* virtual & phys address size in low 2 bytes. */
2404/* XXX: This value must match the one used in the MMU code. */
2405 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2406 /* 64 bit processor */
2407#if defined(USE_KQEMU)
2408 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2409#else
2410/* XXX: The physical address space is limited to 42 bits in exec.c. */
2411 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2412#endif
2413 } else {
2414#if defined(USE_KQEMU)
2415 EAX = 0x00000020; /* 32 bits physical */
2416#else
2417 if (env->cpuid_features & CPUID_PSE36)
2418 EAX = 0x00000024; /* 36 bits physical */
2419 else
2420 EAX = 0x00000020; /* 32 bits physical */
2421#endif
2422 }
2423 EBX = 0;
2424 ECX = 0;
2425 EDX = 0;
2426 break;
2427 case 0x8000000A:
2428 EAX = 0x00000001;
2429 EBX = 0;
2430 ECX = 0;
2431 EDX = 0;
2432 break;
2433 default:
2434 /* reserved values: zero */
2435 EAX = 0;
2436 EBX = 0;
2437 ECX = 0;
2438 EDX = 0;
2439 break;
2440 }
2441#else /* VBOX */
2442 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2443#endif /* VBOX */
2444}
2445
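/* Builds the ENTER display when the nesting level is non-zero: level-1
   frame pointers are copied down from the enclosing frame and the new
   frame pointer (t1) is pushed last. Pushing the old EBP and updating
   ESP/EBP is left to the translated ENTER sequence that calls this
   helper. */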
2446void helper_enter_level(int level, int data32, target_ulong t1)
2447{
2448 target_ulong ssp;
2449 uint32_t esp_mask, esp, ebp;
2450
2451 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2452 ssp = env->segs[R_SS].base;
2453 ebp = EBP;
2454 esp = ESP;
2455 if (data32) {
2456 /* 32 bit */
2457 esp -= 4;
2458 while (--level) {
2459 esp -= 4;
2460 ebp -= 4;
2461 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2462 }
2463 esp -= 4;
2464 stl(ssp + (esp & esp_mask), t1);
2465 } else {
2466 /* 16 bit */
2467 esp -= 2;
2468 while (--level) {
2469 esp -= 2;
2470 ebp -= 2;
2471 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2472 }
2473 esp -= 2;
2474 stw(ssp + (esp & esp_mask), t1);
2475 }
2476}
2477
2478#ifdef TARGET_X86_64
2479void helper_enter64_level(int level, int data64, target_ulong t1)
2480{
2481 target_ulong esp, ebp;
2482 ebp = EBP;
2483 esp = ESP;
2484
2485 if (data64) {
2486 /* 64 bit */
2487 esp -= 8;
2488 while (--level) {
2489 esp -= 8;
2490 ebp -= 8;
2491 stq(esp, ldq(ebp));
2492 }
2493 esp -= 8;
2494 stq(esp, t1);
2495 } else {
2496 /* 16 bit */
2497 esp -= 2;
2498 while (--level) {
2499 esp -= 2;
2500 ebp -= 2;
2501 stw(esp, lduw(ebp));
2502 }
2503 esp -= 2;
2504 stw(esp, t1);
2505 }
2506}
2507#endif
2508
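/* LLDT: loads the LDT register from a GDT descriptor. A null selector
   simply leaves an unusable (zero base/limit) LDT; a selector with the TI
   bit set, a descriptor that is not an LDT system descriptor (type 2) or a
   not-present descriptor faults. In long mode the system descriptor is
   16 bytes, hence the larger entry_limit. */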
2509void helper_lldt(int selector)
2510{
2511 SegmentCache *dt;
2512 uint32_t e1, e2;
2513#ifndef VBOX
2514 int index, entry_limit;
2515#else
2516 unsigned int index, entry_limit;
2517#endif
2518 target_ulong ptr;
2519
2520#ifdef VBOX
2521 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2522 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2523#endif
2524
2525 selector &= 0xffff;
2526 if ((selector & 0xfffc) == 0) {
2527 /* XXX: NULL selector case: invalid LDT */
2528 env->ldt.base = 0;
2529 env->ldt.limit = 0;
2530 } else {
2531 if (selector & 0x4)
2532 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2533 dt = &env->gdt;
2534 index = selector & ~7;
2535#ifdef TARGET_X86_64
2536 if (env->hflags & HF_LMA_MASK)
2537 entry_limit = 15;
2538 else
2539#endif
2540 entry_limit = 7;
2541 if ((index + entry_limit) > dt->limit)
2542 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2543 ptr = dt->base + index;
2544 e1 = ldl_kernel(ptr);
2545 e2 = ldl_kernel(ptr + 4);
2546 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2547 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2548 if (!(e2 & DESC_P_MASK))
2549 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2550#ifdef TARGET_X86_64
2551 if (env->hflags & HF_LMA_MASK) {
2552 uint32_t e3;
2553 e3 = ldl_kernel(ptr + 8);
2554 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2555 env->ldt.base |= (target_ulong)e3 << 32;
2556 } else
2557#endif
2558 {
2559 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2560 }
2561 }
2562 env->ldt.selector = selector;
2563#ifdef VBOX
2564 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2565 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2566#endif
2567}
2568
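/* LTR: loads the task register from a GDT descriptor, which must be an
   available 16-bit or 32-bit TSS (type 1 or 9); the descriptor is then
   marked busy in memory. As with LLDT, long mode uses a 16-byte system
   descriptor. */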
2569void helper_ltr(int selector)
2570{
2571 SegmentCache *dt;
2572 uint32_t e1, e2;
2573#ifndef VBOX
2574 int index, type, entry_limit;
2575#else
2576 unsigned int index;
2577 int type, entry_limit;
2578#endif
2579 target_ulong ptr;
2580
2581#ifdef VBOX
2582 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2583 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2584 env->tr.flags, (RTSEL)(selector & 0xffff)));
2585#endif
2586 selector &= 0xffff;
2587 if ((selector & 0xfffc) == 0) {
2588 /* NULL selector case: invalid TR */
2589 env->tr.base = 0;
2590 env->tr.limit = 0;
2591 env->tr.flags = 0;
2592 } else {
2593 if (selector & 0x4)
2594 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2595 dt = &env->gdt;
2596 index = selector & ~7;
2597#ifdef TARGET_X86_64
2598 if (env->hflags & HF_LMA_MASK)
2599 entry_limit = 15;
2600 else
2601#endif
2602 entry_limit = 7;
2603 if ((index + entry_limit) > dt->limit)
2604 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2605 ptr = dt->base + index;
2606 e1 = ldl_kernel(ptr);
2607 e2 = ldl_kernel(ptr + 4);
2608 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2609 if ((e2 & DESC_S_MASK) ||
2610 (type != 1 && type != 9))
2611 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2612 if (!(e2 & DESC_P_MASK))
2613 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2614#ifdef TARGET_X86_64
2615 if (env->hflags & HF_LMA_MASK) {
2616 uint32_t e3, e4;
2617 e3 = ldl_kernel(ptr + 8);
2618 e4 = ldl_kernel(ptr + 12);
2619 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2620 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2621 load_seg_cache_raw_dt(&env->tr, e1, e2);
2622 env->tr.base |= (target_ulong)e3 << 32;
2623 } else
2624#endif
2625 {
2626 load_seg_cache_raw_dt(&env->tr, e1, e2);
2627 }
2628 e2 |= DESC_TSS_BUSY_MASK;
2629 stl_kernel(ptr + 4, e2);
2630 }
2631 env->tr.selector = selector;
2632#ifdef VBOX
2633 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2634 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2635 env->tr.flags, (RTSEL)(selector & 0xffff)));
2636#endif
2637}
2638
2639/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2640void helper_load_seg(int seg_reg, int selector)
2641{
2642 uint32_t e1, e2;
2643 int cpl, dpl, rpl;
2644 SegmentCache *dt;
2645#ifndef VBOX
2646 int index;
2647#else
2648 unsigned int index;
2649#endif
2650 target_ulong ptr;
2651
2652 selector &= 0xffff;
2653 cpl = env->hflags & HF_CPL_MASK;
2654
2655#ifdef VBOX
2656 /* Trying to load a selector with CPL=1? */
2657 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2658 {
2659 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2660 selector = selector & 0xfffc;
2661 }
2662#endif
2663 if ((selector & 0xfffc) == 0) {
2664 /* null selector case */
2665 if (seg_reg == R_SS
2666#ifdef TARGET_X86_64
2667 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2668#endif
2669 )
2670 raise_exception_err(EXCP0D_GPF, 0);
2671 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2672 } else {
2673
2674 if (selector & 0x4)
2675 dt = &env->ldt;
2676 else
2677 dt = &env->gdt;
2678 index = selector & ~7;
2679 if ((index + 7) > dt->limit)
2680 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2681 ptr = dt->base + index;
2682 e1 = ldl_kernel(ptr);
2683 e2 = ldl_kernel(ptr + 4);
2684
2685 if (!(e2 & DESC_S_MASK))
2686 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2687 rpl = selector & 3;
2688 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2689 if (seg_reg == R_SS) {
2690 /* must be writable segment */
2691 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2692 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2693 if (rpl != cpl || dpl != cpl)
2694 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2695 } else {
2696 /* must be readable segment */
2697 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2698 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2699
2700 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2701 /* if not conforming code, test rights */
2702 if (dpl < cpl || dpl < rpl)
2703 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2704 }
2705 }
2706
2707 if (!(e2 & DESC_P_MASK)) {
2708 if (seg_reg == R_SS)
2709 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2710 else
2711 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2712 }
2713
2714 /* set the access bit if not already set */
2715 if (!(e2 & DESC_A_MASK)) {
2716 e2 |= DESC_A_MASK;
2717 stl_kernel(ptr + 4, e2);
2718 }
2719
2720 cpu_x86_load_seg_cache(env, seg_reg, selector,
2721 get_seg_base(e1, e2),
2722 get_seg_limit(e1, e2),
2723 e2);
2724#if 0
2725 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2726 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2727#endif
2728 }
2729}
2730
2731/* protected mode jump */
2732void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2733 int next_eip_addend)
2734{
2735 int gate_cs, type;
2736 uint32_t e1, e2, cpl, dpl, rpl, limit;
2737 target_ulong next_eip;
2738
2739#ifdef VBOX
2740 e1 = e2 = 0;
2741#endif
2742 if ((new_cs & 0xfffc) == 0)
2743 raise_exception_err(EXCP0D_GPF, 0);
2744 if (load_segment(&e1, &e2, new_cs) != 0)
2745 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2746 cpl = env->hflags & HF_CPL_MASK;
2747 if (e2 & DESC_S_MASK) {
2748 if (!(e2 & DESC_CS_MASK))
2749 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2750 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2751 if (e2 & DESC_C_MASK) {
2752 /* conforming code segment */
2753 if (dpl > cpl)
2754 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2755 } else {
2756 /* non conforming code segment */
2757 rpl = new_cs & 3;
2758 if (rpl > cpl)
2759 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2760 if (dpl != cpl)
2761 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2762 }
2763 if (!(e2 & DESC_P_MASK))
2764 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2765 limit = get_seg_limit(e1, e2);
2766 if (new_eip > limit &&
2767 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2768 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2769 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2770 get_seg_base(e1, e2), limit, e2);
2771 EIP = new_eip;
2772 } else {
2773 /* jump to call or task gate */
2774 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2775 rpl = new_cs & 3;
2776 cpl = env->hflags & HF_CPL_MASK;
2777 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2778 switch(type) {
2779 case 1: /* 286 TSS */
2780 case 9: /* 386 TSS */
2781 case 5: /* task gate */
2782 if (dpl < cpl || dpl < rpl)
2783 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2784 next_eip = env->eip + next_eip_addend;
2785 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2786 CC_OP = CC_OP_EFLAGS;
2787 break;
2788 case 4: /* 286 call gate */
2789 case 12: /* 386 call gate */
2790 if ((dpl < cpl) || (dpl < rpl))
2791 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2792 if (!(e2 & DESC_P_MASK))
2793 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2794 gate_cs = e1 >> 16;
2795 new_eip = (e1 & 0xffff);
2796 if (type == 12)
2797 new_eip |= (e2 & 0xffff0000);
2798 if (load_segment(&e1, &e2, gate_cs) != 0)
2799 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2800 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2801 /* must be code segment */
2802 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2803 (DESC_S_MASK | DESC_CS_MASK)))
2804 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2805 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2806 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2807 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2808 if (!(e2 & DESC_P_MASK))
2809#ifdef VBOX /* See page 3-514 of 253666.pdf */
2810 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2811#else
2812 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2813#endif
2814 limit = get_seg_limit(e1, e2);
2815 if (new_eip > limit)
2816 raise_exception_err(EXCP0D_GPF, 0);
2817 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2818 get_seg_base(e1, e2), limit, e2);
2819 EIP = new_eip;
2820 break;
2821 default:
2822 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2823 break;
2824 }
2825 }
2826}
2827
2828/* real mode call */
2829void helper_lcall_real(int new_cs, target_ulong new_eip1,
2830 int shift, int next_eip)
2831{
2832 int new_eip;
2833 uint32_t esp, esp_mask;
2834 target_ulong ssp;
2835
2836 new_eip = new_eip1;
2837 esp = ESP;
2838 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2839 ssp = env->segs[R_SS].base;
2840 if (shift) {
2841 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2842 PUSHL(ssp, esp, esp_mask, next_eip);
2843 } else {
2844 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2845 PUSHW(ssp, esp, esp_mask, next_eip);
2846 }
2847
2848 SET_ESP(esp, esp_mask);
2849 env->eip = new_eip;
2850 env->segs[R_CS].selector = new_cs;
2851 env->segs[R_CS].base = (new_cs << 4);
2852}
2853
2854/* protected mode call */
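/* Handles far calls both directly to a code segment and through a call
   gate; a call gate to a more privileged non-conforming segment switches
   to the stack supplied by the TSS and copies param_count parameters from
   the old stack before pushing the return CS:EIP. */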
2855void helper_lcall_protected(int new_cs, target_ulong new_eip,
2856 int shift, int next_eip_addend)
2857{
2858 int new_stack, i;
2859 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2860 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2861 uint32_t val, limit, old_sp_mask;
2862 target_ulong ssp, old_ssp, next_eip;
2863
2864#ifdef VBOX
2865 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2866#endif
2867 next_eip = env->eip + next_eip_addend;
2868#ifdef DEBUG_PCALL
2869 if (loglevel & CPU_LOG_PCALL) {
2870 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2871 new_cs, (uint32_t)new_eip, shift);
2872 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2873 }
2874#endif
2875 if ((new_cs & 0xfffc) == 0)
2876 raise_exception_err(EXCP0D_GPF, 0);
2877 if (load_segment(&e1, &e2, new_cs) != 0)
2878 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2879 cpl = env->hflags & HF_CPL_MASK;
2880#ifdef DEBUG_PCALL
2881 if (loglevel & CPU_LOG_PCALL) {
2882 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2883 }
2884#endif
2885 if (e2 & DESC_S_MASK) {
2886 if (!(e2 & DESC_CS_MASK))
2887 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2888 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2889 if (e2 & DESC_C_MASK) {
2890 /* conforming code segment */
2891 if (dpl > cpl)
2892 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2893 } else {
2894 /* non conforming code segment */
2895 rpl = new_cs & 3;
2896 if (rpl > cpl)
2897 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2898 if (dpl != cpl)
2899 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2900 }
2901 if (!(e2 & DESC_P_MASK))
2902 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2903
2904#ifdef TARGET_X86_64
2905 /* XXX: check 16/32 bit cases in long mode */
2906 if (shift == 2) {
2907 target_ulong rsp;
2908 /* 64 bit case */
2909 rsp = ESP;
2910 PUSHQ(rsp, env->segs[R_CS].selector);
2911 PUSHQ(rsp, next_eip);
2912 /* from this point, not restartable */
2913 ESP = rsp;
2914 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2915 get_seg_base(e1, e2),
2916 get_seg_limit(e1, e2), e2);
2917 EIP = new_eip;
2918 } else
2919#endif
2920 {
2921 sp = ESP;
2922 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2923 ssp = env->segs[R_SS].base;
2924 if (shift) {
2925 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2926 PUSHL(ssp, sp, sp_mask, next_eip);
2927 } else {
2928 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2929 PUSHW(ssp, sp, sp_mask, next_eip);
2930 }
2931
2932 limit = get_seg_limit(e1, e2);
2933 if (new_eip > limit)
2934 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2935 /* from this point, not restartable */
2936 SET_ESP(sp, sp_mask);
2937 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2938 get_seg_base(e1, e2), limit, e2);
2939 EIP = new_eip;
2940 }
2941 } else {
2942 /* check gate type */
2943 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2944 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2945 rpl = new_cs & 3;
2946 switch(type) {
2947 case 1: /* available 286 TSS */
2948 case 9: /* available 386 TSS */
2949 case 5: /* task gate */
2950 if (dpl < cpl || dpl < rpl)
2951 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2952 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2953 CC_OP = CC_OP_EFLAGS;
2954 return;
2955 case 4: /* 286 call gate */
2956 case 12: /* 386 call gate */
2957 break;
2958 default:
2959 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2960 break;
2961 }
2962 shift = type >> 3;
2963
2964 if (dpl < cpl || dpl < rpl)
2965 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2966 /* check valid bit */
2967 if (!(e2 & DESC_P_MASK))
2968 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2969 selector = e1 >> 16;
2970 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2971 param_count = e2 & 0x1f;
2972 if ((selector & 0xfffc) == 0)
2973 raise_exception_err(EXCP0D_GPF, 0);
2974
2975 if (load_segment(&e1, &e2, selector) != 0)
2976 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2977 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2978 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2979 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2980 if (dpl > cpl)
2981 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2982 if (!(e2 & DESC_P_MASK))
2983 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2984
2985 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2986 /* to inner privilege */
2987 get_ss_esp_from_tss(&ss, &sp, dpl);
2988#ifdef DEBUG_PCALL
2989 if (loglevel & CPU_LOG_PCALL)
2990 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2991 ss, sp, param_count, ESP);
2992#endif
2993 if ((ss & 0xfffc) == 0)
2994 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2995 if ((ss & 3) != dpl)
2996 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2997 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2998 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2999 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3000 if (ss_dpl != dpl)
3001 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3002 if (!(ss_e2 & DESC_S_MASK) ||
3003 (ss_e2 & DESC_CS_MASK) ||
3004 !(ss_e2 & DESC_W_MASK))
3005 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3006 if (!(ss_e2 & DESC_P_MASK))
3007#ifdef VBOX /* See page 3-99 of 253666.pdf */
3008 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3009#else
3010 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3011#endif
3012
3013 // push_size = ((param_count * 2) + 8) << shift;
3014
3015 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3016 old_ssp = env->segs[R_SS].base;
3017
3018 sp_mask = get_sp_mask(ss_e2);
3019 ssp = get_seg_base(ss_e1, ss_e2);
3020 if (shift) {
3021 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3022 PUSHL(ssp, sp, sp_mask, ESP);
3023 for(i = param_count - 1; i >= 0; i--) {
3024 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3025 PUSHL(ssp, sp, sp_mask, val);
3026 }
3027 } else {
3028 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3029 PUSHW(ssp, sp, sp_mask, ESP);
3030 for(i = param_count - 1; i >= 0; i--) {
3031 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3032 PUSHW(ssp, sp, sp_mask, val);
3033 }
3034 }
3035 new_stack = 1;
3036 } else {
3037 /* to same privilege */
3038 sp = ESP;
3039 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3040 ssp = env->segs[R_SS].base;
3041 // push_size = (4 << shift);
3042 new_stack = 0;
3043 }
3044
3045 if (shift) {
3046 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3047 PUSHL(ssp, sp, sp_mask, next_eip);
3048 } else {
3049 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3050 PUSHW(ssp, sp, sp_mask, next_eip);
3051 }
3052
3053 /* from this point, not restartable */
3054
3055 if (new_stack) {
3056 ss = (ss & ~3) | dpl;
3057 cpu_x86_load_seg_cache(env, R_SS, ss,
3058 ssp,
3059 get_seg_limit(ss_e1, ss_e2),
3060 ss_e2);
3061 }
3062
3063 selector = (selector & ~3) | dpl;
3064 cpu_x86_load_seg_cache(env, R_CS, selector,
3065 get_seg_base(e1, e2),
3066 get_seg_limit(e1, e2),
3067 e2);
3068 cpu_x86_set_cpl(env, dpl);
3069 SET_ESP(sp, sp_mask);
3070 EIP = offset;
3071 }
3072#ifdef USE_KQEMU
3073 if (kqemu_is_ok(env)) {
3074 env->exception_index = -1;
3075 cpu_loop_exit();
3076 }
3077#endif
3078}
3079
3080/* real and vm86 mode iret */
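/* The VBox additions below cover the CR4.VME case: with IOPL < 3 in
   virtual-8086 mode IRET is allowed, but setting TF, or setting IF while a
   virtual interrupt is pending (VIP), raises #GP; the popped IF bit is
   reflected into VIF instead of the real IF/IOPL bits. */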
3081void helper_iret_real(int shift)
3082{
3083 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3084 target_ulong ssp;
3085 int eflags_mask;
3086#ifdef VBOX
3087 bool fVME = false;
3088
3089 remR3TrapClear(env->pVM);
3090#endif /* VBOX */
3091
3092 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3093 sp = ESP;
3094 ssp = env->segs[R_SS].base;
3095 if (shift == 1) {
3096 /* 32 bits */
3097 POPL(ssp, sp, sp_mask, new_eip);
3098 POPL(ssp, sp, sp_mask, new_cs);
3099 new_cs &= 0xffff;
3100 POPL(ssp, sp, sp_mask, new_eflags);
3101 } else {
3102 /* 16 bits */
3103 POPW(ssp, sp, sp_mask, new_eip);
3104 POPW(ssp, sp, sp_mask, new_cs);
3105 POPW(ssp, sp, sp_mask, new_eflags);
3106 }
3107#ifdef VBOX
3108 if ( (env->eflags & VM_MASK)
3109 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3110 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3111 {
3112 fVME = true;
3113 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3114 /* if TF will be set -> #GP */
3115 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3116 || (new_eflags & TF_MASK))
3117 raise_exception(EXCP0D_GPF);
3118 }
3119#endif /* VBOX */
3120 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3121 env->segs[R_CS].selector = new_cs;
3122 env->segs[R_CS].base = (new_cs << 4);
3123 env->eip = new_eip;
3124#ifdef VBOX
3125 if (fVME)
3126 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3127 else
3128#endif
3129 if (env->eflags & VM_MASK)
3130 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3131 else
3132 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3133 if (shift == 0)
3134 eflags_mask &= 0xffff;
3135 load_eflags(new_eflags, eflags_mask);
3136 env->hflags2 &= ~HF2_NMI_MASK;
3137#ifdef VBOX
3138 if (fVME)
3139 {
3140 if (new_eflags & IF_MASK)
3141 env->eflags |= VIF_MASK;
3142 else
3143 env->eflags &= ~VIF_MASK;
3144 }
3145#endif /* VBOX */
3146}
3147
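/* On a return to an outer privilege level, data and non-conforming code
   segment registers whose DPL is below the new CPL are nulled so they
   cannot be used by the less privileged code. */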
3148#ifndef VBOX
3149static inline void validate_seg(int seg_reg, int cpl)
3150#else /* VBOX */
3151DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3152#endif /* VBOX */
3153{
3154 int dpl;
3155 uint32_t e2;
3156
3157 /* XXX: on x86_64, we do not want to nullify FS and GS because
3158 they may still contain a valid base. I would be interested to
3159 know how a real x86_64 CPU behaves */
3160 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3161 (env->segs[seg_reg].selector & 0xfffc) == 0)
3162 return;
3163
3164 e2 = env->segs[seg_reg].flags;
3165 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3166 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3167 /* data or non conforming code segment */
3168 if (dpl < cpl) {
3169 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3170 }
3171 }
3172}
3173
3174/* protected mode iret and far ret (is_iret selects which frame is popped) */
3175#ifndef VBOX
3176static inline void helper_ret_protected(int shift, int is_iret, int addend)
3177#else /* VBOX */
3178DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3179#endif /* VBOX */
3180{
3181 uint32_t new_cs, new_eflags, new_ss;
3182 uint32_t new_es, new_ds, new_fs, new_gs;
3183 uint32_t e1, e2, ss_e1, ss_e2;
3184 int cpl, dpl, rpl, eflags_mask, iopl;
3185 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3186
3187#ifdef VBOX
3188 ss_e1 = ss_e2 = e1 = e2 = 0;
3189#endif
3190
3191#ifdef TARGET_X86_64
3192 if (shift == 2)
3193 sp_mask = -1;
3194 else
3195#endif
3196 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3197 sp = ESP;
3198 ssp = env->segs[R_SS].base;
3199 new_eflags = 0; /* avoid warning */
3200#ifdef TARGET_X86_64
3201 if (shift == 2) {
3202 POPQ(sp, new_eip);
3203 POPQ(sp, new_cs);
3204 new_cs &= 0xffff;
3205 if (is_iret) {
3206 POPQ(sp, new_eflags);
3207 }
3208 } else
3209#endif
3210 if (shift == 1) {
3211 /* 32 bits */
3212 POPL(ssp, sp, sp_mask, new_eip);
3213 POPL(ssp, sp, sp_mask, new_cs);
3214 new_cs &= 0xffff;
3215 if (is_iret) {
3216 POPL(ssp, sp, sp_mask, new_eflags);
3217#if defined(VBOX) && defined(DEBUG)
3218 printf("iret: new CS %04X\n", new_cs);
3219 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3220 printf("iret: new EFLAGS %08X\n", new_eflags);
3221 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3222#endif
3223 if (new_eflags & VM_MASK)
3224 goto return_to_vm86;
3225 }
3226#ifdef VBOX
3227 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3228 {
3229#ifdef DEBUG
3230 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3231#endif
3232 new_cs = new_cs & 0xfffc;
3233 }
3234#endif
3235 } else {
3236 /* 16 bits */
3237 POPW(ssp, sp, sp_mask, new_eip);
3238 POPW(ssp, sp, sp_mask, new_cs);
3239 if (is_iret)
3240 POPW(ssp, sp, sp_mask, new_eflags);
3241 }
3242#ifdef DEBUG_PCALL
3243 if (loglevel & CPU_LOG_PCALL) {
3244 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3245 new_cs, new_eip, shift, addend);
3246 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3247 }
3248#endif
3249 if ((new_cs & 0xfffc) == 0)
3250 {
3251#if defined(VBOX) && defined(DEBUG)
3252 printf("new_cs & 0xfffc) == 0\n");
3253#endif
3254 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3255 }
3256 if (load_segment(&e1, &e2, new_cs) != 0)
3257 {
3258#if defined(VBOX) && defined(DEBUG)
3259 printf("load_segment failed\n");
3260#endif
3261 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3262 }
3263 if (!(e2 & DESC_S_MASK) ||
3264 !(e2 & DESC_CS_MASK))
3265 {
3266#if defined(VBOX) && defined(DEBUG)
3267 printf("e2 mask %08x\n", e2);
3268#endif
3269 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3270 }
3271 cpl = env->hflags & HF_CPL_MASK;
3272 rpl = new_cs & 3;
3273 if (rpl < cpl)
3274 {
3275#if defined(VBOX) && defined(DEBUG)
3276 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3277#endif
3278 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3279 }
3280 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3281 if (e2 & DESC_C_MASK) {
3282 if (dpl > rpl)
3283 {
3284#if defined(VBOX) && defined(DEBUG)
3285 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3286#endif
3287 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3288 }
3289 } else {
3290 if (dpl != rpl)
3291 {
3292#if defined(VBOX) && defined(DEBUG)
3293 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3294#endif
3295 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3296 }
3297 }
3298 if (!(e2 & DESC_P_MASK))
3299 {
3300#if defined(VBOX) && defined(DEBUG)
3301 printf("DESC_P_MASK e2=%08x\n", e2);
3302#endif
3303 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3304 }
3305
3306 sp += addend;
3307 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3308 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3309 /* return to same privilege level */
3310 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3311 get_seg_base(e1, e2),
3312 get_seg_limit(e1, e2),
3313 e2);
3314 } else {
3315 /* return to different privilege level */
3316#ifdef TARGET_X86_64
3317 if (shift == 2) {
3318 POPQ(sp, new_esp);
3319 POPQ(sp, new_ss);
3320 new_ss &= 0xffff;
3321 } else
3322#endif
3323 if (shift == 1) {
3324 /* 32 bits */
3325 POPL(ssp, sp, sp_mask, new_esp);
3326 POPL(ssp, sp, sp_mask, new_ss);
3327 new_ss &= 0xffff;
3328 } else {
3329 /* 16 bits */
3330 POPW(ssp, sp, sp_mask, new_esp);
3331 POPW(ssp, sp, sp_mask, new_ss);
3332 }
3333#ifdef DEBUG_PCALL
3334 if (loglevel & CPU_LOG_PCALL) {
3335 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3336 new_ss, new_esp);
3337 }
3338#endif
3339 if ((new_ss & 0xfffc) == 0) {
3340#ifdef TARGET_X86_64
3341 /* NULL ss is allowed in long mode if cpl != 3 */
3342 /* XXX: test CS64 ? */
3343 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3344 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3345 0, 0xffffffff,
3346 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3347 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3348 DESC_W_MASK | DESC_A_MASK);
3349 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3350 } else
3351#endif
3352 {
3353 raise_exception_err(EXCP0D_GPF, 0);
3354 }
3355 } else {
3356 if ((new_ss & 3) != rpl)
3357 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3358 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3359 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3360 if (!(ss_e2 & DESC_S_MASK) ||
3361 (ss_e2 & DESC_CS_MASK) ||
3362 !(ss_e2 & DESC_W_MASK))
3363 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3364 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3365 if (dpl != rpl)
3366 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3367 if (!(ss_e2 & DESC_P_MASK))
3368 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3369 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3370 get_seg_base(ss_e1, ss_e2),
3371 get_seg_limit(ss_e1, ss_e2),
3372 ss_e2);
3373 }
3374
3375 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3376 get_seg_base(e1, e2),
3377 get_seg_limit(e1, e2),
3378 e2);
3379 cpu_x86_set_cpl(env, rpl);
3380 sp = new_esp;
3381#ifdef TARGET_X86_64
3382 if (env->hflags & HF_CS64_MASK)
3383 sp_mask = -1;
3384 else
3385#endif
3386 sp_mask = get_sp_mask(ss_e2);
3387
3388 /* validate data segments */
3389 validate_seg(R_ES, rpl);
3390 validate_seg(R_DS, rpl);
3391 validate_seg(R_FS, rpl);
3392 validate_seg(R_GS, rpl);
3393
3394 sp += addend;
3395 }
3396 SET_ESP(sp, sp_mask);
3397 env->eip = new_eip;
3398 if (is_iret) {
3399 /* NOTE: 'cpl' is the _old_ CPL */
3400 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3401 if (cpl == 0)
3402#ifdef VBOX
3403 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3404#else
3405 eflags_mask |= IOPL_MASK;
3406#endif
3407 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3408 if (cpl <= iopl)
3409 eflags_mask |= IF_MASK;
3410 if (shift == 0)
3411 eflags_mask &= 0xffff;
3412 load_eflags(new_eflags, eflags_mask);
3413 }
3414 return;
3415
3416 return_to_vm86:
3417 POPL(ssp, sp, sp_mask, new_esp);
3418 POPL(ssp, sp, sp_mask, new_ss);
3419 POPL(ssp, sp, sp_mask, new_es);
3420 POPL(ssp, sp, sp_mask, new_ds);
3421 POPL(ssp, sp, sp_mask, new_fs);
3422 POPL(ssp, sp, sp_mask, new_gs);
3423
3424 /* modify processor state */
3425 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3426 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3427 load_seg_vm(R_CS, new_cs & 0xffff);
3428 cpu_x86_set_cpl(env, 3);
3429 load_seg_vm(R_SS, new_ss & 0xffff);
3430 load_seg_vm(R_ES, new_es & 0xffff);
3431 load_seg_vm(R_DS, new_ds & 0xffff);
3432 load_seg_vm(R_FS, new_fs & 0xffff);
3433 load_seg_vm(R_GS, new_gs & 0xffff);
3434
3435 env->eip = new_eip & 0xffff;
3436 ESP = new_esp;
3437}
3438
3439void helper_iret_protected(int shift, int next_eip)
3440{
3441 int tss_selector, type;
3442 uint32_t e1, e2;
3443
3444#ifdef VBOX
3445 e1 = e2 = 0;
3446 remR3TrapClear(env->pVM);
3447#endif
3448
3449 /* specific case for TSS */
3450 if (env->eflags & NT_MASK) {
3451#ifdef TARGET_X86_64
3452 if (env->hflags & HF_LMA_MASK)
3453 raise_exception_err(EXCP0D_GPF, 0);
3454#endif
3455 tss_selector = lduw_kernel(env->tr.base + 0);
3456 if (tss_selector & 4)
3457 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3458 if (load_segment(&e1, &e2, tss_selector) != 0)
3459 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3460 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3461 /* NOTE: we check both segment and busy TSS */
3462 if (type != 3)
3463 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3464 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3465 } else {
3466 helper_ret_protected(shift, 1, 0);
3467 }
3468 env->hflags2 &= ~HF2_NMI_MASK;
3469#ifdef USE_KQEMU
3470 if (kqemu_is_ok(env)) {
3471 CC_OP = CC_OP_EFLAGS;
3472 env->exception_index = -1;
3473 cpu_loop_exit();
3474 }
3475#endif
3476}
3477
3478void helper_lret_protected(int shift, int addend)
3479{
3480 helper_ret_protected(shift, 0, addend);
3481#ifdef USE_KQEMU
3482 if (kqemu_is_ok(env)) {
3483 env->exception_index = -1;
3484 cpu_loop_exit();
3485 }
3486#endif
3487}
3488
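/* SYSENTER: #GP if IA32_SYSENTER_CS is zero; otherwise switch to CPL 0
   with flat segments, CS from SYSENTER_CS, SS = SYSENTER_CS + 8, and load
   EIP/ESP from the SYSENTER_EIP/ESP MSRs. VM, IF and RF are cleared. */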
3489void helper_sysenter(void)
3490{
3491 if (env->sysenter_cs == 0) {
3492 raise_exception_err(EXCP0D_GPF, 0);
3493 }
3494 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3495 cpu_x86_set_cpl(env, 0);
3496
3497#ifdef TARGET_X86_64
3498 if (env->hflags & HF_LMA_MASK) {
3499 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3500 0, 0xffffffff,
3501 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3502 DESC_S_MASK |
3503 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3504 } else
3505#endif
3506 {
3507 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3508 0, 0xffffffff,
3509 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3510 DESC_S_MASK |
3511 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3512 }
3513 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3514 0, 0xffffffff,
3515 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3516 DESC_S_MASK |
3517 DESC_W_MASK | DESC_A_MASK);
3518 ESP = env->sysenter_esp;
3519 EIP = env->sysenter_eip;
3520}
3521
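/* SYSEXIT: only legal at CPL 0 with a non-zero IA32_SYSENTER_CS. Returns
   to CPL 3 with CS = SYSENTER_CS + 16 and SS = SYSENTER_CS + 24 (or +32
   and +40 for a 64-bit return), taking the new EIP from EDX and the new
   ESP from ECX. */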
3522void helper_sysexit(int dflag)
3523{
3524 int cpl;
3525
3526 cpl = env->hflags & HF_CPL_MASK;
3527 if (env->sysenter_cs == 0 || cpl != 0) {
3528 raise_exception_err(EXCP0D_GPF, 0);
3529 }
3530 cpu_x86_set_cpl(env, 3);
3531#ifdef TARGET_X86_64
3532 if (dflag == 2) {
3533 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3534 0, 0xffffffff,
3535 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3536 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3537 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3538 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3539 0, 0xffffffff,
3540 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3541 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3542 DESC_W_MASK | DESC_A_MASK);
3543 } else
3544#endif
3545 {
3546 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3547 0, 0xffffffff,
3548 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3549 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3550 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3551 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3552 0, 0xffffffff,
3553 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3554 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3555 DESC_W_MASK | DESC_A_MASK);
3556 }
3557 ESP = ECX;
3558 EIP = EDX;
3559#ifdef USE_KQEMU
3560 if (kqemu_is_ok(env)) {
3561 env->exception_index = -1;
3562 cpu_loop_exit();
3563 }
3564#endif
3565}
3566
3567#if defined(CONFIG_USER_ONLY)
3568target_ulong helper_read_crN(int reg)
3569{
3570 return 0;
3571}
3572
3573void helper_write_crN(int reg, target_ulong t0)
3574{
3575}
3576#else
3577target_ulong helper_read_crN(int reg)
3578{
3579 target_ulong val;
3580
3581 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3582 switch(reg) {
3583 default:
3584 val = env->cr[reg];
3585 break;
3586 case 8:
3587 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3588 val = cpu_get_apic_tpr(env);
3589 } else {
3590 val = env->v_tpr;
3591 }
3592 break;
3593 }
3594 return val;
3595}
3596
3597void helper_write_crN(int reg, target_ulong t0)
3598{
3599 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3600 switch(reg) {
3601 case 0:
3602 cpu_x86_update_cr0(env, t0);
3603 break;
3604 case 3:
3605 cpu_x86_update_cr3(env, t0);
3606 break;
3607 case 4:
3608 cpu_x86_update_cr4(env, t0);
3609 break;
3610 case 8:
3611 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3612 cpu_set_apic_tpr(env, t0);
3613 }
3614 env->v_tpr = t0 & 0x0f;
3615 break;
3616 default:
3617 env->cr[reg] = t0;
3618 break;
3619 }
3620}
3621#endif
3622
3623void helper_lmsw(target_ulong t0)
3624{
3625 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3626 if already set to one. */
3627 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3628 helper_write_crN(0, t0);
3629}
3630
3631void helper_clts(void)
3632{
3633 env->cr[0] &= ~CR0_TS_MASK;
3634 env->hflags &= ~HF_TS_MASK;
3635}
3636
3637/* XXX: do more */
3638void helper_movl_drN_T0(int reg, target_ulong t0)
3639{
3640 env->dr[reg] = t0;
3641}
3642
3643void helper_invlpg(target_ulong addr)
3644{
3645 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3646 tlb_flush_page(env, addr);
3647}
3648
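/* RDTSC faults with #GP if CR4.TSD is set and CPL != 0; the 64-bit counter
   is returned in EDX:EAX. env->tsc_offset adds the guest TSC offset used
   by the SVM emulation. */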
3649void helper_rdtsc(void)
3650{
3651 uint64_t val;
3652
3653 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3654 raise_exception(EXCP0D_GPF);
3655 }
3656 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3657
3658 val = cpu_get_tsc(env) + env->tsc_offset;
3659 EAX = (uint32_t)(val);
3660 EDX = (uint32_t)(val >> 32);
3661}
3662
3663#ifdef VBOX
3664void helper_rdtscp(void)
3665{
3666 uint64_t val;
3667 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3668 raise_exception(EXCP0D_GPF);
3669 }
3670
3671 val = cpu_get_tsc(env);
3672 EAX = (uint32_t)(val);
3673 EDX = (uint32_t)(val >> 32);
3674 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3675}
3676#endif
3677
3678void helper_rdpmc(void)
3679{
3680 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3681 raise_exception(EXCP0D_GPF);
3682 }
3683 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3684
3685 /* currently unimplemented */
3686 raise_exception_err(EXCP06_ILLOP, 0);
3687}
3688
3689#if defined(CONFIG_USER_ONLY)
3690void helper_wrmsr(void)
3691{
3692}
3693
3694void helper_rdmsr(void)
3695{
3696}
3697#else
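/* WRMSR/RDMSR: ECX selects the MSR and the value is passed/returned in
   EDX:EAX. Unknown MSRs are silently ignored here (see the XXX below);
   the VBox build additionally routes the X2APIC MSR range to the APIC. */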
3698void helper_wrmsr(void)
3699{
3700 uint64_t val;
3701
3702 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3703
3704 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3705
3706 switch((uint32_t)ECX) {
3707 case MSR_IA32_SYSENTER_CS:
3708 env->sysenter_cs = val & 0xffff;
3709 break;
3710 case MSR_IA32_SYSENTER_ESP:
3711 env->sysenter_esp = val;
3712 break;
3713 case MSR_IA32_SYSENTER_EIP:
3714 env->sysenter_eip = val;
3715 break;
3716 case MSR_IA32_APICBASE:
3717 cpu_set_apic_base(env, val);
3718 break;
3719 case MSR_EFER:
3720 {
3721 uint64_t update_mask;
3722 update_mask = 0;
3723 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3724 update_mask |= MSR_EFER_SCE;
3725 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3726 update_mask |= MSR_EFER_LME;
3727 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3728 update_mask |= MSR_EFER_FFXSR;
3729 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3730 update_mask |= MSR_EFER_NXE;
3731 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3732 update_mask |= MSR_EFER_SVME;
3733 cpu_load_efer(env, (env->efer & ~update_mask) |
3734 (val & update_mask));
3735 }
3736 break;
3737 case MSR_STAR:
3738 env->star = val;
3739 break;
3740 case MSR_PAT:
3741 env->pat = val;
3742 break;
3743 case MSR_VM_HSAVE_PA:
3744 env->vm_hsave = val;
3745 break;
3746#ifdef TARGET_X86_64
3747 case MSR_LSTAR:
3748 env->lstar = val;
3749 break;
3750 case MSR_CSTAR:
3751 env->cstar = val;
3752 break;
3753 case MSR_FMASK:
3754 env->fmask = val;
3755 break;
3756 case MSR_FSBASE:
3757 env->segs[R_FS].base = val;
3758 break;
3759 case MSR_GSBASE:
3760 env->segs[R_GS].base = val;
3761 break;
3762 case MSR_KERNELGSBASE:
3763 env->kernelgsbase = val;
3764 break;
3765#endif
3766 default:
3767#ifndef VBOX
3768 /* XXX: exception ? */
3769 break;
3770#else /* VBOX */
3771 {
3772 uint32_t ecx = (uint32_t)ECX;
3773 /* In X2APIC specification this range is reserved for APIC control. */
3774 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3775 cpu_apic_wrmsr(env, ecx, val);
3776 /** @todo else exception? */
3777 break;
3778 }
3779 case MSR_K8_TSC_AUX:
3780 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3781 break;
3782#endif /* VBOX */
3783 }
3784}
3785
3786void helper_rdmsr(void)
3787{
3788 uint64_t val;
3789
3790 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3791
3792 switch((uint32_t)ECX) {
3793 case MSR_IA32_SYSENTER_CS:
3794 val = env->sysenter_cs;
3795 break;
3796 case MSR_IA32_SYSENTER_ESP:
3797 val = env->sysenter_esp;
3798 break;
3799 case MSR_IA32_SYSENTER_EIP:
3800 val = env->sysenter_eip;
3801 break;
3802 case MSR_IA32_APICBASE:
3803 val = cpu_get_apic_base(env);
3804 break;
3805 case MSR_EFER:
3806 val = env->efer;
3807 break;
3808 case MSR_STAR:
3809 val = env->star;
3810 break;
3811 case MSR_PAT:
3812 val = env->pat;
3813 break;
3814 case MSR_VM_HSAVE_PA:
3815 val = env->vm_hsave;
3816 break;
3817 case MSR_IA32_PERF_STATUS:
3818 /* tsc_increment_by_tick */
3819 val = 1000ULL;
3820 /* CPU multiplier */
3821 val |= (((uint64_t)4ULL) << 40);
3822 break;
3823#ifdef TARGET_X86_64
3824 case MSR_LSTAR:
3825 val = env->lstar;
3826 break;
3827 case MSR_CSTAR:
3828 val = env->cstar;
3829 break;
3830 case MSR_FMASK:
3831 val = env->fmask;
3832 break;
3833 case MSR_FSBASE:
3834 val = env->segs[R_FS].base;
3835 break;
3836 case MSR_GSBASE:
3837 val = env->segs[R_GS].base;
3838 break;
3839 case MSR_KERNELGSBASE:
3840 val = env->kernelgsbase;
3841 break;
3842#endif
3843#ifdef USE_KQEMU
3844 case MSR_QPI_COMMBASE:
3845 if (env->kqemu_enabled) {
3846 val = kqemu_comm_base;
3847 } else {
3848 val = 0;
3849 }
3850 break;
3851#endif
3852 default:
3853#ifndef VBOX
3854 /* XXX: exception ? */
3855 val = 0;
3856 break;
3857#else /* VBOX */
3858 {
3859 uint32_t ecx = (uint32_t)ECX;
3860 /* In X2APIC specification this range is reserved for APIC control. */
3861 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3862 val = cpu_apic_rdmsr(env, ecx);
3863 else
3864 val = 0; /** @todo else exception? */
3865 break;
3866 }
3867 case MSR_K8_TSC_AUX:
3868 val = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3869 break;
3870#endif /* VBOX */
3871 }
3872 EAX = (uint32_t)(val);
3873 EDX = (uint32_t)(val >> 32);
3874}
3875#endif
3876
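/* LSL and LAR return the segment limit or access rights and set ZF on
   success; VERR/VERW only set or clear ZF. All of them clear ZF instead of
   faulting when the selector is not acceptable at the current privilege
   level. */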
3877target_ulong helper_lsl(target_ulong selector1)
3878{
3879 unsigned int limit;
3880 uint32_t e1, e2, eflags, selector;
3881 int rpl, dpl, cpl, type;
3882
3883 selector = selector1 & 0xffff;
3884 eflags = cc_table[CC_OP].compute_all();
3885 if (load_segment(&e1, &e2, selector) != 0)
3886 goto fail;
3887 rpl = selector & 3;
3888 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3889 cpl = env->hflags & HF_CPL_MASK;
3890 if (e2 & DESC_S_MASK) {
3891 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3892 /* conforming */
3893 } else {
3894 if (dpl < cpl || dpl < rpl)
3895 goto fail;
3896 }
3897 } else {
3898 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3899 switch(type) {
3900 case 1:
3901 case 2:
3902 case 3:
3903 case 9:
3904 case 11:
3905 break;
3906 default:
3907 goto fail;
3908 }
3909 if (dpl < cpl || dpl < rpl) {
3910 fail:
3911 CC_SRC = eflags & ~CC_Z;
3912 return 0;
3913 }
3914 }
3915 limit = get_seg_limit(e1, e2);
3916 CC_SRC = eflags | CC_Z;
3917 return limit;
3918}
3919
3920target_ulong helper_lar(target_ulong selector1)
3921{
3922 uint32_t e1, e2, eflags, selector;
3923 int rpl, dpl, cpl, type;
3924
3925 selector = selector1 & 0xffff;
3926 eflags = cc_table[CC_OP].compute_all();
3927 if ((selector & 0xfffc) == 0)
3928 goto fail;
3929 if (load_segment(&e1, &e2, selector) != 0)
3930 goto fail;
3931 rpl = selector & 3;
3932 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3933 cpl = env->hflags & HF_CPL_MASK;
3934 if (e2 & DESC_S_MASK) {
3935 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3936 /* conforming */
3937 } else {
3938 if (dpl < cpl || dpl < rpl)
3939 goto fail;
3940 }
3941 } else {
3942 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3943 switch(type) {
3944 case 1:
3945 case 2:
3946 case 3:
3947 case 4:
3948 case 5:
3949 case 9:
3950 case 11:
3951 case 12:
3952 break;
3953 default:
3954 goto fail;
3955 }
3956 if (dpl < cpl || dpl < rpl) {
3957 fail:
3958 CC_SRC = eflags & ~CC_Z;
3959 return 0;
3960 }
3961 }
3962 CC_SRC = eflags | CC_Z;
3963 return e2 & 0x00f0ff00;
3964}
3965
3966void helper_verr(target_ulong selector1)
3967{
3968 uint32_t e1, e2, eflags, selector;
3969 int rpl, dpl, cpl;
3970
3971 selector = selector1 & 0xffff;
3972 eflags = cc_table[CC_OP].compute_all();
3973 if ((selector & 0xfffc) == 0)
3974 goto fail;
3975 if (load_segment(&e1, &e2, selector) != 0)
3976 goto fail;
3977 if (!(e2 & DESC_S_MASK))
3978 goto fail;
3979 rpl = selector & 3;
3980 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3981 cpl = env->hflags & HF_CPL_MASK;
3982 if (e2 & DESC_CS_MASK) {
3983 if (!(e2 & DESC_R_MASK))
3984 goto fail;
3985 if (!(e2 & DESC_C_MASK)) {
3986 if (dpl < cpl || dpl < rpl)
3987 goto fail;
3988 }
3989 } else {
3990 if (dpl < cpl || dpl < rpl) {
3991 fail:
3992 CC_SRC = eflags & ~CC_Z;
3993 return;
3994 }
3995 }
3996 CC_SRC = eflags | CC_Z;
3997}
3998
3999void helper_verw(target_ulong selector1)
4000{
4001 uint32_t e1, e2, eflags, selector;
4002 int rpl, dpl, cpl;
4003
4004 selector = selector1 & 0xffff;
4005 eflags = cc_table[CC_OP].compute_all();
4006 if ((selector & 0xfffc) == 0)
4007 goto fail;
4008 if (load_segment(&e1, &e2, selector) != 0)
4009 goto fail;
4010 if (!(e2 & DESC_S_MASK))
4011 goto fail;
4012 rpl = selector & 3;
4013 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4014 cpl = env->hflags & HF_CPL_MASK;
4015 if (e2 & DESC_CS_MASK) {
4016 goto fail;
4017 } else {
4018 if (dpl < cpl || dpl < rpl)
4019 goto fail;
4020 if (!(e2 & DESC_W_MASK)) {
4021 fail:
4022 CC_SRC = eflags & ~CC_Z;
4023 return;
4024 }
4025 }
4026 CC_SRC = eflags | CC_Z;
4027}
4028
4029/* x87 FPU helpers */
4030
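/* Sets the given exception flag(s) in the FPU status word; if the exception
   is unmasked in the control word, also sets the summary (ES) and busy (B)
   bits so a #MF can be delivered on the next waiting FP instruction. */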
4031static void fpu_set_exception(int mask)
4032{
4033 env->fpus |= mask;
4034 if (env->fpus & (~env->fpuc & FPUC_EM))
4035 env->fpus |= FPUS_SE | FPUS_B;
4036}
4037
4038#ifndef VBOX
4039static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4040#else /* VBOX */
4041DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4042#endif /* VBOX */
4043{
4044 if (b == 0.0)
4045 fpu_set_exception(FPUS_ZE);
4046 return a / b;
4047}
4048
4049void fpu_raise_exception(void)
4050{
4051 if (env->cr[0] & CR0_NE_MASK) {
4052 raise_exception(EXCP10_COPR);
4053 }
4054#if !defined(CONFIG_USER_ONLY)
4055 else {
4056 cpu_set_ferr(env);
4057 }
4058#endif
4059}
4060
4061void helper_flds_FT0(uint32_t val)
4062{
4063 union {
4064 float32 f;
4065 uint32_t i;
4066 } u;
4067 u.i = val;
4068 FT0 = float32_to_floatx(u.f, &env->fp_status);
4069}
4070
4071void helper_fldl_FT0(uint64_t val)
4072{
4073 union {
4074 float64 f;
4075 uint64_t i;
4076 } u;
4077 u.i = val;
4078 FT0 = float64_to_floatx(u.f, &env->fp_status);
4079}
4080
4081void helper_fildl_FT0(int32_t val)
4082{
4083 FT0 = int32_to_floatx(val, &env->fp_status);
4084}
4085
4086void helper_flds_ST0(uint32_t val)
4087{
4088 int new_fpstt;
4089 union {
4090 float32 f;
4091 uint32_t i;
4092 } u;
4093 new_fpstt = (env->fpstt - 1) & 7;
4094 u.i = val;
4095 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4096 env->fpstt = new_fpstt;
4097 env->fptags[new_fpstt] = 0; /* validate stack entry */
4098}
4099
4100void helper_fldl_ST0(uint64_t val)
4101{
4102 int new_fpstt;
4103 union {
4104 float64 f;
4105 uint64_t i;
4106 } u;
4107 new_fpstt = (env->fpstt - 1) & 7;
4108 u.i = val;
4109 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4110 env->fpstt = new_fpstt;
4111 env->fptags[new_fpstt] = 0; /* validate stack entry */
4112}
4113
4114void helper_fildl_ST0(int32_t val)
4115{
4116 int new_fpstt;
4117 new_fpstt = (env->fpstt - 1) & 7;
4118 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4119 env->fpstt = new_fpstt;
4120 env->fptags[new_fpstt] = 0; /* validate stack entry */
4121}
4122
4123void helper_fildll_ST0(int64_t val)
4124{
4125 int new_fpstt;
4126 new_fpstt = (env->fpstt - 1) & 7;
4127 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4128 env->fpstt = new_fpstt;
4129 env->fptags[new_fpstt] = 0; /* validate stack entry */
4130}
4131
4132#ifndef VBOX
4133uint32_t helper_fsts_ST0(void)
4134#else
4135RTCCUINTREG helper_fsts_ST0(void)
4136#endif
4137{
4138 union {
4139 float32 f;
4140 uint32_t i;
4141 } u;
4142 u.f = floatx_to_float32(ST0, &env->fp_status);
4143 return u.i;
4144}
4145
4146uint64_t helper_fstl_ST0(void)
4147{
4148 union {
4149 float64 f;
4150 uint64_t i;
4151 } u;
4152 u.f = floatx_to_float64(ST0, &env->fp_status);
4153 return u.i;
4154}
4155#ifndef VBOX
4156int32_t helper_fist_ST0(void)
4157#else
4158RTCCINTREG helper_fist_ST0(void)
4159#endif
4160{
4161 int32_t val;
4162 val = floatx_to_int32(ST0, &env->fp_status);
4163 if (val != (int16_t)val)
4164 val = -32768;
4165 return val;
4166}
4167
4168#ifndef VBOX
4169int32_t helper_fistl_ST0(void)
4170#else
4171RTCCINTREG helper_fistl_ST0(void)
4172#endif
4173{
4174 int32_t val;
4175 val = floatx_to_int32(ST0, &env->fp_status);
4176 return val;
4177}
4178
4179int64_t helper_fistll_ST0(void)
4180{
4181 int64_t val;
4182 val = floatx_to_int64(ST0, &env->fp_status);
4183 return val;
4184}
4185
4186#ifndef VBOX
4187int32_t helper_fistt_ST0(void)
4188#else
4189RTCCINTREG helper_fistt_ST0(void)
4190#endif
4191{
4192 int32_t val;
4193 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4194 if (val != (int16_t)val)
4195 val = -32768;
4196 return val;
4197}
4198
4199#ifndef VBOX
4200int32_t helper_fisttl_ST0(void)
4201#else
4202RTCCINTREG helper_fisttl_ST0(void)
4203#endif
4204{
4205 int32_t val;
4206 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4207 return val;
4208}
4209
4210int64_t helper_fisttll_ST0(void)
4211{
4212 int64_t val;
4213 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4214 return val;
4215}
4216
4217void helper_fldt_ST0(target_ulong ptr)
4218{
4219 int new_fpstt;
4220 new_fpstt = (env->fpstt - 1) & 7;
4221 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4222 env->fpstt = new_fpstt;
4223 env->fptags[new_fpstt] = 0; /* validate stack entry */
4224}
4225
4226void helper_fstt_ST0(target_ulong ptr)
4227{
4228 helper_fstt(ST0, ptr);
4229}
4230
4231void helper_fpush(void)
4232{
4233 fpush();
4234}
4235
4236void helper_fpop(void)
4237{
4238 fpop();
4239}
4240
4241void helper_fdecstp(void)
4242{
4243 env->fpstt = (env->fpstt - 1) & 7;
4244 env->fpus &= (~0x4700);
4245}
4246
4247void helper_fincstp(void)
4248{
4249 env->fpstt = (env->fpstt + 1) & 7;
4250 env->fpus &= (~0x4700);
4251}
4252
4253/* FPU move */
4254
4255void helper_ffree_STN(int st_index)
4256{
4257 env->fptags[(env->fpstt + st_index) & 7] = 1;
4258}
4259
4260void helper_fmov_ST0_FT0(void)
4261{
4262 ST0 = FT0;
4263}
4264
4265void helper_fmov_FT0_STN(int st_index)
4266{
4267 FT0 = ST(st_index);
4268}
4269
4270void helper_fmov_ST0_STN(int st_index)
4271{
4272 ST0 = ST(st_index);
4273}
4274
4275void helper_fmov_STN_ST0(int st_index)
4276{
4277 ST(st_index) = ST0;
4278}
4279
4280void helper_fxchg_ST0_STN(int st_index)
4281{
4282 CPU86_LDouble tmp;
4283 tmp = ST(st_index);
4284 ST(st_index) = ST0;
4285 ST0 = tmp;
4286}
4287
4288/* FPU operations */
4289
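/* Maps the floatx_compare() result (-1 less, 0 equal, 1 greater, 2 unordered),
   offset by +1, to the FCOM condition codes C0/C2/C3 in the status word. */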
4290static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4291
4292void helper_fcom_ST0_FT0(void)
4293{
4294 int ret;
4295
4296 ret = floatx_compare(ST0, FT0, &env->fp_status);
4297 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4298 FORCE_RET();
4299}
4300
4301void helper_fucom_ST0_FT0(void)
4302{
4303 int ret;
4304
4305 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4306 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
4307 FORCE_RET();
4308}
4309
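/* Same mapping as fcom_ccval, but for FCOMI/FUCOMI which report the result in
   EFLAGS: less -> CF, equal -> ZF, greater -> none, unordered -> ZF|PF|CF. */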
4310static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4311
4312void helper_fcomi_ST0_FT0(void)
4313{
4314 int eflags;
4315 int ret;
4316
4317 ret = floatx_compare(ST0, FT0, &env->fp_status);
4318 eflags = cc_table[CC_OP].compute_all();
4319 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4320 CC_SRC = eflags;
4321 FORCE_RET();
4322}
4323
4324void helper_fucomi_ST0_FT0(void)
4325{
4326 int eflags;
4327 int ret;
4328
4329 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4330 eflags = cc_table[CC_OP].compute_all();
4331 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4332 CC_SRC = eflags;
4333 FORCE_RET();
4334}
4335
4336void helper_fadd_ST0_FT0(void)
4337{
4338 ST0 += FT0;
4339}
4340
4341void helper_fmul_ST0_FT0(void)
4342{
4343 ST0 *= FT0;
4344}
4345
4346void helper_fsub_ST0_FT0(void)
4347{
4348 ST0 -= FT0;
4349}
4350
4351void helper_fsubr_ST0_FT0(void)
4352{
4353 ST0 = FT0 - ST0;
4354}
4355
4356void helper_fdiv_ST0_FT0(void)
4357{
4358 ST0 = helper_fdiv(ST0, FT0);
4359}
4360
4361void helper_fdivr_ST0_FT0(void)
4362{
4363 ST0 = helper_fdiv(FT0, ST0);
4364}
4365
4366/* fp operations between STN and ST0 */
4367
4368void helper_fadd_STN_ST0(int st_index)
4369{
4370 ST(st_index) += ST0;
4371}
4372
4373void helper_fmul_STN_ST0(int st_index)
4374{
4375 ST(st_index) *= ST0;
4376}
4377
4378void helper_fsub_STN_ST0(int st_index)
4379{
4380 ST(st_index) -= ST0;
4381}
4382
4383void helper_fsubr_STN_ST0(int st_index)
4384{
4385 CPU86_LDouble *p;
4386 p = &ST(st_index);
4387 *p = ST0 - *p;
4388}
4389
4390void helper_fdiv_STN_ST0(int st_index)
4391{
4392 CPU86_LDouble *p;
4393 p = &ST(st_index);
4394 *p = helper_fdiv(*p, ST0);
4395}
4396
4397void helper_fdivr_STN_ST0(int st_index)
4398{
4399 CPU86_LDouble *p;
4400 p = &ST(st_index);
4401 *p = helper_fdiv(ST0, *p);
4402}
4403
4404/* misc FPU operations */
4405void helper_fchs_ST0(void)
4406{
4407 ST0 = floatx_chs(ST0);
4408}
4409
4410void helper_fabs_ST0(void)
4411{
4412 ST0 = floatx_abs(ST0);
4413}
4414
4415void helper_fld1_ST0(void)
4416{
4417 ST0 = f15rk[1];
4418}
4419
4420void helper_fldl2t_ST0(void)
4421{
4422 ST0 = f15rk[6];
4423}
4424
4425void helper_fldl2e_ST0(void)
4426{
4427 ST0 = f15rk[5];
4428}
4429
4430void helper_fldpi_ST0(void)
4431{
4432 ST0 = f15rk[2];
4433}
4434
4435void helper_fldlg2_ST0(void)
4436{
4437 ST0 = f15rk[3];
4438}
4439
4440void helper_fldln2_ST0(void)
4441{
4442 ST0 = f15rk[4];
4443}
4444
4445void helper_fldz_ST0(void)
4446{
4447 ST0 = f15rk[0];
4448}
4449
4450void helper_fldz_FT0(void)
4451{
4452 FT0 = f15rk[0];
4453}
4454
4455#ifndef VBOX
4456uint32_t helper_fnstsw(void)
4457#else
4458RTCCUINTREG helper_fnstsw(void)
4459#endif
4460{
4461 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4462}
4463
4464#ifndef VBOX
4465uint32_t helper_fnstcw(void)
4466#else
4467RTCCUINTREG helper_fnstcw(void)
4468#endif
4469{
4470 return env->fpuc;
4471}
4472
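/* Propagates the rounding-control and (when FLOATX80 is available) the
   precision-control fields of the x87 control word into the softfloat
   fp_status used for all emulated FPU arithmetic. */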
4473static void update_fp_status(void)
4474{
4475 int rnd_type;
4476
4477 /* set rounding mode */
4478 switch(env->fpuc & RC_MASK) {
4479 default:
4480 case RC_NEAR:
4481 rnd_type = float_round_nearest_even;
4482 break;
4483 case RC_DOWN:
4484 rnd_type = float_round_down;
4485 break;
4486 case RC_UP:
4487 rnd_type = float_round_up;
4488 break;
4489 case RC_CHOP:
4490 rnd_type = float_round_to_zero;
4491 break;
4492 }
4493 set_float_rounding_mode(rnd_type, &env->fp_status);
4494#ifdef FLOATX80
4495 switch((env->fpuc >> 8) & 3) {
4496 case 0:
4497 rnd_type = 32;
4498 break;
4499 case 2:
4500 rnd_type = 64;
4501 break;
4502 case 3:
4503 default:
4504 rnd_type = 80;
4505 break;
4506 }
4507 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4508#endif
4509}
4510
4511void helper_fldcw(uint32_t val)
4512{
4513 env->fpuc = val;
4514 update_fp_status();
4515}
4516
4517void helper_fclex(void)
4518{
4519 env->fpus &= 0x7f00;
4520}
4521
4522void helper_fwait(void)
4523{
4524 if (env->fpus & FPUS_SE)
4525 fpu_raise_exception();
4526 FORCE_RET();
4527}
4528
4529void helper_fninit(void)
4530{
4531 env->fpus = 0;
4532 env->fpstt = 0;
4533 env->fpuc = 0x37f;
4534 env->fptags[0] = 1;
4535 env->fptags[1] = 1;
4536 env->fptags[2] = 1;
4537 env->fptags[3] = 1;
4538 env->fptags[4] = 1;
4539 env->fptags[5] = 1;
4540 env->fptags[6] = 1;
4541 env->fptags[7] = 1;
4542}
4543
4544/* BCD ops */
4545
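/* FBLD: load an 18-digit packed BCD number (9 digit bytes plus a sign byte at
   offset 9) from memory and push the converted value onto the FPU stack. */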
4546void helper_fbld_ST0(target_ulong ptr)
4547{
4548 CPU86_LDouble tmp;
4549 uint64_t val;
4550 unsigned int v;
4551 int i;
4552
4553 val = 0;
4554 for(i = 8; i >= 0; i--) {
4555 v = ldub(ptr + i);
4556 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4557 }
4558 tmp = val;
4559 if (ldub(ptr + 9) & 0x80)
4560 tmp = -tmp;
4561 fpush();
4562 ST0 = tmp;
4563}
4564
4565void helper_fbst_ST0(target_ulong ptr)
4566{
4567 int v;
4568 target_ulong mem_ref, mem_end;
4569 int64_t val;
4570
4571 val = floatx_to_int64(ST0, &env->fp_status);
4572 mem_ref = ptr;
4573 mem_end = mem_ref + 9;
4574 if (val < 0) {
4575 stb(mem_end, 0x80);
4576 val = -val;
4577 } else {
4578 stb(mem_end, 0x00);
4579 }
4580 while (mem_ref < mem_end) {
4581 if (val == 0)
4582 break;
4583 v = val % 100;
4584 val = val / 100;
4585 v = ((v / 10) << 4) | (v % 10);
4586 stb(mem_ref++, v);
4587 }
4588 while (mem_ref < mem_end) {
4589 stb(mem_ref++, 0);
4590 }
4591}
4592
4593void helper_f2xm1(void)
4594{
4595 ST0 = pow(2.0,ST0) - 1.0;
4596}
4597
4598void helper_fyl2x(void)
4599{
4600 CPU86_LDouble fptemp;
4601
4602 fptemp = ST0;
4603 if (fptemp>0.0){
4604 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4605 ST1 *= fptemp;
4606 fpop();
4607 } else {
4608 env->fpus &= (~0x4700);
4609 env->fpus |= 0x400;
4610 }
4611}
4612
4613void helper_fptan(void)
4614{
4615 CPU86_LDouble fptemp;
4616
4617 fptemp = ST0;
4618 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4619 env->fpus |= 0x400;
4620 } else {
4621 ST0 = tan(fptemp);
4622 fpush();
4623 ST0 = 1.0;
4624 env->fpus &= (~0x400); /* C2 <-- 0 */
4625 /* the above code is for |arg| < 2**52 only */
4626 }
4627}
4628
4629void helper_fpatan(void)
4630{
4631 CPU86_LDouble fptemp, fpsrcop;
4632
4633 fpsrcop = ST1;
4634 fptemp = ST0;
4635 ST1 = atan2(fpsrcop,fptemp);
4636 fpop();
4637}
4638
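/* FXTRACT: decompose ST0 into exponent and significand; afterwards ST1 holds
   the unbiased exponent and ST0 the significand with a re-biased exponent. */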
4639void helper_fxtract(void)
4640{
4641 CPU86_LDoubleU temp;
4642 unsigned int expdif;
4643
4644 temp.d = ST0;
4645 expdif = EXPD(temp) - EXPBIAS;
4646 /*DP exponent bias*/
4647 ST0 = expdif;
4648 fpush();
4649 BIASEXPONENT(temp);
4650 ST0 = temp.d;
4651}
4652
4653#ifdef VBOX
4654#ifdef _MSC_VER
4655/* MSC cannot divide by zero */
4656extern double _Nan;
4657#define NaN _Nan
4658#else
4659#define NaN (0.0 / 0.0)
4660#endif
4661#endif /* VBOX */
4662
4663void helper_fprem1(void)
4664{
4665 CPU86_LDouble dblq, fpsrcop, fptemp;
4666 CPU86_LDoubleU fpsrcop1, fptemp1;
4667 int expdif;
4668 signed long long int q;
4669
4670#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4671 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4672#else
4673 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4674#endif
4675 ST0 = 0.0 / 0.0; /* NaN */
4676 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4677 return;
4678 }
4679
4680 fpsrcop = ST0;
4681 fptemp = ST1;
4682 fpsrcop1.d = fpsrcop;
4683 fptemp1.d = fptemp;
4684 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4685
4686 if (expdif < 0) {
4687 /* optimisation? taken from the AMD docs */
4688 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4689 /* ST0 is unchanged */
4690 return;
4691 }
4692
4693 if (expdif < 53) {
4694 dblq = fpsrcop / fptemp;
4695 /* round dblq towards nearest integer */
4696 dblq = rint(dblq);
4697 ST0 = fpsrcop - fptemp * dblq;
4698
4699 /* convert dblq to q by truncating towards zero */
4700 if (dblq < 0.0)
4701 q = (signed long long int)(-dblq);
4702 else
4703 q = (signed long long int)dblq;
4704
4705 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4706 /* (C0,C3,C1) <-- (q2,q1,q0) */
4707 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4708 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4709 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4710 } else {
4711 env->fpus |= 0x400; /* C2 <-- 1 */
4712 fptemp = pow(2.0, expdif - 50);
4713 fpsrcop = (ST0 / ST1) / fptemp;
4714 /* fpsrcop = integer obtained by chopping */
4715 fpsrcop = (fpsrcop < 0.0) ?
4716 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4717 ST0 -= (ST1 * fpsrcop * fptemp);
4718 }
4719}
4720
4721void helper_fprem(void)
4722{
4723 CPU86_LDouble dblq, fpsrcop, fptemp;
4724 CPU86_LDoubleU fpsrcop1, fptemp1;
4725 int expdif;
4726 signed long long int q;
4727
4728#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4729 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4730#else
4731 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4732#endif
4733 ST0 = 0.0 / 0.0; /* NaN */
4734 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4735 return;
4736 }
4737
4738 fpsrcop = (CPU86_LDouble)ST0;
4739 fptemp = (CPU86_LDouble)ST1;
4740 fpsrcop1.d = fpsrcop;
4741 fptemp1.d = fptemp;
4742 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4743
4744 if (expdif < 0) {
4745 /* optimisation? taken from the AMD docs */
4746 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4747 /* ST0 is unchanged */
4748 return;
4749 }
4750
4751 if ( expdif < 53 ) {
4752 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4753 /* round dblq towards zero */
4754 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4755 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4756
4757 /* convert dblq to q by truncating towards zero */
4758 if (dblq < 0.0)
4759 q = (signed long long int)(-dblq);
4760 else
4761 q = (signed long long int)dblq;
4762
4763 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4764 /* (C0,C3,C1) <-- (q2,q1,q0) */
4765 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4766 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4767 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4768 } else {
4769 int N = 32 + (expdif % 32); /* as per AMD docs */
4770 env->fpus |= 0x400; /* C2 <-- 1 */
4771 fptemp = pow(2.0, (double)(expdif - N));
4772 fpsrcop = (ST0 / ST1) / fptemp;
4773 /* fpsrcop = integer obtained by chopping */
4774 fpsrcop = (fpsrcop < 0.0) ?
4775 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4776 ST0 -= (ST1 * fpsrcop * fptemp);
4777 }
4778}
4779
4780void helper_fyl2xp1(void)
4781{
4782 CPU86_LDouble fptemp;
4783
4784 fptemp = ST0;
4785 if ((fptemp+1.0)>0.0) {
4786 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4787 ST1 *= fptemp;
4788 fpop();
4789 } else {
4790 env->fpus &= (~0x4700);
4791 env->fpus |= 0x400;
4792 }
4793}
4794
4795void helper_fsqrt(void)
4796{
4797 CPU86_LDouble fptemp;
4798
4799 fptemp = ST0;
4800 if (fptemp<0.0) {
4801 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4802 env->fpus |= 0x400;
4803 }
4804 ST0 = sqrt(fptemp);
4805}
4806
4807void helper_fsincos(void)
4808{
4809 CPU86_LDouble fptemp;
4810
4811 fptemp = ST0;
4812 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4813 env->fpus |= 0x400;
4814 } else {
4815 ST0 = sin(fptemp);
4816 fpush();
4817 ST0 = cos(fptemp);
4818 env->fpus &= (~0x400); /* C2 <-- 0 */
4819 /* the above code is for |arg| < 2**63 only */
4820 }
4821}
4822
4823void helper_frndint(void)
4824{
4825 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4826}
4827
4828void helper_fscale(void)
4829{
4830 ST0 = ldexp (ST0, (int)(ST1));
4831}
4832
4833void helper_fsin(void)
4834{
4835 CPU86_LDouble fptemp;
4836
4837 fptemp = ST0;
4838 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4839 env->fpus |= 0x400;
4840 } else {
4841 ST0 = sin(fptemp);
4842 env->fpus &= (~0x400); /* C2 <-- 0 */
4843 /* the above code is for |arg| < 2**53 only */
4844 }
4845}
4846
4847void helper_fcos(void)
4848{
4849 CPU86_LDouble fptemp;
4850
4851 fptemp = ST0;
4852 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4853 env->fpus |= 0x400;
4854 } else {
4855 ST0 = cos(fptemp);
4856 env->fpus &= (~0x400); /* C2 <-- 0 */
4857        /* the above code is for |arg| < 2**63 only */
4858 }
4859}
4860
4861void helper_fxam_ST0(void)
4862{
4863 CPU86_LDoubleU temp;
4864 int expdif;
4865
4866 temp.d = ST0;
4867
4868 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4869 if (SIGND(temp))
4870 env->fpus |= 0x200; /* C1 <-- 1 */
4871
4872 /* XXX: test fptags too */
4873 expdif = EXPD(temp);
4874 if (expdif == MAXEXPD) {
4875#ifdef USE_X86LDOUBLE
4876 if (MANTD(temp) == 0x8000000000000000ULL)
4877#else
4878 if (MANTD(temp) == 0)
4879#endif
4880 env->fpus |= 0x500 /*Infinity*/;
4881 else
4882 env->fpus |= 0x100 /*NaN*/;
4883 } else if (expdif == 0) {
4884 if (MANTD(temp) == 0)
4885 env->fpus |= 0x4000 /*Zero*/;
4886 else
4887 env->fpus |= 0x4400 /*Denormal*/;
4888 } else {
4889 env->fpus |= 0x400;
4890 }
4891}
4892
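/* FSTENV/FNSTENV: store the FPU environment (control word, status word with
   TOP merged in, tag word, zeroed instruction/operand pointers) in either the
   32-bit or the 16-bit layout. Tag encoding: 0 valid, 1 zero, 2 special
   (NaN/infinity/denormal), 3 empty. */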
4893void helper_fstenv(target_ulong ptr, int data32)
4894{
4895 int fpus, fptag, exp, i;
4896 uint64_t mant;
4897 CPU86_LDoubleU tmp;
4898
4899 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4900 fptag = 0;
4901 for (i=7; i>=0; i--) {
4902 fptag <<= 2;
4903 if (env->fptags[i]) {
4904 fptag |= 3;
4905 } else {
4906 tmp.d = env->fpregs[i].d;
4907 exp = EXPD(tmp);
4908 mant = MANTD(tmp);
4909 if (exp == 0 && mant == 0) {
4910 /* zero */
4911 fptag |= 1;
4912 } else if (exp == 0 || exp == MAXEXPD
4913#ifdef USE_X86LDOUBLE
4914 || (mant & (1LL << 63)) == 0
4915#endif
4916 ) {
4917 /* NaNs, infinity, denormal */
4918 fptag |= 2;
4919 }
4920 }
4921 }
4922 if (data32) {
4923 /* 32 bit */
4924 stl(ptr, env->fpuc);
4925 stl(ptr + 4, fpus);
4926 stl(ptr + 8, fptag);
4927 stl(ptr + 12, 0); /* fpip */
4928 stl(ptr + 16, 0); /* fpcs */
4929 stl(ptr + 20, 0); /* fpoo */
4930 stl(ptr + 24, 0); /* fpos */
4931 } else {
4932 /* 16 bit */
4933 stw(ptr, env->fpuc);
4934 stw(ptr + 2, fpus);
4935 stw(ptr + 4, fptag);
4936 stw(ptr + 6, 0);
4937 stw(ptr + 8, 0);
4938 stw(ptr + 10, 0);
4939 stw(ptr + 12, 0);
4940 }
4941}
4942
4943void helper_fldenv(target_ulong ptr, int data32)
4944{
4945 int i, fpus, fptag;
4946
4947 if (data32) {
4948 env->fpuc = lduw(ptr);
4949 fpus = lduw(ptr + 4);
4950 fptag = lduw(ptr + 8);
4951 }
4952 else {
4953 env->fpuc = lduw(ptr);
4954 fpus = lduw(ptr + 2);
4955 fptag = lduw(ptr + 4);
4956 }
4957 env->fpstt = (fpus >> 11) & 7;
4958 env->fpus = fpus & ~0x3800;
4959 for(i = 0;i < 8; i++) {
4960 env->fptags[i] = ((fptag & 3) == 3);
4961 fptag >>= 2;
4962 }
4963}
4964
4965void helper_fsave(target_ulong ptr, int data32)
4966{
4967 CPU86_LDouble tmp;
4968 int i;
4969
4970 helper_fstenv(ptr, data32);
4971
4972 ptr += (14 << data32);
4973 for(i = 0;i < 8; i++) {
4974 tmp = ST(i);
4975 helper_fstt(tmp, ptr);
4976 ptr += 10;
4977 }
4978
4979 /* fninit */
4980 env->fpus = 0;
4981 env->fpstt = 0;
4982 env->fpuc = 0x37f;
4983 env->fptags[0] = 1;
4984 env->fptags[1] = 1;
4985 env->fptags[2] = 1;
4986 env->fptags[3] = 1;
4987 env->fptags[4] = 1;
4988 env->fptags[5] = 1;
4989 env->fptags[6] = 1;
4990 env->fptags[7] = 1;
4991}
4992
4993void helper_frstor(target_ulong ptr, int data32)
4994{
4995 CPU86_LDouble tmp;
4996 int i;
4997
4998 helper_fldenv(ptr, data32);
4999 ptr += (14 << data32);
5000
5001 for(i = 0;i < 8; i++) {
5002 tmp = helper_fldt(ptr);
5003 ST(i) = tmp;
5004 ptr += 10;
5005 }
5006}
5007
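/* FXSAVE layout as written here: FPU control/status/abridged tag words at the
   start of the area, the eight ST registers in 16-byte slots from offset 0x20,
   MXCSR and its mask at 0x18/0x1c, and the XMM registers from offset 0xa0. */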
5008void helper_fxsave(target_ulong ptr, int data64)
5009{
5010 int fpus, fptag, i, nb_xmm_regs;
5011 CPU86_LDouble tmp;
5012 target_ulong addr;
5013
5014 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5015 fptag = 0;
5016 for(i = 0; i < 8; i++) {
5017 fptag |= (env->fptags[i] << i);
5018 }
5019 stw(ptr, env->fpuc);
5020 stw(ptr + 2, fpus);
5021 stw(ptr + 4, fptag ^ 0xff);
5022#ifdef TARGET_X86_64
5023 if (data64) {
5024 stq(ptr + 0x08, 0); /* rip */
5025 stq(ptr + 0x10, 0); /* rdp */
5026 } else
5027#endif
5028 {
5029 stl(ptr + 0x08, 0); /* eip */
5030 stl(ptr + 0x0c, 0); /* sel */
5031 stl(ptr + 0x10, 0); /* dp */
5032 stl(ptr + 0x14, 0); /* sel */
5033 }
5034
5035 addr = ptr + 0x20;
5036 for(i = 0;i < 8; i++) {
5037 tmp = ST(i);
5038 helper_fstt(tmp, addr);
5039 addr += 16;
5040 }
5041
5042 if (env->cr[4] & CR4_OSFXSR_MASK) {
5043 /* XXX: finish it */
5044 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5045 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5046 if (env->hflags & HF_CS64_MASK)
5047 nb_xmm_regs = 16;
5048 else
5049 nb_xmm_regs = 8;
5050 addr = ptr + 0xa0;
5051 for(i = 0; i < nb_xmm_regs; i++) {
5052 stq(addr, env->xmm_regs[i].XMM_Q(0));
5053 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5054 addr += 16;
5055 }
5056 }
5057}
5058
5059void helper_fxrstor(target_ulong ptr, int data64)
5060{
5061 int i, fpus, fptag, nb_xmm_regs;
5062 CPU86_LDouble tmp;
5063 target_ulong addr;
5064
5065 env->fpuc = lduw(ptr);
5066 fpus = lduw(ptr + 2);
5067 fptag = lduw(ptr + 4);
5068 env->fpstt = (fpus >> 11) & 7;
5069 env->fpus = fpus & ~0x3800;
5070 fptag ^= 0xff;
5071 for(i = 0;i < 8; i++) {
5072 env->fptags[i] = ((fptag >> i) & 1);
5073 }
5074
5075 addr = ptr + 0x20;
5076 for(i = 0;i < 8; i++) {
5077 tmp = helper_fldt(addr);
5078 ST(i) = tmp;
5079 addr += 16;
5080 }
5081
5082 if (env->cr[4] & CR4_OSFXSR_MASK) {
5083 /* XXX: finish it */
5084 env->mxcsr = ldl(ptr + 0x18);
5085 //ldl(ptr + 0x1c);
5086 if (env->hflags & HF_CS64_MASK)
5087 nb_xmm_regs = 16;
5088 else
5089 nb_xmm_regs = 8;
5090 addr = ptr + 0xa0;
5091 for(i = 0; i < nb_xmm_regs; i++) {
5092#if !defined(VBOX) || __GNUC__ < 4
5093 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5094 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5095#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5096# if 1
5097 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5098 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5099 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5100 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5101# else
5102 /* this works fine on Mac OS X, gcc 4.0.1 */
5103 uint64_t u64 = ldq(addr);
5104            env->xmm_regs[i].XMM_Q(0) = u64;
5105            u64 = ldq(addr + 8);
5106 env->xmm_regs[i].XMM_Q(1) = u64;
5107# endif
5108#endif
5109 addr += 16;
5110 }
5111 }
5112}
5113
5114#ifndef USE_X86LDOUBLE
5115
5116void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5117{
5118 CPU86_LDoubleU temp;
5119 int e;
5120
5121 temp.d = f;
5122 /* mantissa */
5123 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5124 /* exponent + sign */
5125 e = EXPD(temp) - EXPBIAS + 16383;
5126 e |= SIGND(temp) >> 16;
5127 *pexp = e;
5128}
5129
5130CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5131{
5132 CPU86_LDoubleU temp;
5133 int e;
5134 uint64_t ll;
5135
5136 /* XXX: handle overflow ? */
5137 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5138 e |= (upper >> 4) & 0x800; /* sign */
5139 ll = (mant >> 11) & ((1LL << 52) - 1);
5140#ifdef __arm__
5141 temp.l.upper = (e << 20) | (ll >> 32);
5142 temp.l.lower = ll;
5143#else
5144 temp.ll = ll | ((uint64_t)e << 52);
5145#endif
5146 return temp.d;
5147}
5148
5149#else
5150
5151void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5152{
5153 CPU86_LDoubleU temp;
5154
5155 temp.d = f;
5156 *pmant = temp.l.lower;
5157 *pexp = temp.l.upper;
5158}
5159
5160CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5161{
5162 CPU86_LDoubleU temp;
5163
5164 temp.l.upper = upper;
5165 temp.l.lower = mant;
5166 return temp.d;
5167}
5168#endif
5169
5170#ifdef TARGET_X86_64
5171
5172//#define DEBUG_MULDIV
5173
5174static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5175{
5176 *plow += a;
5177 /* carry test */
5178 if (*plow < a)
5179 (*phigh)++;
5180 *phigh += b;
5181}
5182
5183static void neg128(uint64_t *plow, uint64_t *phigh)
5184{
5185 *plow = ~ *plow;
5186 *phigh = ~ *phigh;
5187 add128(plow, phigh, 1, 0);
5188}
5189
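/* 128-by-64-bit unsigned division of {*phigh:*plow} by b: the quotient is
   left in *plow and the remainder in *phigh. When the high part is non-zero,
   a simple shift-and-subtract loop is used. */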
5190/* return TRUE if overflow */
5191static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5192{
5193 uint64_t q, r, a1, a0;
5194 int i, qb, ab;
5195
5196 a0 = *plow;
5197 a1 = *phigh;
5198 if (a1 == 0) {
5199 q = a0 / b;
5200 r = a0 % b;
5201 *plow = q;
5202 *phigh = r;
5203 } else {
5204 if (a1 >= b)
5205 return 1;
5206 /* XXX: use a better algorithm */
5207 for(i = 0; i < 64; i++) {
5208 ab = a1 >> 63;
5209 a1 = (a1 << 1) | (a0 >> 63);
5210 if (ab || a1 >= b) {
5211 a1 -= b;
5212 qb = 1;
5213 } else {
5214 qb = 0;
5215 }
5216 a0 = (a0 << 1) | qb;
5217 }
5218#if defined(DEBUG_MULDIV)
5219 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5220 *phigh, *plow, b, a0, a1);
5221#endif
5222 *plow = a0;
5223 *phigh = a1;
5224 }
5225 return 0;
5226}
5227
5228/* return TRUE if overflow */
5229static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5230{
5231 int sa, sb;
5232 sa = ((int64_t)*phigh < 0);
5233 if (sa)
5234 neg128(plow, phigh);
5235 sb = (b < 0);
5236 if (sb)
5237 b = -b;
5238 if (div64(plow, phigh, b) != 0)
5239 return 1;
5240 if (sa ^ sb) {
5241 if (*plow > (1ULL << 63))
5242 return 1;
5243 *plow = - *plow;
5244 } else {
5245 if (*plow >= (1ULL << 63))
5246 return 1;
5247 }
5248 if (sa)
5249 *phigh = - *phigh;
5250 return 0;
5251}
5252
5253void helper_mulq_EAX_T0(target_ulong t0)
5254{
5255 uint64_t r0, r1;
5256
5257 mulu64(&r0, &r1, EAX, t0);
5258 EAX = r0;
5259 EDX = r1;
5260 CC_DST = r0;
5261 CC_SRC = r1;
5262}
5263
5264void helper_imulq_EAX_T0(target_ulong t0)
5265{
5266 uint64_t r0, r1;
5267
5268 muls64(&r0, &r1, EAX, t0);
5269 EAX = r0;
5270 EDX = r1;
5271 CC_DST = r0;
5272 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5273}
5274
5275target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5276{
5277 uint64_t r0, r1;
5278
5279 muls64(&r0, &r1, t0, t1);
5280 CC_DST = r0;
5281 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5282 return r0;
5283}
5284
5285void helper_divq_EAX(target_ulong t0)
5286{
5287 uint64_t r0, r1;
5288 if (t0 == 0) {
5289 raise_exception(EXCP00_DIVZ);
5290 }
5291 r0 = EAX;
5292 r1 = EDX;
5293 if (div64(&r0, &r1, t0))
5294 raise_exception(EXCP00_DIVZ);
5295 EAX = r0;
5296 EDX = r1;
5297}
5298
5299void helper_idivq_EAX(target_ulong t0)
5300{
5301 uint64_t r0, r1;
5302 if (t0 == 0) {
5303 raise_exception(EXCP00_DIVZ);
5304 }
5305 r0 = EAX;
5306 r1 = EDX;
5307 if (idiv64(&r0, &r1, t0))
5308 raise_exception(EXCP00_DIVZ);
5309 EAX = r0;
5310 EDX = r1;
5311}
5312#endif
5313
5314static void do_hlt(void)
5315{
5316 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5317 env->halted = 1;
5318 env->exception_index = EXCP_HLT;
5319 cpu_loop_exit();
5320}
5321
5322void helper_hlt(int next_eip_addend)
5323{
5324 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5325 EIP += next_eip_addend;
5326
5327 do_hlt();
5328}
5329
5330void helper_monitor(target_ulong ptr)
5331{
5332 if ((uint32_t)ECX != 0)
5333 raise_exception(EXCP0D_GPF);
5334 /* XXX: store address ? */
5335 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5336}
5337
5338void helper_mwait(int next_eip_addend)
5339{
5340 if ((uint32_t)ECX != 0)
5341 raise_exception(EXCP0D_GPF);
5342#ifdef VBOX
5343 helper_hlt(next_eip_addend);
5344#else
5345 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5346 EIP += next_eip_addend;
5347
5348 /* XXX: not complete but not completely erroneous */
5349 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5350 /* more than one CPU: do not sleep because another CPU may
5351 wake this one */
5352 } else {
5353 do_hlt();
5354 }
5355#endif
5356}
5357
5358void helper_debug(void)
5359{
5360 env->exception_index = EXCP_DEBUG;
5361 cpu_loop_exit();
5362}
5363
5364void helper_raise_interrupt(int intno, int next_eip_addend)
5365{
5366 raise_interrupt(intno, 1, 0, next_eip_addend);
5367}
5368
5369void helper_raise_exception(int exception_index)
5370{
5371 raise_exception(exception_index);
5372}
5373
5374void helper_cli(void)
5375{
5376 env->eflags &= ~IF_MASK;
5377}
5378
5379void helper_sti(void)
5380{
5381 env->eflags |= IF_MASK;
5382}
5383
5384#ifdef VBOX
5385void helper_cli_vme(void)
5386{
5387 env->eflags &= ~VIF_MASK;
5388}
5389
5390void helper_sti_vme(void)
5391{
5392 /* First check, then change eflags according to the AMD manual */
5393 if (env->eflags & VIP_MASK) {
5394 raise_exception(EXCP0D_GPF);
5395 }
5396 env->eflags |= VIF_MASK;
5397}
5398#endif
5399
5400#if 0
5401/* vm86plus instructions */
5402void helper_cli_vm(void)
5403{
5404 env->eflags &= ~VIF_MASK;
5405}
5406
5407void helper_sti_vm(void)
5408{
5409 env->eflags |= VIF_MASK;
5410 if (env->eflags & VIP_MASK) {
5411 raise_exception(EXCP0D_GPF);
5412 }
5413}
5414#endif
5415
5416void helper_set_inhibit_irq(void)
5417{
5418 env->hflags |= HF_INHIBIT_IRQ_MASK;
5419}
5420
5421void helper_reset_inhibit_irq(void)
5422{
5423 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5424}
5425
5426void helper_boundw(target_ulong a0, int v)
5427{
5428 int low, high;
5429 low = ldsw(a0);
5430 high = ldsw(a0 + 2);
5431 v = (int16_t)v;
5432 if (v < low || v > high) {
5433 raise_exception(EXCP05_BOUND);
5434 }
5435 FORCE_RET();
5436}
5437
5438void helper_boundl(target_ulong a0, int v)
5439{
5440 int low, high;
5441 low = ldl(a0);
5442 high = ldl(a0 + 4);
5443 if (v < low || v > high) {
5444 raise_exception(EXCP05_BOUND);
5445 }
5446 FORCE_RET();
5447}
5448
5449static float approx_rsqrt(float a)
5450{
5451 return 1.0 / sqrt(a);
5452}
5453
5454static float approx_rcp(float a)
5455{
5456 return 1.0 / a;
5457}
5458
5459#if !defined(CONFIG_USER_ONLY)
5460
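/* Instantiate the softmmu load/store helpers for 8-, 16-, 32- and 64-bit
   accesses (SHIFT selects the access size as 1 << SHIFT bytes). */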
5461#define MMUSUFFIX _mmu
5462
5463#define SHIFT 0
5464#include "softmmu_template.h"
5465
5466#define SHIFT 1
5467#include "softmmu_template.h"
5468
5469#define SHIFT 2
5470#include "softmmu_template.h"
5471
5472#define SHIFT 3
5473#include "softmmu_template.h"
5474
5475#endif
5476
5477#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5478/* This code assumes that real physical addresses always fit into a host CPU
5479   register, which is wrong in general but true for our current use cases. */
5480RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5481{
5482 return remR3PhysReadS8(addr);
5483}
5484RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5485{
5486 return remR3PhysReadU8(addr);
5487}
5488void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5489{
5490 remR3PhysWriteU8(addr, val);
5491}
5492RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5493{
5494 return remR3PhysReadS16(addr);
5495}
5496RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5497{
5498 return remR3PhysReadU16(addr);
5499}
5500void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5501{
5502 remR3PhysWriteU16(addr, val);
5503}
5504RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5505{
5506 return remR3PhysReadS32(addr);
5507}
5508RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5509{
5510 return remR3PhysReadU32(addr);
5511}
5512void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5513{
5514 remR3PhysWriteU32(addr, val);
5515}
5516uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5517{
5518 return remR3PhysReadU64(addr);
5519}
5520void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5521{
5522 remR3PhysWriteU64(addr, val);
5523}
5524#endif
5525
5526/* try to fill the TLB and return an exception if error. If retaddr is
5527 NULL, it means that the function was called in C code (i.e. not
5528 from generated code or from helper.c) */
5529/* XXX: fix it to restore all registers */
5530void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5531{
5532 TranslationBlock *tb;
5533 int ret;
5534 unsigned long pc;
5535 CPUX86State *saved_env;
5536
5537 /* XXX: hack to restore env in all cases, even if not called from
5538 generated code */
5539 saved_env = env;
5540 env = cpu_single_env;
5541
5542 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5543 if (ret) {
5544 if (retaddr) {
5545 /* now we have a real cpu fault */
5546 pc = (unsigned long)retaddr;
5547 tb = tb_find_pc(pc);
5548 if (tb) {
5549 /* the PC is inside the translated code. It means that we have
5550 a virtual CPU fault */
5551 cpu_restore_state(tb, env, pc, NULL);
5552 }
5553 }
5554 raise_exception_err(env->exception_index, env->error_code);
5555 }
5556 env = saved_env;
5557}
5558
5559#ifdef VBOX
5560
5561/**
5562 * Correctly computes the eflags.
5563 * @returns eflags.
5564 * @param env1 CPU environment.
5565 */
5566uint32_t raw_compute_eflags(CPUX86State *env1)
5567{
5568 CPUX86State *savedenv = env;
5569 uint32_t efl;
5570 env = env1;
5571 efl = compute_eflags();
5572 env = savedenv;
5573 return efl;
5574}
5575
5576/**
5577 * Reads a byte from a virtual address in the guest memory area.
5578 * XXX: does this work for all addresses, including swapped-out pages?
5579 * @returns The data byte read.
5580 * @param env1 CPU environment.
5581 * @param addr GC Virtual address.
5582 */
5583uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5584{
5585 CPUX86State *savedenv = env;
5586 uint8_t u8;
5587 env = env1;
5588 u8 = ldub_kernel(addr);
5589 env = savedenv;
5590 return u8;
5591}
5592
5593/**
5594 * Reads a word from a virtual address in the guest memory area.
5595 * XXX: does this work for all addresses, including swapped-out pages?
5596 * @returns The data word read.
5597 * @param env1 CPU environment.
5598 * @param addr GC Virtual address.
5599 */
5600uint16_t read_word(CPUX86State *env1, target_ulong addr)
5601{
5602 CPUX86State *savedenv = env;
5603 uint16_t u16;
5604 env = env1;
5605 u16 = lduw_kernel(addr);
5606 env = savedenv;
5607 return u16;
5608}
5609
5610/**
5611 * Reads a dword from a virtual address in the guest memory area.
5612 * XXX: does this work for all addresses, including swapped-out pages?
5613 * @returns The dword read.
5614 * @param env1 CPU environment.
5615 * @param addr GC Virtual address.
5616 */
5617uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5618{
5619 CPUX86State *savedenv = env;
5620 uint32_t u32;
5621 env = env1;
5622 u32 = ldl_kernel(addr);
5623 env = savedenv;
5624 return u32;
5625}
5626
5627/**
5628 * Writes a byte to a virtual address in the guest memory area.
5629 * XXX: does this work for all addresses, including swapped-out pages?
5630 *
5631 * @param env1 CPU environment.
5632 * @param addr GC Virtual address.
5633 * @param val byte value
5634 */
5635void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5636{
5637 CPUX86State *savedenv = env;
5638 env = env1;
5639 stb(addr, val);
5640 env = savedenv;
5641}
5642
5643void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5644{
5645 CPUX86State *savedenv = env;
5646 env = env1;
5647 stw(addr, val);
5648 env = savedenv;
5649}
5650
5651void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5652{
5653 CPUX86State *savedenv = env;
5654 env = env1;
5655 stl(addr, val);
5656 env = savedenv;
5657}
5658
5659/**
5660 * Correctly loads a selector into a segment register, updating the internal
5661 * qemu data/caches.
5662 * @param env1 CPU environment.
5663 * @param seg_reg Segment register.
5664 * @param selector Selector to load.
5665 */
5666void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5667{
5668 CPUX86State *savedenv = env;
5669 jmp_buf old_buf;
5670
5671 env = env1;
5672
5673 if ( env->eflags & X86_EFL_VM
5674 || !(env->cr[0] & X86_CR0_PE))
5675 {
5676 load_seg_vm(seg_reg, selector);
5677
5678 env = savedenv;
5679
5680 /* Successful sync. */
5681 env1->segs[seg_reg].newselector = 0;
5682 }
5683 else
5684 {
5685        /* For some reason this works even without saving/restoring the jump buffer;
5686           as this code is time critical - let's not do that by default. */
5687#ifdef FORCE_SEGMENT_SYNC
5688 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5689#endif
5690 if (setjmp(env1->jmp_env) == 0)
5691 {
5692 if (seg_reg == R_CS)
5693 {
5694 uint32_t e1, e2;
5695 e1 = e2 = 0;
5696 load_segment(&e1, &e2, selector);
5697 cpu_x86_load_seg_cache(env, R_CS, selector,
5698 get_seg_base(e1, e2),
5699 get_seg_limit(e1, e2),
5700 e2);
5701 }
5702 else
5703 tss_load_seg(seg_reg, selector);
5704 env = savedenv;
5705
5706 /* Successful sync. */
5707 env1->segs[seg_reg].newselector = 0;
5708 }
5709 else
5710 {
5711 env = savedenv;
5712
5713 /* Postpone sync until the guest uses the selector. */
5714 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5715 env1->segs[seg_reg].newselector = selector;
5716 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5717 env1->exception_index = -1;
5718 env1->error_code = 0;
5719 env1->old_exception = -1;
5720 }
5721#ifdef FORCE_SEGMENT_SYNC
5722 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5723#endif
5724 }
5725
5726}
5727
5728DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5729{
5730 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5731}
5732
5733
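/**
 * Translates and executes a single guest instruction by generating a
 * one-instruction translation block, running it until EIP changes (or an
 * external interrupt becomes deliverable), and then freeing the block again.
 * @param env1 CPU environment.
 */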
5734int emulate_single_instr(CPUX86State *env1)
5735{
5736 TranslationBlock *tb;
5737 TranslationBlock *current;
5738 int flags;
5739 uint8_t *tc_ptr;
5740 target_ulong old_eip;
5741
5742 /* ensures env is loaded! */
5743 CPUX86State *savedenv = env;
5744 env = env1;
5745
5746 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5747
5748 current = env->current_tb;
5749 env->current_tb = NULL;
5750 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5751
5752 /*
5753 * Translate only one instruction.
5754 */
5755 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5756 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5757 env->segs[R_CS].base, flags, 0);
5758
5759 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5760
5761
5762 /* tb_link_phys: */
5763 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5764 tb->jmp_next[0] = NULL;
5765 tb->jmp_next[1] = NULL;
5766 Assert(tb->jmp_next[0] == NULL);
5767 Assert(tb->jmp_next[1] == NULL);
5768 if (tb->tb_next_offset[0] != 0xffff)
5769 tb_reset_jump(tb, 0);
5770 if (tb->tb_next_offset[1] != 0xffff)
5771 tb_reset_jump(tb, 1);
5772
5773 /*
5774 * Execute it using emulation
5775 */
5776 old_eip = env->eip;
5777 env->current_tb = tb;
5778
5779 /*
5780 * eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code
5781 * perhaps not a very safe hack
5782 */
5783 while(old_eip == env->eip)
5784 {
5785 tc_ptr = tb->tc_ptr;
5786
5787#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5788 int fake_ret;
5789 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5790#else
5791 tcg_qemu_tb_exec(tc_ptr);
5792#endif
5793 /*
5794 * Exit once we detect an external interrupt and interrupts are enabled
5795 */
5796 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5797 ( (env->eflags & IF_MASK) &&
5798 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5799 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5800 {
5801 break;
5802 }
5803 }
5804 env->current_tb = current;
5805
5806 tb_phys_invalidate(tb, -1);
5807 tb_free(tb);
5808/*
5809 Assert(tb->tb_next_offset[0] == 0xffff);
5810 Assert(tb->tb_next_offset[1] == 0xffff);
5811 Assert(tb->tb_next[0] == 0xffff);
5812 Assert(tb->tb_next[1] == 0xffff);
5813 Assert(tb->jmp_next[0] == NULL);
5814 Assert(tb->jmp_next[1] == NULL);
5815 Assert(tb->jmp_first == NULL); */
5816
5817 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5818
5819 /*
5820 * Execute the next instruction when we encounter instruction fusing.
5821 */
5822 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5823 {
5824 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5825 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5826 emulate_single_instr(env);
5827 }
5828
5829 env = savedenv;
5830 return 0;
5831}
5832
5833/**
5834 * Correctly loads a new ldtr selector.
5835 *
5836 * @param env1 CPU environment.
5837 * @param selector Selector to load.
5838 */
5839void sync_ldtr(CPUX86State *env1, int selector)
5840{
5841 CPUX86State *saved_env = env;
5842 if (setjmp(env1->jmp_env) == 0)
5843 {
5844 env = env1;
5845 helper_lldt(selector);
5846 env = saved_env;
5847 }
5848 else
5849 {
5850 env = saved_env;
5851#ifdef VBOX_STRICT
5852 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5853#endif
5854 }
5855}
5856
5857/**
5858 * Correctly loads a new tr selector.
5859 *
5860 * @param env1 CPU environment.
5861 * @param selector Selector to load.
5862 */
5863int sync_tr(CPUX86State *env1, int selector)
5864{
5865 /* ARG! this was going to call helper_ltr_T0 but that won't work because of busy flag. */
5866 SegmentCache *dt;
5867 uint32_t e1, e2;
5868 int index, type, entry_limit;
5869 target_ulong ptr;
5870 CPUX86State *saved_env = env;
5871 env = env1;
5872
5873 selector &= 0xffff;
5874 if ((selector & 0xfffc) == 0) {
5875 /* NULL selector case: invalid TR */
5876 env->tr.base = 0;
5877 env->tr.limit = 0;
5878 env->tr.flags = 0;
5879 } else {
5880 if (selector & 0x4)
5881 goto l_failure;
5882 dt = &env->gdt;
5883 index = selector & ~7;
5884#ifdef TARGET_X86_64
5885 if (env->hflags & HF_LMA_MASK)
5886 entry_limit = 15;
5887 else
5888#endif
5889 entry_limit = 7;
5890 if ((index + entry_limit) > dt->limit)
5891 goto l_failure;
5892 ptr = dt->base + index;
5893 e1 = ldl_kernel(ptr);
5894 e2 = ldl_kernel(ptr + 4);
5895 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
5896 if ((e2 & DESC_S_MASK) /*||
5897 (type != 1 && type != 9)*/)
5898 goto l_failure;
5899 if (!(e2 & DESC_P_MASK))
5900 goto l_failure;
5901#ifdef TARGET_X86_64
5902 if (env->hflags & HF_LMA_MASK) {
5903 uint32_t e3;
5904 e3 = ldl_kernel(ptr + 8);
5905 load_seg_cache_raw_dt(&env->tr, e1, e2);
5906 env->tr.base |= (target_ulong)e3 << 32;
5907 } else
5908#endif
5909 {
5910 load_seg_cache_raw_dt(&env->tr, e1, e2);
5911 }
5912 e2 |= DESC_TSS_BUSY_MASK;
5913 stl_kernel(ptr + 4, e2);
5914 }
5915 env->tr.selector = selector;
5916
5917 env = saved_env;
5918 return 0;
5919l_failure:
5920 AssertMsgFailed(("selector=%d\n", selector));
5921 return -1;
5922}
5923
5924
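/**
 * Reads the stack segment and stack pointer for the given privilege level
 * from the current TSS (16- or 32-bit format).
 * @returns 1 on success, 0 if the TSS is too small for the requested entry.
 * @param env1 CPU environment.
 */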
5925int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5926 uint32_t *esp_ptr, int dpl)
5927{
5928 int type, index, shift;
5929
5930 CPUX86State *savedenv = env;
5931 env = env1;
5932
5933 if (!(env->tr.flags & DESC_P_MASK))
5934 cpu_abort(env, "invalid tss");
5935 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5936 if ((type & 7) != 1)
5937 cpu_abort(env, "invalid tss type %d", type);
5938 shift = type >> 3;
5939 index = (dpl * 4 + 2) << shift;
5940 if (index + (4 << shift) - 1 > env->tr.limit)
5941 {
5942 env = savedenv;
5943 return 0;
5944 }
5945 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5946
5947 if (shift == 0) {
5948 *esp_ptr = lduw_kernel(env->tr.base + index);
5949 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5950 } else {
5951 *esp_ptr = ldl_kernel(env->tr.base + index);
5952 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5953 }
5954
5955 env = savedenv;
5956 return 1;
5957}
5958
5959//*****************************************************************************
5960// Needs to be at the bottom of the file (overriding macros)
5961
5962#ifndef VBOX
5963static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5964#else /* VBOX */
5965DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5966#endif /* VBOX */
5967{
5968 return *(CPU86_LDouble *)ptr;
5969}
5970
5971#ifndef VBOX
5972static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5973#else /* VBOX */
5974DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5975#endif /* VBOX */
5976{
5977 *(CPU86_LDouble *)ptr = f;
5978}
5979
5980#undef stw
5981#undef stl
5982#undef stq
5983#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5984#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5985#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5986
5987//*****************************************************************************
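/* Despite the name, this writes the FPU/SSE state *from* 'env' *to* the raw
   memory block at 'ptr': in FXSAVE layout when the CPU reports FXSR support,
   otherwise in the legacy FSAVE layout. save_raw_fp_state() below does the
   opposite direction. */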
5988void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5989{
5990 int fpus, fptag, i, nb_xmm_regs;
5991 CPU86_LDouble tmp;
5992 uint8_t *addr;
5993 int data64 = !!(env->hflags & HF_LMA_MASK);
5994
5995 if (env->cpuid_features & CPUID_FXSR)
5996 {
5997 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5998 fptag = 0;
5999 for(i = 0; i < 8; i++) {
6000 fptag |= (env->fptags[i] << i);
6001 }
6002 stw(ptr, env->fpuc);
6003 stw(ptr + 2, fpus);
6004 stw(ptr + 4, fptag ^ 0xff);
6005
6006 addr = ptr + 0x20;
6007 for(i = 0;i < 8; i++) {
6008 tmp = ST(i);
6009 helper_fstt_raw(tmp, addr);
6010 addr += 16;
6011 }
6012
6013 if (env->cr[4] & CR4_OSFXSR_MASK) {
6014 /* XXX: finish it */
6015 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6016 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6017 nb_xmm_regs = 8 << data64;
6018 addr = ptr + 0xa0;
6019 for(i = 0; i < nb_xmm_regs; i++) {
6020#if __GNUC__ < 4
6021 stq(addr, env->xmm_regs[i].XMM_Q(0));
6022 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6023#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6024 stl(addr, env->xmm_regs[i].XMM_L(0));
6025 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6026 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6027 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6028#endif
6029 addr += 16;
6030 }
6031 }
6032 }
6033 else
6034 {
6035 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6036 int fptag;
6037
6038 fp->FCW = env->fpuc;
6039 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6040 fptag = 0;
6041 for (i=7; i>=0; i--) {
6042 fptag <<= 2;
6043 if (env->fptags[i]) {
6044 fptag |= 3;
6045 } else {
6046 /* the FPU automatically computes it */
6047 }
6048 }
6049 fp->FTW = fptag;
6050
6051 for(i = 0;i < 8; i++) {
6052 tmp = ST(i);
6053 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
6054 }
6055 }
6056}
6057
6058//*****************************************************************************
6059#undef lduw
6060#undef ldl
6061#undef ldq
6062#define lduw(a) *(uint16_t *)(a)
6063#define ldl(a) *(uint32_t *)(a)
6064#define ldq(a) *(uint64_t *)(a)
6065//*****************************************************************************
6066void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6067{
6068 int i, fpus, fptag, nb_xmm_regs;
6069 CPU86_LDouble tmp;
6070 uint8_t *addr;
6071 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6072
6073 if (env->cpuid_features & CPUID_FXSR)
6074 {
6075 env->fpuc = lduw(ptr);
6076 fpus = lduw(ptr + 2);
6077 fptag = lduw(ptr + 4);
6078 env->fpstt = (fpus >> 11) & 7;
6079 env->fpus = fpus & ~0x3800;
6080 fptag ^= 0xff;
6081 for(i = 0;i < 8; i++) {
6082 env->fptags[i] = ((fptag >> i) & 1);
6083 }
6084
6085 addr = ptr + 0x20;
6086 for(i = 0;i < 8; i++) {
6087 tmp = helper_fldt_raw(addr);
6088 ST(i) = tmp;
6089 addr += 16;
6090 }
6091
6092 if (env->cr[4] & CR4_OSFXSR_MASK) {
6093 /* XXX: finish it, endianness */
6094 env->mxcsr = ldl(ptr + 0x18);
6095 //ldl(ptr + 0x1c);
6096 nb_xmm_regs = 8 << data64;
6097 addr = ptr + 0xa0;
6098 for(i = 0; i < nb_xmm_regs; i++) {
6099#if HC_ARCH_BITS == 32
6100 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6101 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6102 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6103 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6104 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6105#else
6106 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6107 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6108#endif
6109 addr += 16;
6110 }
6111 }
6112 }
6113 else
6114 {
6115 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6116 int fptag, j;
6117
6118 env->fpuc = fp->FCW;
6119 env->fpstt = (fp->FSW >> 11) & 7;
6120 env->fpus = fp->FSW & ~0x3800;
6121 fptag = fp->FTW;
6122 for(i = 0;i < 8; i++) {
6123 env->fptags[i] = ((fptag & 3) == 3);
6124 fptag >>= 2;
6125 }
6126 j = env->fpstt;
6127 for(i = 0;i < 8; i++) {
6128 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
6129 ST(i) = tmp;
6130 }
6131 }
6132}
6133//*****************************************************************************
6134//*****************************************************************************
6135
6136#endif /* VBOX */
6137
6138/* Secure Virtual Machine helpers */
6139
6140#if defined(CONFIG_USER_ONLY)
6141
6142void helper_vmrun(int aflag, int next_eip_addend)
6143{
6144}
6145void helper_vmmcall(void)
6146{
6147}
6148void helper_vmload(int aflag)
6149{
6150}
6151void helper_vmsave(int aflag)
6152{
6153}
6154void helper_stgi(void)
6155{
6156}
6157void helper_clgi(void)
6158{
6159}
6160void helper_skinit(void)
6161{
6162}
6163void helper_invlpga(int aflag)
6164{
6165}
6166void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6167{
6168}
6169void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6170{
6171}
6172
6173void helper_svm_check_io(uint32_t port, uint32_t param,
6174 uint32_t next_eip_addend)
6175{
6176}
6177#else
6178
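/* Helpers for marshalling a SegmentCache to and from the VMCB segment
   descriptor layout (selector, attrib, limit, base); the attrib field packs
   the descriptor flag bits 8..15 and 20..23. */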
6179#ifndef VBOX
6180static inline void svm_save_seg(target_phys_addr_t addr,
6181#else /* VBOX */
6182DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6183#endif /* VBOX */
6184 const SegmentCache *sc)
6185{
6186 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6187 sc->selector);
6188 stq_phys(addr + offsetof(struct vmcb_seg, base),
6189 sc->base);
6190 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6191 sc->limit);
6192 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6193 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6194}
6195
6196#ifndef VBOX
6197static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6198#else /* VBOX */
6199DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6200#endif /* VBOX */
6201{
6202 unsigned int flags;
6203
6204 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6205 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6206 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6207 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6208 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6209}
6210
6211#ifndef VBOX
6212static inline void svm_load_seg_cache(target_phys_addr_t addr,
6213#else /* VBOX */
6214DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6215#endif /* VBOX */
6216 CPUState *env, int seg_reg)
6217{
6218 SegmentCache sc1, *sc = &sc1;
6219 svm_load_seg(addr, sc);
6220 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6221 sc->base, sc->limit, sc->flags);
6222}
6223
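/* VMRUN: saves the current (host) state into the hsave area, loads the guest
   state and intercept controls from the VMCB pointed to by rAX, and injects
   any pending event from the VMCB before resuming guest execution. */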
6224void helper_vmrun(int aflag, int next_eip_addend)
6225{
6226 target_ulong addr;
6227 uint32_t event_inj;
6228 uint32_t int_ctl;
6229
6230 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6231
6232 if (aflag == 2)
6233 addr = EAX;
6234 else
6235 addr = (uint32_t)EAX;
6236
6237 if (loglevel & CPU_LOG_TB_IN_ASM)
6238 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6239
6240 env->vm_vmcb = addr;
6241
6242 /* save the current CPU state in the hsave page */
6243 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6244 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6245
6246 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6247 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6248
6249 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6250 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6251 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6252 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6253 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6254 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6255
6256 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6257 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6258
6259 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6260 &env->segs[R_ES]);
6261 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6262 &env->segs[R_CS]);
6263 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6264 &env->segs[R_SS]);
6265 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6266 &env->segs[R_DS]);
6267
6268 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6269 EIP + next_eip_addend);
6270 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6271 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6272
6273 /* load the interception bitmaps so we do not need to access the
6274 vmcb in svm mode */
6275 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6276 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6277 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6278 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6279 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6280 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6281
6282 /* enable intercepts */
6283 env->hflags |= HF_SVMI_MASK;
6284
6285 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6286
6287 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6288 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6289
6290 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6291 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6292
6293 /* clear exit_info_2 so we behave like the real hardware */
6294 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6295
6296 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6297 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6298 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6299 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6300 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6301 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6302 if (int_ctl & V_INTR_MASKING_MASK) {
6303 env->v_tpr = int_ctl & V_TPR_MASK;
6304 env->hflags2 |= HF2_VINTR_MASK;
6305 if (env->eflags & IF_MASK)
6306 env->hflags2 |= HF2_HIF_MASK;
6307 }
6308
6309 cpu_load_efer(env,
6310 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6311 env->eflags = 0;
6312 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6313 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6314 CC_OP = CC_OP_EFLAGS;
6315
6316 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6317 env, R_ES);
6318 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6319 env, R_CS);
6320 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6321 env, R_SS);
6322 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6323 env, R_DS);
6324
6325 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6326 env->eip = EIP;
6327 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6328 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6329 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6330 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6331 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6332
6333 /* FIXME: guest state consistency checks */
6334
6335 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6336 case TLB_CONTROL_DO_NOTHING:
6337 break;
6338 case TLB_CONTROL_FLUSH_ALL_ASID:
6339 /* FIXME: this is not 100% correct but should work for now */
6340 tlb_flush(env, 1);
6341 break;
6342 }
6343
6344 env->hflags2 |= HF2_GIF_MASK;
6345
6346 if (int_ctl & V_IRQ_MASK) {
6347 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6348 }
6349
6350 /* maybe we need to inject an event */
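    /* EVENTINJ layout (per the SVM spec): bits 7:0 vector, bits 10:8
       event type (INTR/NMI/exception/software int), bit 11 error code
       valid, bit 31 event valid; the error code itself sits in the
       separate event_inj_err field */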
6351 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6352 if (event_inj & SVM_EVTINJ_VALID) {
6353 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6354 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6355 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6356 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6357
6358 if (loglevel & CPU_LOG_TB_IN_ASM)
6359 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6360 /* FIXME: need to implement valid_err */
6361 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6362 case SVM_EVTINJ_TYPE_INTR:
6363 env->exception_index = vector;
6364 env->error_code = event_inj_err;
6365 env->exception_is_int = 0;
6366 env->exception_next_eip = -1;
6367 if (loglevel & CPU_LOG_TB_IN_ASM)
6368 fprintf(logfile, "INTR");
6369 /* XXX: is this always correct? */
6370 do_interrupt(vector, 0, 0, 0, 1);
6371 break;
6372 case SVM_EVTINJ_TYPE_NMI:
6373 env->exception_index = EXCP02_NMI;
6374 env->error_code = event_inj_err;
6375 env->exception_is_int = 0;
6376 env->exception_next_eip = EIP;
6377 if (loglevel & CPU_LOG_TB_IN_ASM)
6378 fprintf(logfile, "NMI");
6379 cpu_loop_exit();
6380 break;
6381 case SVM_EVTINJ_TYPE_EXEPT:
6382 env->exception_index = vector;
6383 env->error_code = event_inj_err;
6384 env->exception_is_int = 0;
6385 env->exception_next_eip = -1;
6386 if (loglevel & CPU_LOG_TB_IN_ASM)
6387 fprintf(logfile, "EXEPT");
6388 cpu_loop_exit();
6389 break;
6390 case SVM_EVTINJ_TYPE_SOFT:
6391 env->exception_index = vector;
6392 env->error_code = event_inj_err;
6393 env->exception_is_int = 1;
6394 env->exception_next_eip = EIP;
6395 if (loglevel & CPU_LOG_TB_IN_ASM)
6396 fprintf(logfile, "SOFT");
6397 cpu_loop_exit();
6398 break;
6399 }
6400 if (loglevel & CPU_LOG_TB_IN_ASM)
6401 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6402 }
6403}
6404
6405void helper_vmmcall(void)
6406{
6407 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6408 raise_exception(EXCP06_ILLOP);
6409}
6410
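/* VMLOAD/VMSAVE transfer the guest state that VMRUN/#VMEXIT do not
   switch: FS, GS, TR and LDTR including their hidden parts, plus
   KernelGSBase, STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs, i.e.
   exactly the fields accessed below. */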
6411void helper_vmload(int aflag)
6412{
6413 target_ulong addr;
6414 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6415
6416 if (aflag == 2)
6417 addr = EAX;
6418 else
6419 addr = (uint32_t)EAX;
6420
6421 if (loglevel & CPU_LOG_TB_IN_ASM)
6422 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6423 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6424 env->segs[R_FS].base);
6425
6426 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6427 env, R_FS);
6428 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6429 env, R_GS);
6430 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6431 &env->tr);
6432 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6433 &env->ldt);
6434
6435#ifdef TARGET_X86_64
6436 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6437 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6438 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6439 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6440#endif
6441 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6442 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6443 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6444 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6445}
6446
6447void helper_vmsave(int aflag)
6448{
6449 target_ulong addr;
6450 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6451
6452 if (aflag == 2)
6453 addr = EAX;
6454 else
6455 addr = (uint32_t)EAX;
6456
6457 if (loglevel & CPU_LOG_TB_IN_ASM)
6458 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6459 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6460 env->segs[R_FS].base);
6461
6462 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6463 &env->segs[R_FS]);
6464 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6465 &env->segs[R_GS]);
6466 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6467 &env->tr);
6468 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6469 &env->ldt);
6470
6471#ifdef TARGET_X86_64
6472 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6473 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6474 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6475 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6476#endif
6477 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6478 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6479 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6480 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6481}
6482
6483void helper_stgi(void)
6484{
6485 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6486 env->hflags2 |= HF2_GIF_MASK;
6487}
6488
6489void helper_clgi(void)
6490{
6491 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6492 env->hflags2 &= ~HF2_GIF_MASK;
6493}
6494
6495void helper_skinit(void)
6496{
6497 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6498 /* XXX: not implemented */
6499 raise_exception(EXCP06_ILLOP);
6500}
6501
6502void helper_invlpga(int aflag)
6503{
6504 target_ulong addr;
6505 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6506
6507 if (aflag == 2)
6508 addr = EAX;
6509 else
6510 addr = (uint32_t)EAX;
6511
6512 /* XXX: could use the ASID to decide whether the flush is
6513 actually needed */
6514 tlb_flush_page(env, addr);
6515}
6516
6517void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6518{
6519 if (likely(!(env->hflags & HF_SVMI_MASK)))
6520 return;
6521#ifndef VBOX
6522 switch(type) {
6523#ifndef VBOX
6524 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6525#else
6526 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6527 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6528 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6529#endif
6530 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6531 helper_vmexit(type, param);
6532 }
6533 break;
6534#ifndef VBOX
6535 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6536#else
6537 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6538 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6539 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6540#endif
6541 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6542 helper_vmexit(type, param);
6543 }
6544 break;
6545 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6546 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6547 helper_vmexit(type, param);
6548 }
6549 break;
6550 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6551 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6552 helper_vmexit(type, param);
6553 }
6554 break;
6555 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6556 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6557 helper_vmexit(type, param);
6558 }
6559 break;
6560 case SVM_EXIT_MSR:
6561 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6562 /* FIXME: this should be read in at vmrun (faster this way?) */
6563 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6564 uint32_t t0, t1;
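            /* the MSR permission map uses 2 bits per MSR (read bit,
               then write bit) in three 2 Kbyte blocks covering
               0..0x1fff, 0xc0000000..0xc0001fff and
               0xc0010000..0xc0011fff; t1 is the byte offset into the
               map, t0 the bit offset of the read bit within that byte
               and param selects read (0) or write (1).  Example:
               MSR 0xc0000081 sits at bit (8192 + 0x81) * 2 = 16642,
               i.e. bit 2 of byte 2080. */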
6565 switch((uint32_t)ECX) {
6566 case 0 ... 0x1fff:
6567 t0 = (ECX * 2) % 8;
6568 t1 = (ECX * 2) / 8; /* 2 bits per MSR */
6569 break;
6570 case 0xc0000000 ... 0xc0001fff:
6571 t0 = (8192 + ECX - 0xc0000000) * 2;
6572 t1 = (t0 / 8);
6573 t0 %= 8;
6574 break;
6575 case 0xc0010000 ... 0xc0011fff:
6576 t0 = (16384 + ECX - 0xc0010000) * 2;
6577 t1 = (t0 / 8);
6578 t0 %= 8;
6579 break;
6580 default:
6581 helper_vmexit(type, param);
6582 t0 = 0;
6583 t1 = 0;
6584 break;
6585 }
6586 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6587 helper_vmexit(type, param);
6588 }
6589 break;
6590 default:
6591 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6592 helper_vmexit(type, param);
6593 }
6594 break;
6595 }
6596#else
6597 AssertMsgFailed(("We shouldn't be here, HWACCM handles this differently!"));
6598#endif
6599}
6600
6601void helper_svm_check_io(uint32_t port, uint32_t param,
6602 uint32_t next_eip_addend)
6603{
6604 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6605 /* FIXME: this should be read in at vmrun (faster this way?) */
6606 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6607 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
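        /* the I/O permission map has one intercept bit per port; bits
           6:4 of param carry the SVM size field (1, 2 or 4 for
           8/16/32-bit accesses), so mask covers every byte of the
           access.  Example: a 16-bit access to port 0x3f9 tests bits 1
           and 2 of the word read at byte offset 0x3f9 / 8 = 127. */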
6608 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6609 /* next EIP */
6610 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6611 env->eip + next_eip_addend);
6612 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6613 }
6614 }
6615}
6616
6617/* Note: currently only 32 bits of exit_code are used */
6618void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6619{
6620 uint32_t int_ctl;
6621
6622 if (loglevel & CPU_LOG_TB_IN_ASM)
6623 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6624 exit_code, exit_info_1,
6625 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6626 EIP);
6627
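    /* record whether the guest was in an interrupt shadow (the
       instruction right after STI or MOV SS) in the VMCB, as real
       hardware does on #VMEXIT */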
6628 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6629 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6630 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6631 } else {
6632 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6633 }
6634
6635 /* Save the VM state in the vmcb */
6636 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6637 &env->segs[R_ES]);
6638 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6639 &env->segs[R_CS]);
6640 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6641 &env->segs[R_SS]);
6642 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6643 &env->segs[R_DS]);
6644
6645 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6646 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6647
6648 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6649 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6650
6651 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6652 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6653 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6654 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6655 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6656
6657 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6658 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6659 int_ctl |= env->v_tpr & V_TPR_MASK;
6660 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6661 int_ctl |= V_IRQ_MASK;
6662 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6663
6664 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6665 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6666 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6667 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6668 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6669 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6670 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6671
6672 /* Reload the host state from vm_hsave */
6673 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6674 env->hflags &= ~HF_SVMI_MASK;
6675 env->intercept = 0;
6676 env->intercept_exceptions = 0;
6677 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6678 env->tsc_offset = 0;
6679
6680 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6681 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6682
6683 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6684 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6685
6686 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6687 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6688 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6689 /* we need to set the efer after the crs so the hidden flags get
6690 set properly */
6691 cpu_load_efer(env,
6692 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6693 env->eflags = 0;
6694 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6695 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6696 CC_OP = CC_OP_EFLAGS;
6697
6698 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6699 env, R_ES);
6700 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6701 env, R_CS);
6702 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6703 env, R_SS);
6704 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6705 env, R_DS);
6706
6707 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6708 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6709 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6710
6711 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6712 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6713
6714 /* other setups */
6715 cpu_x86_set_cpl(env, 0);
6716 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6717 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6718
6719 env->hflags2 &= ~HF2_GIF_MASK;
6720 /* FIXME: Resets the current ASID register to zero (host ASID). */
6721
6722 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6723
6724 /* Clears the TSC_OFFSET inside the processor. */
6725
6726 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6727 from the page table indicated by the host's CR3. If the PDPEs contain
6728 illegal state, the processor causes a shutdown. */
6729
6730 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6731 env->cr[0] |= CR0_PE_MASK;
6732 env->eflags &= ~VM_MASK;
6733
6734 /* Disables all breakpoints in the host DR7 register. */
6735
6736 /* Checks the reloaded host state for consistency. */
6737
6738 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6739 host's code segment or non-canonical (in the case of long mode), a
6740 #GP fault is delivered inside the host. */
6741
6742 /* remove any pending exception */
6743 env->exception_index = -1;
6744 env->error_code = 0;
6745 env->old_exception = -1;
6746
6747 cpu_loop_exit();
6748}
6749
6750#endif
6751
6752/* MMX/SSE */
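/* env->fptags[] holds one byte per x87 register, non-zero meaning the
   register is tagged empty; the paired 32-bit stores below therefore
   cover all eight tags at once: entering MMX mode marks every register
   valid, EMMS marks every register empty. */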
6753/* XXX: optimize by storing fptt and fptags in the static cpu state */
6754void helper_enter_mmx(void)
6755{
6756 env->fpstt = 0;
6757 *(uint32_t *)(env->fptags) = 0;
6758 *(uint32_t *)(env->fptags + 4) = 0;
6759}
6760
6761void helper_emms(void)
6762{
6763 /* set to empty state */
6764 *(uint32_t *)(env->fptags) = 0x01010101;
6765 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6766}
6767
6768/* XXX: suppress */
6769void helper_movq(uint64_t *d, uint64_t *s)
6770{
6771 *d = *s;
6772}
6773
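/* ops_sse.h and helper_template.h are included several times, once per
   SHIFT value; SHIFT selects the operand width the template expands to
   (ops_sse.h: 0 = 64-bit MMX, 1 = 128-bit SSE; helper_template.h:
   0/1/2/3 = 8/16/32/64-bit helpers). */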
6774#define SHIFT 0
6775#include "ops_sse.h"
6776
6777#define SHIFT 1
6778#include "ops_sse.h"
6779
6780#define SHIFT 0
6781#include "helper_template.h"
6782#undef SHIFT
6783
6784#define SHIFT 1
6785#include "helper_template.h"
6786#undef SHIFT
6787
6788#define SHIFT 2
6789#include "helper_template.h"
6790#undef SHIFT
6791
6792#ifdef TARGET_X86_64
6793
6794#define SHIFT 3
6795#include "helper_template.h"
6796#undef SHIFT
6797
6798#endif
6799
6800/* bit operations */
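/* note: the translated code is expected to check for a zero source
   before calling these helpers (BSF/BSR set ZF and leave the
   destination undefined in that case); helper_bsf() would otherwise
   never terminate for t0 == 0 */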
6801target_ulong helper_bsf(target_ulong t0)
6802{
6803 int count;
6804 target_ulong res;
6805
6806 res = t0;
6807 count = 0;
6808 while ((res & 1) == 0) {
6809 count++;
6810 res >>= 1;
6811 }
6812 return count;
6813}
6814
6815target_ulong helper_bsr(target_ulong t0)
6816{
6817 int count;
6818 target_ulong res, mask;
6819
6820 res = t0;
6821 count = TARGET_LONG_BITS - 1;
6822 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6823 while ((res & mask) == 0) {
6824 count--;
6825 res <<= 1;
6826 }
6827 return count;
6828}
6829
6830
6831static int compute_all_eflags(void)
6832{
6833 return CC_SRC;
6834}
6835
6836static int compute_c_eflags(void)
6837{
6838 return CC_SRC & CC_C;
6839}
6840
6841#ifndef VBOX
6842CCTable cc_table[CC_OP_NB] = {
6843 [CC_OP_DYNAMIC] = { /* should never happen */ },
6844
6845 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6846
6847 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6848 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6849 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6850
6851 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6852 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6853 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6854
6855 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6856 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6857 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6858
6859 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6860 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6861 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6862
6863 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6864 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6865 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6866
6867 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6868 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6869 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6870
6871 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6872 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6873 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6874
6875 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6876 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6877 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6878
6879 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6880 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6881 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6882
6883 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6884 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6885 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6886
6887#ifdef TARGET_X86_64
6888 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6889
6890 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6891
6892 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6893
6894 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6895
6896 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6897
6898 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6899
6900 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6901
6902 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6903
6904 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6905
6906 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6907#endif
6908};
6909#else /* VBOX */
6910/* Sync carefully with cpu.h */
6911CCTable cc_table[CC_OP_NB] = {
6912 /* CC_OP_DYNAMIC */ { 0, 0 },
6913
6914 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6915
6916 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6917 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6918 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6919#ifdef TARGET_X86_64
6920 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6921#else
6922 /* CC_OP_MULQ */ { 0, 0 },
6923#endif
6924
6925 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6926 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6927 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6928#ifdef TARGET_X86_64
6929 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6930#else
6931 /* CC_OP_ADDQ */ { 0, 0 },
6932#endif
6933
6934 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6935 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6936 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6937#ifdef TARGET_X86_64
6938 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6939#else
6940 /* CC_OP_ADCQ */ { 0, 0 },
6941#endif
6942
6943 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6944 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6945 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6946#ifdef TARGET_X86_64
6947 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6948#else
6949 /* CC_OP_SUBQ */ { 0, 0 },
6950#endif
6951
6952 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6953 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6954 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6955#ifdef TARGET_X86_64
6956 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6957#else
6958 /* CC_OP_SBBQ */ { 0, 0 },
6959#endif
6960
6961 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6962 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6963 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6964#ifdef TARGET_X86_64
6965 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6966#else
6967 /* CC_OP_LOGICQ */ { 0, 0 },
6968#endif
6969
6970 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6971 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6972 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6973#ifdef TARGET_X86_64
6974 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6975#else
6976 /* CC_OP_INCQ */ { 0, 0 },
6977#endif
6978
6979 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6980 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6981 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6982#ifdef TARGET_X86_64
6983 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6984#else
6985 /* CC_OP_DECQ */ { 0, 0 },
6986#endif
6987
6988 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6989 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6990 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6991#ifdef TARGET_X86_64
6992 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6993#else
6994 /* CC_OP_SHLQ */ { 0, 0 },
6995#endif
6996
6997 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6998 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6999 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
7000#ifdef TARGET_X86_64
7001 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
7002#else
7003 /* CC_OP_SARQ */ { 0, 0 },
7004#endif
7005};
7006#endif /* VBOX */