VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@ 36140

Last change on this file since 36140 was 36140, checked in by vboxsync, 14 years ago

rem: Re-synced to svn://svn.savannah.nongnu.org/qemu/trunk@5495 (repo UUID c046a42c-6fe2-441c-8c8c-71466251a162).

  • Property svn:eol-style set to native
File size: 189.7 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29
30#define CPU_NO_GLOBAL_REGS
31#include "exec.h"
32#include "host-utils.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39//#define DEBUG_PCALL
40
41#if 0
42#define raise_exception_err(a, b)\
43do {\
44 if (logfile)\
45 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
46 (raise_exception_err)(a, b);\
47} while (0)
48#endif
49
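/* An entry is CC_P when its index, taken as a byte, has an even number of
   set bits; this matches the x86 definition of PF, which is computed from
   the low 8 bits of a result. */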
50const uint8_t parity_table[256] = {
51 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
52 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
53 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
54 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
58 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
60 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
61 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
62 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
78 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
82 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
83};
84
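/* RCL/RCR rotate through the carry flag, so an N-bit operand effectively has
   N + 1 bit positions and the rotate count is reduced modulo N + 1: the
   tables below give count mod 17 for 16-bit operands and count mod 9 for
   8-bit operands, indexed by the masked 5-bit count. */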
85/* modulo 17 table */
86const uint8_t rclw_table[32] = {
87 0, 1, 2, 3, 4, 5, 6, 7,
88 8, 9,10,11,12,13,14,15,
89 16, 0, 1, 2, 3, 4, 5, 6,
90 7, 8, 9,10,11,12,13,14,
91};
92
93/* modulo 9 table */
94const uint8_t rclb_table[32] = {
95 0, 1, 2, 3, 4, 5, 6, 7,
96 8, 0, 1, 2, 3, 4, 5, 6,
97 7, 8, 0, 1, 2, 3, 4, 5,
98 6, 7, 8, 0, 1, 2, 3, 4,
99};
100
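/* x87 constants: 0.0, 1.0, pi, log10(2), ln(2), log2(e) and log2(10),
   presumably consumed by the FLDZ/FLD1/FLDPI/FLDLG2/FLDLN2/FLDL2E/FLDL2T
   helpers later in this file. */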
101const CPU86_LDouble f15rk[7] =
102{
103 0.00000000000000000000L,
104 1.00000000000000000000L,
105 3.14159265358979323851L, /*pi*/
106 0.30102999566398119523L, /*lg2*/
107 0.69314718055994530943L, /*ln2*/
108 1.44269504088896340739L, /*l2e*/
109 3.32192809488736234781L, /*l2t*/
110};
111
112/* broken thread support */
113
114spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
115
116void helper_lock(void)
117{
118 spin_lock(&global_cpu_lock);
119}
120
121void helper_unlock(void)
122{
123 spin_unlock(&global_cpu_lock);
124}
125
126void helper_write_eflags(target_ulong t0, uint32_t update_mask)
127{
128 load_eflags(t0, update_mask);
129}
130
131target_ulong helper_read_eflags(void)
132{
133 uint32_t eflags;
134 eflags = cc_table[CC_OP].compute_all();
135 eflags |= (DF & DF_MASK);
136 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
137 return eflags;
138}
139
140#ifdef VBOX
141
142void helper_write_eflags_vme(target_ulong t0)
143{
144 unsigned int new_eflags = t0;
145
146 assert(env->eflags & (1<<VM_SHIFT));
147
148 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
149 /* if TF will be set -> #GP */
150 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
151 || (new_eflags & TF_MASK)) {
152 raise_exception(EXCP0D_GPF);
153 } else {
154 load_eflags(new_eflags,
155 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
156
157 if (new_eflags & IF_MASK) {
158 env->eflags |= VIF_MASK;
159 } else {
160 env->eflags &= ~VIF_MASK;
161 }
162 }
163}
164
165target_ulong helper_read_eflags_vme(void)
166{
167 uint32_t eflags;
168 eflags = cc_table[CC_OP].compute_all();
169 eflags |= (DF & DF_MASK);
170 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
171 if (env->eflags & VIF_MASK)
172 eflags |= IF_MASK;
173 else
174 eflags &= ~IF_MASK;
175
176 /* According to AMD manual, should be read with IOPL == 3 */
177 eflags |= (3 << IOPL_SHIFT);
178
179 /* We only use helper_read_eflags_vme() in 16-bit mode */
180 return eflags & 0xffff;
181}
182
183void helper_dump_state()
184{
185 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
186 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
187 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
188 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
189 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
190 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
191 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
192}
193
194#endif /* VBOX */
195
196/* return non zero if error */
197static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
198 int selector)
199{
200 SegmentCache *dt;
201 int index;
202 target_ulong ptr;
203
204#ifdef VBOX
205 /* Trying to load a selector with CPL=1? */
206 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
207 {
208 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
209 selector = selector & 0xfffc;
210 }
211#endif /* VBOX */
212
213 if (selector & 0x4)
214 dt = &env->ldt;
215 else
216 dt = &env->gdt;
217 index = selector & ~7;
218 if ((index + 7) > dt->limit)
219 return -1;
220 ptr = dt->base + index;
221 *e1_ptr = ldl_kernel(ptr);
222 *e2_ptr = ldl_kernel(ptr + 4);
223 return 0;
224}
225
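/* Descriptor word decoding: e1 is the low and e2 the high 32-bit word of a
   segment descriptor. The limit is e1[15:0] combined with e2[19:16], scaled
   to 4K pages (low 12 bits forced to 1) when the granularity bit is set; the
   base is assembled from e1[31:16], e2[7:0] and e2[31:24]. */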
226static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
227{
228 unsigned int limit;
229 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
230 if (e2 & DESC_G_MASK)
231 limit = (limit << 12) | 0xfff;
232 return limit;
233}
234
235static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
236{
237 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
238}
239
240static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
241{
242 sc->base = get_seg_base(e1, e2);
243 sc->limit = get_seg_limit(e1, e2);
244 sc->flags = e2;
245}
246
247/* init the segment cache in vm86 mode. */
248static inline void load_seg_vm(int seg, int selector)
249{
250 selector &= 0xffff;
251#ifdef VBOX
252 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
253 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
254 flags |= (3 << DESC_DPL_SHIFT);
255
256 cpu_x86_load_seg_cache(env, seg, selector,
257 (selector << 4), 0xffff, flags);
258#else /* VBOX */
259 cpu_x86_load_seg_cache(env, seg, selector,
260 (selector << 4), 0xffff, 0);
261#endif /* VBOX */
262}
263
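/* Fetch the stack pointer and stack segment to use for a privilege-level
   change from the current TSS: a 32-bit TSS stores {ESP, SS} per ring as
   8-byte entries starting at offset 4, a 16-bit TSS as 4-byte entries
   starting at offset 2, hence the (dpl * 4 + 2) << shift indexing below. */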
264static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
265 uint32_t *esp_ptr, int dpl)
266{
267#ifndef VBOX
268 int type, index, shift;
269#else
270 unsigned int type, index, shift;
271#endif
272
273#if 0
274 {
275 int i;
276 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
277 for(i=0;i<env->tr.limit;i++) {
278 printf("%02x ", env->tr.base[i]);
279 if ((i & 7) == 7) printf("\n");
280 }
281 printf("\n");
282 }
283#endif
284
285 if (!(env->tr.flags & DESC_P_MASK))
286 cpu_abort(env, "invalid tss");
287 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
288 if ((type & 7) != 1)
289 cpu_abort(env, "invalid tss type");
290 shift = type >> 3;
291 index = (dpl * 4 + 2) << shift;
292 if (index + (4 << shift) - 1 > env->tr.limit)
293 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
294 if (shift == 0) {
295 *esp_ptr = lduw_kernel(env->tr.base + index);
296 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
297 } else {
298 *esp_ptr = ldl_kernel(env->tr.base + index);
299 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
300 }
301}
302
303/* XXX: merge with load_seg() */
304static void tss_load_seg(int seg_reg, int selector)
305{
306 uint32_t e1, e2;
307 int rpl, dpl, cpl;
308
309#ifdef VBOX
310 e1 = e2 = 0;
311 cpl = env->hflags & HF_CPL_MASK;
312 /* Trying to load a selector with CPL=1? */
313 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
314 {
315 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
316 selector = selector & 0xfffc;
317 }
318#endif /* VBOX */
319
320 if ((selector & 0xfffc) != 0) {
321 if (load_segment(&e1, &e2, selector) != 0)
322 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
323 if (!(e2 & DESC_S_MASK))
324 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
325 rpl = selector & 3;
326 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
327 cpl = env->hflags & HF_CPL_MASK;
328 if (seg_reg == R_CS) {
329 if (!(e2 & DESC_CS_MASK))
330 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
331 /* XXX: is it correct ? */
332 if (dpl != rpl)
333 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
334 if ((e2 & DESC_C_MASK) && dpl > rpl)
335 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
336 } else if (seg_reg == R_SS) {
337 /* SS must be writable data */
338 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
339 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
340 if (dpl != cpl || dpl != rpl)
341 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
342 } else {
343 /* not readable code */
344 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
345 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
346 /* if data or non-conforming code, check the rights */
347 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
348 if (dpl < cpl || dpl < rpl)
349 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
350 }
351 }
352 if (!(e2 & DESC_P_MASK))
353 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
354 cpu_x86_load_seg_cache(env, seg_reg, selector,
355 get_seg_base(e1, e2),
356 get_seg_limit(e1, e2),
357 e2);
358 } else {
359 if (seg_reg == R_SS || seg_reg == R_CS)
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361#ifdef VBOX
362# if 0
363 /** @todo: now we ignore loading 0 selectors, need to check what is correct once */
364 cpu_x86_load_seg_cache(env, seg_reg, selector,
365 0, 0, 0);
366# endif
367#endif /* VBOX */
368 }
369}
370
371#define SWITCH_TSS_JMP 0
372#define SWITCH_TSS_IRET 1
373#define SWITCH_TSS_CALL 2
374
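/* Hardware task switch, used for JMP/CALL to a TSS, task gates and IRET with
   NT set: validate the new TSS descriptor and limit, save the current
   registers into the old TSS, clear the old busy bit (JMP/IRET) and set the
   new one (JMP/CALL), load TR and the new register state, then reload the
   LDT and segment registers with the usual protection checks. */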
375/* XXX: restore CPU state in registers (PowerPC case) */
376static void switch_tss(int tss_selector,
377 uint32_t e1, uint32_t e2, int source,
378 uint32_t next_eip)
379{
380 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
381 target_ulong tss_base;
382 uint32_t new_regs[8], new_segs[6];
383 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
384 uint32_t old_eflags, eflags_mask;
385 SegmentCache *dt;
386#ifndef VBOX
387 int index;
388#else
389 unsigned int index;
390#endif
391 target_ulong ptr;
392
393 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
394#ifdef DEBUG_PCALL
395 if (loglevel & CPU_LOG_PCALL)
396 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
397#endif
398
399#if defined(VBOX) && defined(DEBUG)
400 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
401#endif
402
403 /* if task gate, we read the TSS segment and we load it */
404 if (type == 5) {
405 if (!(e2 & DESC_P_MASK))
406 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
407 tss_selector = e1 >> 16;
408 if (tss_selector & 4)
409 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
410 if (load_segment(&e1, &e2, tss_selector) != 0)
411 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
412 if (e2 & DESC_S_MASK)
413 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
414 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
415 if ((type & 7) != 1)
416 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
417 }
418
419 if (!(e2 & DESC_P_MASK))
420 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
421
422 if (type & 8)
423 tss_limit_max = 103;
424 else
425 tss_limit_max = 43;
426 tss_limit = get_seg_limit(e1, e2);
427 tss_base = get_seg_base(e1, e2);
428 if ((tss_selector & 4) != 0 ||
429 tss_limit < tss_limit_max)
430 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
431 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
432 if (old_type & 8)
433 old_tss_limit_max = 103;
434 else
435 old_tss_limit_max = 43;
436
437 /* read all the registers from the new TSS */
438 if (type & 8) {
439 /* 32 bit */
440 new_cr3 = ldl_kernel(tss_base + 0x1c);
441 new_eip = ldl_kernel(tss_base + 0x20);
442 new_eflags = ldl_kernel(tss_base + 0x24);
443 for(i = 0; i < 8; i++)
444 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
445 for(i = 0; i < 6; i++)
446 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
447 new_ldt = lduw_kernel(tss_base + 0x60);
448 new_trap = ldl_kernel(tss_base + 0x64);
449 } else {
450 /* 16 bit */
451 new_cr3 = 0;
452 new_eip = lduw_kernel(tss_base + 0x0e);
453 new_eflags = lduw_kernel(tss_base + 0x10);
454 for(i = 0; i < 8; i++)
455 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
456 for(i = 0; i < 4; i++)
457 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
458 new_ldt = lduw_kernel(tss_base + 0x2a);
459 new_segs[R_FS] = 0;
460 new_segs[R_GS] = 0;
461 new_trap = 0;
462 }
463
464 /* NOTE: we must avoid memory exceptions during the task switch,
465 so we make dummy accesses before */
466 /* XXX: it can still fail in some cases, so a bigger hack is
467 necessary to validate the TLB after having done the accesses */
468
469 v1 = ldub_kernel(env->tr.base);
470 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
471 stb_kernel(env->tr.base, v1);
472 stb_kernel(env->tr.base + old_tss_limit_max, v2);
473
474 /* clear busy bit (it is restartable) */
475 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
476 target_ulong ptr;
477 uint32_t e2;
478 ptr = env->gdt.base + (env->tr.selector & ~7);
479 e2 = ldl_kernel(ptr + 4);
480 e2 &= ~DESC_TSS_BUSY_MASK;
481 stl_kernel(ptr + 4, e2);
482 }
483 old_eflags = compute_eflags();
484 if (source == SWITCH_TSS_IRET)
485 old_eflags &= ~NT_MASK;
486
487 /* save the current state in the old TSS */
488 if (type & 8) {
489 /* 32 bit */
490 stl_kernel(env->tr.base + 0x20, next_eip);
491 stl_kernel(env->tr.base + 0x24, old_eflags);
492 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
493 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
494 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
495 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
496 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
497 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
498 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
499 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
500 for(i = 0; i < 6; i++)
501 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
502#ifdef VBOX
503 /* Must store the ldt as it gets reloaded and might have been changed. */
504 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
505#endif
506#if defined(VBOX) && defined(DEBUG)
507 printf("TSS 32 bits switch\n");
508 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
509#endif
510 } else {
511 /* 16 bit */
512 stw_kernel(env->tr.base + 0x0e, next_eip);
513 stw_kernel(env->tr.base + 0x10, old_eflags);
514 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
515 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
516 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
517 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
518 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
519 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
520 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
521 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
522 for(i = 0; i < 4; i++)
523 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
524#ifdef VBOX
525 /* Must store the ldt as it gets reloaded and might have been changed. */
526 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
527#endif
528 }
529
530 /* now if an exception occurs, it will occur in the next task
531 context */
532
533 if (source == SWITCH_TSS_CALL) {
534 stw_kernel(tss_base, env->tr.selector);
535 new_eflags |= NT_MASK;
536 }
537
538 /* set busy bit */
539 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
540 target_ulong ptr;
541 uint32_t e2;
542 ptr = env->gdt.base + (tss_selector & ~7);
543 e2 = ldl_kernel(ptr + 4);
544 e2 |= DESC_TSS_BUSY_MASK;
545 stl_kernel(ptr + 4, e2);
546 }
547
548 /* set the new CPU state */
549 /* from this point, any exception which occurs can give problems */
550 env->cr[0] |= CR0_TS_MASK;
551 env->hflags |= HF_TS_MASK;
552 env->tr.selector = tss_selector;
553 env->tr.base = tss_base;
554 env->tr.limit = tss_limit;
555 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
556
557 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
558 cpu_x86_update_cr3(env, new_cr3);
559 }
560
561 /* load all registers without an exception, then reload them with
562 possible exception */
563 env->eip = new_eip;
564 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
565 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
566 if (!(type & 8))
567 eflags_mask &= 0xffff;
568 load_eflags(new_eflags, eflags_mask);
569 /* XXX: what to do in 16 bit case ? */
570 EAX = new_regs[0];
571 ECX = new_regs[1];
572 EDX = new_regs[2];
573 EBX = new_regs[3];
574 ESP = new_regs[4];
575 EBP = new_regs[5];
576 ESI = new_regs[6];
577 EDI = new_regs[7];
578 if (new_eflags & VM_MASK) {
579 for(i = 0; i < 6; i++)
580 load_seg_vm(i, new_segs[i]);
581 /* in vm86, CPL is always 3 */
582 cpu_x86_set_cpl(env, 3);
583 } else {
584 /* CPL is set to the RPL of CS */
585 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
586 /* first just selectors as the rest may trigger exceptions */
587 for(i = 0; i < 6; i++)
588 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
589 }
590
591 env->ldt.selector = new_ldt & ~4;
592 env->ldt.base = 0;
593 env->ldt.limit = 0;
594 env->ldt.flags = 0;
595
596 /* load the LDT */
597 if (new_ldt & 4)
598 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
599
600 if ((new_ldt & 0xfffc) != 0) {
601 dt = &env->gdt;
602 index = new_ldt & ~7;
603 if ((index + 7) > dt->limit)
604 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
605 ptr = dt->base + index;
606 e1 = ldl_kernel(ptr);
607 e2 = ldl_kernel(ptr + 4);
608 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
609 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
610 if (!(e2 & DESC_P_MASK))
611 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
612 load_seg_cache_raw_dt(&env->ldt, e1, e2);
613 }
614
615 /* load the segments */
616 if (!(new_eflags & VM_MASK)) {
617 tss_load_seg(R_CS, new_segs[R_CS]);
618 tss_load_seg(R_SS, new_segs[R_SS]);
619 tss_load_seg(R_ES, new_segs[R_ES]);
620 tss_load_seg(R_DS, new_segs[R_DS]);
621 tss_load_seg(R_FS, new_segs[R_FS]);
622 tss_load_seg(R_GS, new_segs[R_GS]);
623 }
624
625 /* check that EIP is in the CS segment limits */
626 if (new_eip > env->segs[R_CS].limit) {
627 /* XXX: different exception if CALL ? */
628 raise_exception_err(EXCP0D_GPF, 0);
629 }
630}
631
632/* check if Port I/O is allowed in TSS */
633static inline void check_io(int addr, int size)
634{
635#ifndef VBOX
636 int io_offset, val, mask;
637#else
638 int val, mask;
639 unsigned int io_offset;
640#endif /* VBOX */
641
642 /* TSS must be a valid 32 bit one */
643 if (!(env->tr.flags & DESC_P_MASK) ||
644 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
645 env->tr.limit < 103)
646 goto fail;
647 io_offset = lduw_kernel(env->tr.base + 0x66);
648 io_offset += (addr >> 3);
649 /* Note: the check needs two bytes */
650 if ((io_offset + 1) > env->tr.limit)
651 goto fail;
652 val = lduw_kernel(env->tr.base + io_offset);
653 val >>= (addr & 7);
654 mask = (1 << size) - 1;
655 /* all bits must be zero to allow the I/O */
656 if ((val & mask) != 0) {
657 fail:
658 raise_exception_err(EXCP0D_GPF, 0);
659 }
660}
661
662#ifdef VBOX
663/* Keep in sync with gen_check_external_event() */
664void helper_check_external_event()
665{
666 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
667 | CPU_INTERRUPT_EXTERNAL_TIMER
668 | CPU_INTERRUPT_EXTERNAL_DMA))
669 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
670 && (env->eflags & IF_MASK)
671 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
672 {
673 helper_external_event();
674 }
675
676}
677
678void helper_sync_seg(uint32_t reg)
679{
680 if (env->segs[reg].newselector)
681 sync_seg(env, reg, env->segs[reg].newselector);
682}
683#endif /* VBOX */
684
685void helper_check_iob(uint32_t t0)
686{
687 check_io(t0, 1);
688}
689
690void helper_check_iow(uint32_t t0)
691{
692 check_io(t0, 2);
693}
694
695void helper_check_iol(uint32_t t0)
696{
697 check_io(t0, 4);
698}
699
700void helper_outb(uint32_t port, uint32_t data)
701{
702 cpu_outb(env, port, data & 0xff);
703}
704
705target_ulong helper_inb(uint32_t port)
706{
707 return cpu_inb(env, port);
708}
709
710void helper_outw(uint32_t port, uint32_t data)
711{
712 cpu_outw(env, port, data & 0xffff);
713}
714
715target_ulong helper_inw(uint32_t port)
716{
717 return cpu_inw(env, port);
718}
719
720void helper_outl(uint32_t port, uint32_t data)
721{
722 cpu_outl(env, port, data);
723}
724
725target_ulong helper_inl(uint32_t port)
726{
727 return cpu_inl(env, port);
728}
729
730static inline unsigned int get_sp_mask(unsigned int e2)
731{
732 if (e2 & DESC_B_MASK)
733 return 0xffffffff;
734 else
735 return 0xffff;
736}
737
738#ifdef TARGET_X86_64
739#define SET_ESP(val, sp_mask)\
740do {\
741 if ((sp_mask) == 0xffff)\
742 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
743 else if ((sp_mask) == 0xffffffffLL)\
744 ESP = (uint32_t)(val);\
745 else\
746 ESP = (val);\
747} while (0)
748#else
749#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
750#endif
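/* SET_ESP only updates the bits selected by sp_mask, so a 16-bit stack
   segment (B bit clear) keeps the upper bits of ESP intact; the
   TARGET_X86_64 variant special-cases the 32-bit mask so the new value is
   zero-extended into RSP rather than merged with stale upper bits. */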
751
752/* in 64-bit mode this addition can overflow, so this segment addition macro
753 * can be used to trim the value to 32 bits whenever needed */
754#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
755
756/* XXX: add a is_user flag to have proper security support */
757#define PUSHW(ssp, sp, sp_mask, val)\
758{\
759 sp -= 2;\
760 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
761}
762
763#define PUSHL(ssp, sp, sp_mask, val)\
764{\
765 sp -= 4;\
766 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
767}
768
769#define POPW(ssp, sp, sp_mask, val)\
770{\
771 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
772 sp += 2;\
773}
774
775#define POPL(ssp, sp, sp_mask, val)\
776{\
777 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
778 sp += 4;\
779}
780
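/* Protected mode interrupt/exception dispatch: fetch the gate from the IDT,
   check gate type, DPL and present bit, hand task gates to switch_tss,
   otherwise pick the target stack (inner-privilege stacks come from the TSS
   via get_ss_esp_from_tss), push the return frame and optional error code in
   16-bit or 32-bit format depending on the gate, and load CS:EIP from the
   gate. */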
781/* protected mode interrupt */
782static void do_interrupt_protected(int intno, int is_int, int error_code,
783 unsigned int next_eip, int is_hw)
784{
785 SegmentCache *dt;
786 target_ulong ptr, ssp;
787 int type, dpl, selector, ss_dpl, cpl;
788 int has_error_code, new_stack, shift;
789 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
790 uint32_t old_eip, sp_mask;
791
792#ifdef VBOX
793 ss = ss_e1 = ss_e2 = 0;
794 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
795 cpu_loop_exit();
796#endif
797
798 has_error_code = 0;
799 if (!is_int && !is_hw) {
800 switch(intno) {
801 case 8:
802 case 10:
803 case 11:
804 case 12:
805 case 13:
806 case 14:
807 case 17:
808 has_error_code = 1;
809 break;
810 }
811 }
812 if (is_int)
813 old_eip = next_eip;
814 else
815 old_eip = env->eip;
816
817 dt = &env->idt;
818#ifndef VBOX
819 if (intno * 8 + 7 > dt->limit)
820#else
821 if ((unsigned)intno * 8 + 7 > dt->limit)
822#endif
823 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
824 ptr = dt->base + intno * 8;
825 e1 = ldl_kernel(ptr);
826 e2 = ldl_kernel(ptr + 4);
827 /* check gate type */
828 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
829 switch(type) {
830 case 5: /* task gate */
831 /* must do that check here to return the correct error code */
832 if (!(e2 & DESC_P_MASK))
833 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
834 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
835 if (has_error_code) {
836 int type;
837 uint32_t mask;
838 /* push the error code */
839 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
840 shift = type >> 3;
841 if (env->segs[R_SS].flags & DESC_B_MASK)
842 mask = 0xffffffff;
843 else
844 mask = 0xffff;
845 esp = (ESP - (2 << shift)) & mask;
846 ssp = env->segs[R_SS].base + esp;
847 if (shift)
848 stl_kernel(ssp, error_code);
849 else
850 stw_kernel(ssp, error_code);
851 SET_ESP(esp, mask);
852 }
853 return;
854 case 6: /* 286 interrupt gate */
855 case 7: /* 286 trap gate */
856 case 14: /* 386 interrupt gate */
857 case 15: /* 386 trap gate */
858 break;
859 default:
860 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
861 break;
862 }
863 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
864 cpl = env->hflags & HF_CPL_MASK;
865 /* check privilege if software int */
866 if (is_int && dpl < cpl)
867 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
868 /* check valid bit */
869 if (!(e2 & DESC_P_MASK))
870 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
871 selector = e1 >> 16;
872 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
873 if ((selector & 0xfffc) == 0)
874 raise_exception_err(EXCP0D_GPF, 0);
875
876 if (load_segment(&e1, &e2, selector) != 0)
877 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
878 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
879 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
880 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
881 if (dpl > cpl)
882 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
883 if (!(e2 & DESC_P_MASK))
884 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
885 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
886 /* to inner privilege */
887 get_ss_esp_from_tss(&ss, &esp, dpl);
888 if ((ss & 0xfffc) == 0)
889 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
890 if ((ss & 3) != dpl)
891 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
892 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
893 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
894 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
895 if (ss_dpl != dpl)
896 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
897 if (!(ss_e2 & DESC_S_MASK) ||
898 (ss_e2 & DESC_CS_MASK) ||
899 !(ss_e2 & DESC_W_MASK))
900 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
901 if (!(ss_e2 & DESC_P_MASK))
902#ifdef VBOX /* See page 3-477 of 253666.pdf */
903 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
904#else
905 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
906#endif
907 new_stack = 1;
908 sp_mask = get_sp_mask(ss_e2);
909 ssp = get_seg_base(ss_e1, ss_e2);
910#if defined(VBOX) && defined(DEBUG)
911 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
912#endif
913 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
914 /* to same privilege */
915 if (env->eflags & VM_MASK)
916 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
917 new_stack = 0;
918 sp_mask = get_sp_mask(env->segs[R_SS].flags);
919 ssp = env->segs[R_SS].base;
920 esp = ESP;
921 dpl = cpl;
922 } else {
923 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
924 new_stack = 0; /* avoid warning */
925 sp_mask = 0; /* avoid warning */
926 ssp = 0; /* avoid warning */
927 esp = 0; /* avoid warning */
928 }
929
930 shift = type >> 3;
931
932#if 0
933 /* XXX: check that enough room is available */
934 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
935 if (env->eflags & VM_MASK)
936 push_size += 8;
937 push_size <<= shift;
938#endif
939 if (shift == 1) {
940 if (new_stack) {
941 if (env->eflags & VM_MASK) {
942 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
943 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
944 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
945 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
946 }
947 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
948 PUSHL(ssp, esp, sp_mask, ESP);
949 }
950 PUSHL(ssp, esp, sp_mask, compute_eflags());
951 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
952 PUSHL(ssp, esp, sp_mask, old_eip);
953 if (has_error_code) {
954 PUSHL(ssp, esp, sp_mask, error_code);
955 }
956 } else {
957 if (new_stack) {
958 if (env->eflags & VM_MASK) {
959 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
960 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
961 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
962 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
963 }
964 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
965 PUSHW(ssp, esp, sp_mask, ESP);
966 }
967 PUSHW(ssp, esp, sp_mask, compute_eflags());
968 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
969 PUSHW(ssp, esp, sp_mask, old_eip);
970 if (has_error_code) {
971 PUSHW(ssp, esp, sp_mask, error_code);
972 }
973 }
974
975 if (new_stack) {
976 if (env->eflags & VM_MASK) {
977 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
978 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
979 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
980 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
981 }
982 ss = (ss & ~3) | dpl;
983 cpu_x86_load_seg_cache(env, R_SS, ss,
984 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
985 }
986 SET_ESP(esp, sp_mask);
987
988 selector = (selector & ~3) | dpl;
989 cpu_x86_load_seg_cache(env, R_CS, selector,
990 get_seg_base(e1, e2),
991 get_seg_limit(e1, e2),
992 e2);
993 cpu_x86_set_cpl(env, dpl);
994 env->eip = offset;
995
996 /* interrupt gate clear IF mask */
997 if ((type & 1) == 0) {
998 env->eflags &= ~IF_MASK;
999 }
1000#ifndef VBOX
1001 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1002#else
1003 /*
1004 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1005 * gets confused by seemingly changed EFLAGS. See #3491 and
1006 * public bug #2341.
1007 */
1008 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1009#endif
1010}
1011
1012#ifdef VBOX
1013
1014/* check if VME interrupt redirection is enabled in TSS */
1015DECLINLINE(bool) is_vme_irq_redirected(int intno)
1016{
1017 unsigned int io_offset, intredir_offset;
1018 unsigned char val, mask;
1019
1020 /* TSS must be a valid 32 bit one */
1021 if (!(env->tr.flags & DESC_P_MASK) ||
1022 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1023 env->tr.limit < 103)
1024 goto fail;
1025 io_offset = lduw_kernel(env->tr.base + 0x66);
1026 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1027 if (io_offset < 0x68 + 0x20)
1028 io_offset = 0x68 + 0x20;
1029 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1030 intredir_offset = io_offset - 0x20;
1031
1032 intredir_offset += (intno >> 3);
1033 if ((intredir_offset) > env->tr.limit)
1034 goto fail;
1035
1036 val = ldub_kernel(env->tr.base + intredir_offset);
1037 mask = 1 << (unsigned char)(intno & 7);
1038
1039 /* bit set means no redirection. */
1040 if ((val & mask) != 0) {
1041 return false;
1042 }
1043 return true;
1044
1045fail:
1046 raise_exception_err(EXCP0D_GPF, 0);
1047 return true;
1048}
1049
1050/* V86 mode software interrupt with CR4.VME=1 */
1051static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1052{
1053 target_ulong ptr, ssp;
1054 int selector;
1055 uint32_t offset, esp;
1056 uint32_t old_cs, old_eflags;
1057 uint32_t iopl;
1058
1059 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1060
1061 if (!is_vme_irq_redirected(intno))
1062 {
1063 if (iopl == 3)
1064 {
1065 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1066 return;
1067 }
1068 else
1069 raise_exception_err(EXCP0D_GPF, 0);
1070 }
1071
1072 /* virtual mode idt is at linear address 0 */
1073 ptr = 0 + intno * 4;
1074 offset = lduw_kernel(ptr);
1075 selector = lduw_kernel(ptr + 2);
1076 esp = ESP;
1077 ssp = env->segs[R_SS].base;
1078 old_cs = env->segs[R_CS].selector;
1079
1080 old_eflags = compute_eflags();
1081 if (iopl < 3)
1082 {
1083 /* copy VIF into IF and set IOPL to 3 */
1084 if (env->eflags & VIF_MASK)
1085 old_eflags |= IF_MASK;
1086 else
1087 old_eflags &= ~IF_MASK;
1088
1089 old_eflags |= (3 << IOPL_SHIFT);
1090 }
1091
1092 /* XXX: use SS segment size ? */
1093 PUSHW(ssp, esp, 0xffff, old_eflags);
1094 PUSHW(ssp, esp, 0xffff, old_cs);
1095 PUSHW(ssp, esp, 0xffff, next_eip);
1096
1097 /* update processor state */
1098 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1099 env->eip = offset;
1100 env->segs[R_CS].selector = selector;
1101 env->segs[R_CS].base = (selector << 4);
1102 env->eflags &= ~(TF_MASK | RF_MASK);
1103
1104 if (iopl < 3)
1105 env->eflags &= ~VIF_MASK;
1106 else
1107 env->eflags &= ~IF_MASK;
1108}
1109
1110#endif /* VBOX */
1111
1112#ifdef TARGET_X86_64
1113
1114#define PUSHQ(sp, val)\
1115{\
1116 sp -= 8;\
1117 stq_kernel(sp, (val));\
1118}
1119
1120#define POPQ(sp, val)\
1121{\
1122 val = ldq_kernel(sp);\
1123 sp += 8;\
1124}
1125
1126static inline target_ulong get_rsp_from_tss(int level)
1127{
1128 int index;
1129
1130#if 0
1131 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1132 env->tr.base, env->tr.limit);
1133#endif
1134
1135 if (!(env->tr.flags & DESC_P_MASK))
1136 cpu_abort(env, "invalid tss");
1137 index = 8 * level + 4;
1138 if ((index + 7) > env->tr.limit)
1139 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1140 return ldq_kernel(env->tr.base + index);
1141}
1142
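/* In long mode the 64-bit TSS stores RSP0-RSP2 from offset 4 and IST1-IST7
   from offset 36 as consecutive 8-byte fields, so get_rsp_from_tss(level)
   with level = ist + 3 below selects the IST slot requested by the gate. */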
1143/* 64 bit interrupt */
1144static void do_interrupt64(int intno, int is_int, int error_code,
1145 target_ulong next_eip, int is_hw)
1146{
1147 SegmentCache *dt;
1148 target_ulong ptr;
1149 int type, dpl, selector, cpl, ist;
1150 int has_error_code, new_stack;
1151 uint32_t e1, e2, e3, ss;
1152 target_ulong old_eip, esp, offset;
1153
1154#ifdef VBOX
1155 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1156 cpu_loop_exit();
1157#endif
1158
1159 has_error_code = 0;
1160 if (!is_int && !is_hw) {
1161 switch(intno) {
1162 case 8:
1163 case 10:
1164 case 11:
1165 case 12:
1166 case 13:
1167 case 14:
1168 case 17:
1169 has_error_code = 1;
1170 break;
1171 }
1172 }
1173 if (is_int)
1174 old_eip = next_eip;
1175 else
1176 old_eip = env->eip;
1177
1178 dt = &env->idt;
1179 if (intno * 16 + 15 > dt->limit)
1180 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1181 ptr = dt->base + intno * 16;
1182 e1 = ldl_kernel(ptr);
1183 e2 = ldl_kernel(ptr + 4);
1184 e3 = ldl_kernel(ptr + 8);
1185 /* check gate type */
1186 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1187 switch(type) {
1188 case 14: /* 386 interrupt gate */
1189 case 15: /* 386 trap gate */
1190 break;
1191 default:
1192 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1193 break;
1194 }
1195 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1196 cpl = env->hflags & HF_CPL_MASK;
1197 /* check privilege if software int */
1198 if (is_int && dpl < cpl)
1199 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1200 /* check valid bit */
1201 if (!(e2 & DESC_P_MASK))
1202 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1203 selector = e1 >> 16;
1204 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1205 ist = e2 & 7;
1206 if ((selector & 0xfffc) == 0)
1207 raise_exception_err(EXCP0D_GPF, 0);
1208
1209 if (load_segment(&e1, &e2, selector) != 0)
1210 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1211 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1212 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1213 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1214 if (dpl > cpl)
1215 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1216 if (!(e2 & DESC_P_MASK))
1217 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1218 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1219 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1220 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1221 /* to inner privilege */
1222 if (ist != 0)
1223 esp = get_rsp_from_tss(ist + 3);
1224 else
1225 esp = get_rsp_from_tss(dpl);
1226 esp &= ~0xfLL; /* align stack */
1227 ss = 0;
1228 new_stack = 1;
1229 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1230 /* to same privilege */
1231 if (env->eflags & VM_MASK)
1232 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1233 new_stack = 0;
1234 if (ist != 0)
1235 esp = get_rsp_from_tss(ist + 3);
1236 else
1237 esp = ESP;
1238 esp &= ~0xfLL; /* align stack */
1239 dpl = cpl;
1240 } else {
1241 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1242 new_stack = 0; /* avoid warning */
1243 esp = 0; /* avoid warning */
1244 }
1245
1246 PUSHQ(esp, env->segs[R_SS].selector);
1247 PUSHQ(esp, ESP);
1248 PUSHQ(esp, compute_eflags());
1249 PUSHQ(esp, env->segs[R_CS].selector);
1250 PUSHQ(esp, old_eip);
1251 if (has_error_code) {
1252 PUSHQ(esp, error_code);
1253 }
1254
1255 if (new_stack) {
1256 ss = 0 | dpl;
1257 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1258 }
1259 ESP = esp;
1260
1261 selector = (selector & ~3) | dpl;
1262 cpu_x86_load_seg_cache(env, R_CS, selector,
1263 get_seg_base(e1, e2),
1264 get_seg_limit(e1, e2),
1265 e2);
1266 cpu_x86_set_cpl(env, dpl);
1267 env->eip = offset;
1268
1269 /* interrupt gate clear IF mask */
1270 if ((type & 1) == 0) {
1271 env->eflags &= ~IF_MASK;
1272 }
1273
1274#ifndef VBOX
1275 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1276#else /* VBOX */
1277 /*
1278 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1279 * gets confused by seemingly changed EFLAGS. See #3491 and
1280 * public bug #2341.
1281 */
1282 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1283#endif /* VBOX */
1284}
1285#endif
1286
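/* SYSCALL/SYSRET: the SYSCALL target CS selector comes from STAR[47:32] and
   the SYSRET one from STAR[63:48]. In long mode RIP is taken from LSTAR
   (64-bit callers) or CSTAR (compatibility mode) and the RFLAGS bits set in
   FMASK are cleared; legacy mode jumps to the EIP held in STAR[31:0]. */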
1287#if defined(CONFIG_USER_ONLY)
1288void helper_syscall(int next_eip_addend)
1289{
1290 env->exception_index = EXCP_SYSCALL;
1291 env->exception_next_eip = env->eip + next_eip_addend;
1292 cpu_loop_exit();
1293}
1294#else
1295void helper_syscall(int next_eip_addend)
1296{
1297 int selector;
1298
1299 if (!(env->efer & MSR_EFER_SCE)) {
1300 raise_exception_err(EXCP06_ILLOP, 0);
1301 }
1302 selector = (env->star >> 32) & 0xffff;
1303#ifdef TARGET_X86_64
1304 if (env->hflags & HF_LMA_MASK) {
1305 int code64;
1306
1307 ECX = env->eip + next_eip_addend;
1308 env->regs[11] = compute_eflags();
1309
1310 code64 = env->hflags & HF_CS64_MASK;
1311
1312 cpu_x86_set_cpl(env, 0);
1313 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1314 0, 0xffffffff,
1315 DESC_G_MASK | DESC_P_MASK |
1316 DESC_S_MASK |
1317 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1318 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1319 0, 0xffffffff,
1320 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1321 DESC_S_MASK |
1322 DESC_W_MASK | DESC_A_MASK);
1323 env->eflags &= ~env->fmask;
1324 load_eflags(env->eflags, 0);
1325 if (code64)
1326 env->eip = env->lstar;
1327 else
1328 env->eip = env->cstar;
1329 } else
1330#endif
1331 {
1332 ECX = (uint32_t)(env->eip + next_eip_addend);
1333
1334 cpu_x86_set_cpl(env, 0);
1335 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1336 0, 0xffffffff,
1337 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1338 DESC_S_MASK |
1339 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1340 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1341 0, 0xffffffff,
1342 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1343 DESC_S_MASK |
1344 DESC_W_MASK | DESC_A_MASK);
1345 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1346 env->eip = (uint32_t)env->star;
1347 }
1348}
1349#endif
1350
1351void helper_sysret(int dflag)
1352{
1353 int cpl, selector;
1354
1355 if (!(env->efer & MSR_EFER_SCE)) {
1356 raise_exception_err(EXCP06_ILLOP, 0);
1357 }
1358 cpl = env->hflags & HF_CPL_MASK;
1359 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1360 raise_exception_err(EXCP0D_GPF, 0);
1361 }
1362 selector = (env->star >> 48) & 0xffff;
1363#ifdef TARGET_X86_64
1364 if (env->hflags & HF_LMA_MASK) {
1365 if (dflag == 2) {
1366 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1367 0, 0xffffffff,
1368 DESC_G_MASK | DESC_P_MASK |
1369 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1370 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1371 DESC_L_MASK);
1372 env->eip = ECX;
1373 } else {
1374 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1375 0, 0xffffffff,
1376 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1377 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1378 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1379 env->eip = (uint32_t)ECX;
1380 }
1381 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1382 0, 0xffffffff,
1383 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1384 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1385 DESC_W_MASK | DESC_A_MASK);
1386 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1387 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1388 cpu_x86_set_cpl(env, 3);
1389 } else
1390#endif
1391 {
1392 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1393 0, 0xffffffff,
1394 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1395 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1396 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1397 env->eip = (uint32_t)ECX;
1398 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1399 0, 0xffffffff,
1400 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1401 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1402 DESC_W_MASK | DESC_A_MASK);
1403 env->eflags |= IF_MASK;
1404 cpu_x86_set_cpl(env, 3);
1405 }
1406#ifdef USE_KQEMU
1407 if (kqemu_is_ok(env)) {
1408 if (env->hflags & HF_LMA_MASK)
1409 CC_OP = CC_OP_EFLAGS;
1410 env->exception_index = -1;
1411 cpu_loop_exit();
1412 }
1413#endif
1414}
1415
1416#ifdef VBOX
1417/**
1418 * Checks and processes external VMM events.
1419 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1420 */
1421void helper_external_event(void)
1422{
1423#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1424 uintptr_t uSP;
1425# ifdef RT_ARCH_AMD64
1426 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1427# else
1428 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1429# endif
1430 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1431#endif
1432 /* Keep in sync with flags checked by gen_check_external_event() */
1433 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1434 {
1435 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1436 ~CPU_INTERRUPT_EXTERNAL_HARD);
1437 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1438 }
1439 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1440 {
1441 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1442 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1443 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1444 }
1445 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1446 {
1447 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1448 ~CPU_INTERRUPT_EXTERNAL_DMA);
1449 remR3DmaRun(env);
1450 }
1451 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1452 {
1453 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1454 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1455 remR3TimersRun(env);
1456 }
1457}
1458/* helper for recording call instruction addresses for later scanning */
1459void helper_record_call()
1460{
1461 if ( !(env->state & CPU_RAW_RING0)
1462 && (env->cr[0] & CR0_PG_MASK)
1463 && !(env->eflags & X86_EFL_IF))
1464 remR3RecordCall(env);
1465}
1466#endif /* VBOX */
1467
1468/* real mode interrupt */
1469static void do_interrupt_real(int intno, int is_int, int error_code,
1470 unsigned int next_eip)
1471{
1472 SegmentCache *dt;
1473 target_ulong ptr, ssp;
1474 int selector;
1475 uint32_t offset, esp;
1476 uint32_t old_cs, old_eip;
1477
1478 /* real mode (simpler !) */
1479 dt = &env->idt;
1480#ifndef VBOX
1481 if (intno * 4 + 3 > dt->limit)
1482#else
1483 if ((unsigned)intno * 4 + 3 > dt->limit)
1484#endif
1485 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1486 ptr = dt->base + intno * 4;
1487 offset = lduw_kernel(ptr);
1488 selector = lduw_kernel(ptr + 2);
1489 esp = ESP;
1490 ssp = env->segs[R_SS].base;
1491 if (is_int)
1492 old_eip = next_eip;
1493 else
1494 old_eip = env->eip;
1495 old_cs = env->segs[R_CS].selector;
1496 /* XXX: use SS segment size ? */
1497 PUSHW(ssp, esp, 0xffff, compute_eflags());
1498 PUSHW(ssp, esp, 0xffff, old_cs);
1499 PUSHW(ssp, esp, 0xffff, old_eip);
1500
1501 /* update processor state */
1502 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1503 env->eip = offset;
1504 env->segs[R_CS].selector = selector;
1505 env->segs[R_CS].base = (selector << 4);
1506 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1507}
1508
1509/* fake user mode interrupt */
1510void do_interrupt_user(int intno, int is_int, int error_code,
1511 target_ulong next_eip)
1512{
1513 SegmentCache *dt;
1514 target_ulong ptr;
1515 int dpl, cpl, shift;
1516 uint32_t e2;
1517
1518 dt = &env->idt;
1519 if (env->hflags & HF_LMA_MASK) {
1520 shift = 4;
1521 } else {
1522 shift = 3;
1523 }
1524 ptr = dt->base + (intno << shift);
1525 e2 = ldl_kernel(ptr + 4);
1526
1527 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1528 cpl = env->hflags & HF_CPL_MASK;
1529 /* check privilege if software int */
1530 if (is_int && dpl < cpl)
1531 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1532
1533 /* Since we emulate only user space, we cannot do more than
1534 exiting the emulation with the suitable exception and error
1535 code */
1536 if (is_int)
1537 EIP = next_eip;
1538}
1539
1540/*
1541 * Begin execution of an interrupt. is_int is TRUE if coming from
1542 * the int instruction. next_eip is the EIP value AFTER the interrupt
1543 * instruction. It is only relevant if is_int is TRUE.
1544 */
1545void do_interrupt(int intno, int is_int, int error_code,
1546 target_ulong next_eip, int is_hw)
1547{
1548#ifdef VBOX
1549 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1550 if (is_int) {
1551 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1552 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1553 } else {
1554 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1555 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1556 }
1557 }
1558#endif
1559
1560 if (loglevel & CPU_LOG_INT) {
1561 if ((env->cr[0] & CR0_PE_MASK)) {
1562 static int count;
1563 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1564 count, intno, error_code, is_int,
1565 env->hflags & HF_CPL_MASK,
1566 env->segs[R_CS].selector, EIP,
1567 (int)env->segs[R_CS].base + EIP,
1568 env->segs[R_SS].selector, ESP);
1569 if (intno == 0x0e) {
1570 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1571 } else {
1572 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1573 }
1574 fprintf(logfile, "\n");
1575 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1576#if 0
1577 {
1578 int i;
1579 uint8_t *ptr;
1580 fprintf(logfile, " code=");
1581 ptr = env->segs[R_CS].base + env->eip;
1582 for(i = 0; i < 16; i++) {
1583 fprintf(logfile, " %02x", ldub(ptr + i));
1584 }
1585 fprintf(logfile, "\n");
1586 }
1587#endif
1588 count++;
1589 }
1590 }
1591 if (env->cr[0] & CR0_PE_MASK) {
1592#ifdef TARGET_X86_64
1593 if (env->hflags & HF_LMA_MASK) {
1594 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1595 } else
1596#endif
1597 {
1598#ifdef VBOX
1599 /* int xx *, v86 code and VME enabled? */
1600 if ( (env->eflags & VM_MASK)
1601 && (env->cr[4] & CR4_VME_MASK)
1602 && is_int
1603 && !is_hw
1604 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1605 )
1606 do_soft_interrupt_vme(intno, error_code, next_eip);
1607 else
1608#endif /* VBOX */
1609 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1610 }
1611 } else {
1612 do_interrupt_real(intno, is_int, error_code, next_eip);
1613 }
1614}
1615
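/* Exception 0 (#DE) and exceptions 10-13 (#TS, #NP, #SS, #GP) form the
   "contributory" class: a contributory fault raised while handling another
   contributory fault, or a contributory/page fault raised while delivering a
   page fault, escalates to a double fault, and any fault during double-fault
   delivery is a triple fault (cpu_abort below). */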
1616/*
1617 * Check nested exceptions and change to double or triple fault if
1618 * needed. It should only be called if this is not an interrupt.
1619 * Returns the new exception number.
1620 */
1621static int check_exception(int intno, int *error_code)
1622{
1623 int first_contributory = env->old_exception == 0 ||
1624 (env->old_exception >= 10 &&
1625 env->old_exception <= 13);
1626 int second_contributory = intno == 0 ||
1627 (intno >= 10 && intno <= 13);
1628
1629 if (loglevel & CPU_LOG_INT)
1630 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1631 env->old_exception, intno);
1632
1633 if (env->old_exception == EXCP08_DBLE)
1634 cpu_abort(env, "triple fault");
1635
1636 if ((first_contributory && second_contributory)
1637 || (env->old_exception == EXCP0E_PAGE &&
1638 (second_contributory || (intno == EXCP0E_PAGE)))) {
1639 intno = EXCP08_DBLE;
1640 *error_code = 0;
1641 }
1642
1643 if (second_contributory || (intno == EXCP0E_PAGE) ||
1644 (intno == EXCP08_DBLE))
1645 env->old_exception = intno;
1646
1647 return intno;
1648}
1649
1650/*
1651 * Signal an interruption. It is executed in the main CPU loop.
1652 * is_int is TRUE if coming from the int instruction. next_eip is the
1653 * EIP value AFTER the interrupt instruction. It is only relevant if
1654 * is_int is TRUE.
1655 */
1656void raise_interrupt(int intno, int is_int, int error_code,
1657 int next_eip_addend)
1658{
1659#if defined(VBOX) && defined(DEBUG)
1660 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1661#endif
1662 if (!is_int) {
1663 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1664 intno = check_exception(intno, &error_code);
1665 } else {
1666 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1667 }
1668
1669 env->exception_index = intno;
1670 env->error_code = error_code;
1671 env->exception_is_int = is_int;
1672 env->exception_next_eip = env->eip + next_eip_addend;
1673 cpu_loop_exit();
1674}
1675
1676/* shortcuts to generate exceptions */
1677
1678void (raise_exception_err)(int exception_index, int error_code)
1679{
1680 raise_interrupt(exception_index, 0, error_code, 0);
1681}
1682
1683void raise_exception(int exception_index)
1684{
1685 raise_interrupt(exception_index, 0, 0, 0);
1686}
1687
1688/* SMM support */
1689
1690#if defined(CONFIG_USER_ONLY)
1691
1692void do_smm_enter(void)
1693{
1694}
1695
1696void helper_rsm(void)
1697{
1698}
1699
1700#else
1701
1702#ifdef TARGET_X86_64
1703#define SMM_REVISION_ID 0x00020064
1704#else
1705#define SMM_REVISION_ID 0x00020000
1706#endif
1707
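/* On SMI entry the register state is written to the SMRAM state save area at
   smbase + 0x8000 (the sm_state + 0x7xxx offsets below), using the 64-bit
   layout when TARGET_X86_64 is defined and the legacy 32-bit layout
   otherwise. Bit 17 of the saved revision ID advertises SMBASE relocation,
   which helper_rsm checks before restoring env->smbase. */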
1708void do_smm_enter(void)
1709{
1710 target_ulong sm_state;
1711 SegmentCache *dt;
1712 int i, offset;
1713
1714 if (loglevel & CPU_LOG_INT) {
1715 fprintf(logfile, "SMM: enter\n");
1716 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1717 }
1718
1719 env->hflags |= HF_SMM_MASK;
1720 cpu_smm_update(env);
1721
1722 sm_state = env->smbase + 0x8000;
1723
1724#ifdef TARGET_X86_64
1725 for(i = 0; i < 6; i++) {
1726 dt = &env->segs[i];
1727 offset = 0x7e00 + i * 16;
1728 stw_phys(sm_state + offset, dt->selector);
1729 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1730 stl_phys(sm_state + offset + 4, dt->limit);
1731 stq_phys(sm_state + offset + 8, dt->base);
1732 }
1733
1734 stq_phys(sm_state + 0x7e68, env->gdt.base);
1735 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1736
1737 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1738 stq_phys(sm_state + 0x7e78, env->ldt.base);
1739 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1740 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1741
1742 stq_phys(sm_state + 0x7e88, env->idt.base);
1743 stl_phys(sm_state + 0x7e84, env->idt.limit);
1744
1745 stw_phys(sm_state + 0x7e90, env->tr.selector);
1746 stq_phys(sm_state + 0x7e98, env->tr.base);
1747 stl_phys(sm_state + 0x7e94, env->tr.limit);
1748 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1749
1750 stq_phys(sm_state + 0x7ed0, env->efer);
1751
1752 stq_phys(sm_state + 0x7ff8, EAX);
1753 stq_phys(sm_state + 0x7ff0, ECX);
1754 stq_phys(sm_state + 0x7fe8, EDX);
1755 stq_phys(sm_state + 0x7fe0, EBX);
1756 stq_phys(sm_state + 0x7fd8, ESP);
1757 stq_phys(sm_state + 0x7fd0, EBP);
1758 stq_phys(sm_state + 0x7fc8, ESI);
1759 stq_phys(sm_state + 0x7fc0, EDI);
1760 for(i = 8; i < 16; i++)
1761 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1762 stq_phys(sm_state + 0x7f78, env->eip);
1763 stl_phys(sm_state + 0x7f70, compute_eflags());
1764 stl_phys(sm_state + 0x7f68, env->dr[6]);
1765 stl_phys(sm_state + 0x7f60, env->dr[7]);
1766
1767 stl_phys(sm_state + 0x7f48, env->cr[4]);
1768 stl_phys(sm_state + 0x7f50, env->cr[3]);
1769 stl_phys(sm_state + 0x7f58, env->cr[0]);
1770
1771 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1772 stl_phys(sm_state + 0x7f00, env->smbase);
1773#else
1774 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1775 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1776 stl_phys(sm_state + 0x7ff4, compute_eflags());
1777 stl_phys(sm_state + 0x7ff0, env->eip);
1778 stl_phys(sm_state + 0x7fec, EDI);
1779 stl_phys(sm_state + 0x7fe8, ESI);
1780 stl_phys(sm_state + 0x7fe4, EBP);
1781 stl_phys(sm_state + 0x7fe0, ESP);
1782 stl_phys(sm_state + 0x7fdc, EBX);
1783 stl_phys(sm_state + 0x7fd8, EDX);
1784 stl_phys(sm_state + 0x7fd4, ECX);
1785 stl_phys(sm_state + 0x7fd0, EAX);
1786 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1787 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1788
1789 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1790 stl_phys(sm_state + 0x7f64, env->tr.base);
1791 stl_phys(sm_state + 0x7f60, env->tr.limit);
1792 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1793
1794 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1795 stl_phys(sm_state + 0x7f80, env->ldt.base);
1796 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1797 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1798
1799 stl_phys(sm_state + 0x7f74, env->gdt.base);
1800 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1801
1802 stl_phys(sm_state + 0x7f58, env->idt.base);
1803 stl_phys(sm_state + 0x7f54, env->idt.limit);
1804
1805 for(i = 0; i < 6; i++) {
1806 dt = &env->segs[i];
1807 if (i < 3)
1808 offset = 0x7f84 + i * 12;
1809 else
1810 offset = 0x7f2c + (i - 3) * 12;
1811 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1812 stl_phys(sm_state + offset + 8, dt->base);
1813 stl_phys(sm_state + offset + 4, dt->limit);
1814 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1815 }
1816 stl_phys(sm_state + 0x7f14, env->cr[4]);
1817
1818 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1819 stl_phys(sm_state + 0x7ef8, env->smbase);
1820#endif
1821 /* init SMM cpu state */
1822
1823#ifdef TARGET_X86_64
1824 cpu_load_efer(env, 0);
1825#endif
1826 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1827 env->eip = 0x00008000;
1828 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1829 0xffffffff, 0);
1830 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1831 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1832 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1833 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1834 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1835
1836 cpu_x86_update_cr0(env,
1837 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1838 cpu_x86_update_cr4(env, 0);
1839 env->dr[7] = 0x00000400;
1840 CC_OP = CC_OP_EFLAGS;
1841}
1842
1843void helper_rsm(void)
1844{
1845#ifdef VBOX
1846 cpu_abort(env, "helper_rsm");
1847#else /* !VBOX */
1850 target_ulong sm_state;
1851 int i, offset;
1852 uint32_t val;
1853
1854 sm_state = env->smbase + 0x8000;
1855#ifdef TARGET_X86_64
1856 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1857
1858 for(i = 0; i < 6; i++) {
1859 offset = 0x7e00 + i * 16;
1860 cpu_x86_load_seg_cache(env, i,
1861 lduw_phys(sm_state + offset),
1862 ldq_phys(sm_state + offset + 8),
1863 ldl_phys(sm_state + offset + 4),
1864 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1865 }
1866
1867 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1868 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1869
1870 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1871 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1872 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1873 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1874
1875 env->idt.base = ldq_phys(sm_state + 0x7e88);
1876 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1877
1878 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1879 env->tr.base = ldq_phys(sm_state + 0x7e98);
1880 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1881 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1882
1883 EAX = ldq_phys(sm_state + 0x7ff8);
1884 ECX = ldq_phys(sm_state + 0x7ff0);
1885 EDX = ldq_phys(sm_state + 0x7fe8);
1886 EBX = ldq_phys(sm_state + 0x7fe0);
1887 ESP = ldq_phys(sm_state + 0x7fd8);
1888 EBP = ldq_phys(sm_state + 0x7fd0);
1889 ESI = ldq_phys(sm_state + 0x7fc8);
1890 EDI = ldq_phys(sm_state + 0x7fc0);
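    /* R8-R15 follow at successively lower addresses, 8 bytes apart (0x7fb8 down to 0x7f80) */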
1891 for(i = 8; i < 16; i++)
1892 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1893 env->eip = ldq_phys(sm_state + 0x7f78);
1894 load_eflags(ldl_phys(sm_state + 0x7f70),
1895 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1896 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1897 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1898
1899 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1900 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1901 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1902
1903 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1904 if (val & 0x20000) {
1905 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1906 }
1907#else
1908 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1909 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1910 load_eflags(ldl_phys(sm_state + 0x7ff4),
1911 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1912 env->eip = ldl_phys(sm_state + 0x7ff0);
1913 EDI = ldl_phys(sm_state + 0x7fec);
1914 ESI = ldl_phys(sm_state + 0x7fe8);
1915 EBP = ldl_phys(sm_state + 0x7fe4);
1916 ESP = ldl_phys(sm_state + 0x7fe0);
1917 EBX = ldl_phys(sm_state + 0x7fdc);
1918 EDX = ldl_phys(sm_state + 0x7fd8);
1919 ECX = ldl_phys(sm_state + 0x7fd4);
1920 EAX = ldl_phys(sm_state + 0x7fd0);
1921 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1922 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1923
1924 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1925 env->tr.base = ldl_phys(sm_state + 0x7f64);
1926 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1927 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1928
1929 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1930 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1931 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1932 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1933
1934 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1935 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1936
1937 env->idt.base = ldl_phys(sm_state + 0x7f58);
1938 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1939
1940 for(i = 0; i < 6; i++) {
1941 if (i < 3)
1942 offset = 0x7f84 + i * 12;
1943 else
1944 offset = 0x7f2c + (i - 3) * 12;
1945 cpu_x86_load_seg_cache(env, i,
1946 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1947 ldl_phys(sm_state + offset + 8),
1948 ldl_phys(sm_state + offset + 4),
1949 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1950 }
1951 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1952
1953 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1954 if (val & 0x20000) {
1955 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1956 }
1957#endif
1958 CC_OP = CC_OP_EFLAGS;
1959 env->hflags &= ~HF_SMM_MASK;
1960 cpu_smm_update(env);
1961
1962 if (loglevel & CPU_LOG_INT) {
1963 fprintf(logfile, "SMM: after RSM\n");
1964 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1965 }
1966#endif /* !VBOX */
1967}
1968
1969#endif /* !CONFIG_USER_ONLY */
1970
1971
1972/* division, flags are undefined */
1973
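/* Note: quotient overflow raises the same #DE (EXCP00_DIVZ) as a division by zero, matching real hardware. */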
1974void helper_divb_AL(target_ulong t0)
1975{
1976 unsigned int num, den, q, r;
1977
1978 num = (EAX & 0xffff);
1979 den = (t0 & 0xff);
1980 if (den == 0) {
1981 raise_exception(EXCP00_DIVZ);
1982 }
1983 q = (num / den);
1984 if (q > 0xff)
1985 raise_exception(EXCP00_DIVZ);
1986 q &= 0xff;
1987 r = (num % den) & 0xff;
1988 EAX = (EAX & ~0xffff) | (r << 8) | q;
1989}
1990
1991void helper_idivb_AL(target_ulong t0)
1992{
1993 int num, den, q, r;
1994
1995 num = (int16_t)EAX;
1996 den = (int8_t)t0;
1997 if (den == 0) {
1998 raise_exception(EXCP00_DIVZ);
1999 }
2000 q = (num / den);
2001 if (q != (int8_t)q)
2002 raise_exception(EXCP00_DIVZ);
2003 q &= 0xff;
2004 r = (num % den) & 0xff;
2005 EAX = (EAX & ~0xffff) | (r << 8) | q;
2006}
2007
2008void helper_divw_AX(target_ulong t0)
2009{
2010 unsigned int num, den, q, r;
2011
2012 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2013 den = (t0 & 0xffff);
2014 if (den == 0) {
2015 raise_exception(EXCP00_DIVZ);
2016 }
2017 q = (num / den);
2018 if (q > 0xffff)
2019 raise_exception(EXCP00_DIVZ);
2020 q &= 0xffff;
2021 r = (num % den) & 0xffff;
2022 EAX = (EAX & ~0xffff) | q;
2023 EDX = (EDX & ~0xffff) | r;
2024}
2025
2026void helper_idivw_AX(target_ulong t0)
2027{
2028 int num, den, q, r;
2029
2030 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2031 den = (int16_t)t0;
2032 if (den == 0) {
2033 raise_exception(EXCP00_DIVZ);
2034 }
2035 q = (num / den);
2036 if (q != (int16_t)q)
2037 raise_exception(EXCP00_DIVZ);
2038 q &= 0xffff;
2039 r = (num % den) & 0xffff;
2040 EAX = (EAX & ~0xffff) | q;
2041 EDX = (EDX & ~0xffff) | r;
2042}
2043
2044void helper_divl_EAX(target_ulong t0)
2045{
2046 unsigned int den, r;
2047 uint64_t num, q;
2048
2049 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2050 den = t0;
2051 if (den == 0) {
2052 raise_exception(EXCP00_DIVZ);
2053 }
2054 q = (num / den);
2055 r = (num % den);
2056 if (q > 0xffffffff)
2057 raise_exception(EXCP00_DIVZ);
2058 EAX = (uint32_t)q;
2059 EDX = (uint32_t)r;
2060}
2061
2062void helper_idivl_EAX(target_ulong t0)
2063{
2064 int den, r;
2065 int64_t num, q;
2066
2067 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2068 den = t0;
2069 if (den == 0) {
2070 raise_exception(EXCP00_DIVZ);
2071 }
2072 q = (num / den);
2073 r = (num % den);
2074 if (q != (int32_t)q)
2075 raise_exception(EXCP00_DIVZ);
2076 EAX = (uint32_t)q;
2077 EDX = (uint32_t)r;
2078}
2079
2080/* bcd */
2081
2082 /* AAM divides AL by the immediate base (10 for the plain opcode); an immediate of 0 raises #DE like a divide by zero */
2083 void helper_aam(int base)
2084 {
2085 int al, ah;
    if (base == 0)
        raise_exception(EXCP00_DIVZ);
2086 al = EAX & 0xff;
2087 ah = al / base;
2088 al = al % base;
2089 EAX = (EAX & ~0xffff) | al | (ah << 8);
2090 CC_DST = al;
2091}
2092
2093void helper_aad(int base)
2094{
2095 int al, ah;
2096 al = EAX & 0xff;
2097 ah = (EAX >> 8) & 0xff;
2098 al = ((ah * base) + al) & 0xff;
2099 EAX = (EAX & ~0xffff) | al;
2100 CC_DST = al;
2101}
2102
2103void helper_aaa(void)
2104{
2105 int icarry;
2106 int al, ah, af;
2107 int eflags;
2108
2109 eflags = cc_table[CC_OP].compute_all();
2110 af = eflags & CC_A;
2111 al = EAX & 0xff;
2112 ah = (EAX >> 8) & 0xff;
2113
2114 icarry = (al > 0xf9);
2115 if (((al & 0x0f) > 9 ) || af) {
2116 al = (al + 6) & 0x0f;
2117 ah = (ah + 1 + icarry) & 0xff;
2118 eflags |= CC_C | CC_A;
2119 } else {
2120 eflags &= ~(CC_C | CC_A);
2121 al &= 0x0f;
2122 }
2123 EAX = (EAX & ~0xffff) | al | (ah << 8);
2124 CC_SRC = eflags;
2125 FORCE_RET();
2126}
2127
2128void helper_aas(void)
2129{
2130 int icarry;
2131 int al, ah, af;
2132 int eflags;
2133
2134 eflags = cc_table[CC_OP].compute_all();
2135 af = eflags & CC_A;
2136 al = EAX & 0xff;
2137 ah = (EAX >> 8) & 0xff;
2138
2139 icarry = (al < 6);
2140 if (((al & 0x0f) > 9 ) || af) {
2141 al = (al - 6) & 0x0f;
2142 ah = (ah - 1 - icarry) & 0xff;
2143 eflags |= CC_C | CC_A;
2144 } else {
2145 eflags &= ~(CC_C | CC_A);
2146 al &= 0x0f;
2147 }
2148 EAX = (EAX & ~0xffff) | al | (ah << 8);
2149 CC_SRC = eflags;
2150 FORCE_RET();
2151}
2152
2153void helper_daa(void)
2154{
2155 int al, af, cf;
2156 int eflags;
2157
2158 eflags = cc_table[CC_OP].compute_all();
2159 cf = eflags & CC_C;
2160 af = eflags & CC_A;
2161 al = EAX & 0xff;
2162
2163 eflags = 0;
2164 if (((al & 0x0f) > 9 ) || af) {
2165 al = (al + 6) & 0xff;
2166 eflags |= CC_A;
2167 }
2168 if ((al > 0x9f) || cf) {
2169 al = (al + 0x60) & 0xff;
2170 eflags |= CC_C;
2171 }
2172 EAX = (EAX & ~0xff) | al;
2173 /* well, speed is not an issue here, so we compute the flags by hand */
2174 eflags |= (al == 0) << 6; /* zf */
2175 eflags |= parity_table[al]; /* pf */
2176 eflags |= (al & 0x80); /* sf */
2177 CC_SRC = eflags;
2178 FORCE_RET();
2179}
2180
2181void helper_das(void)
2182{
2183 int al, al1, af, cf;
2184 int eflags;
2185
2186 eflags = cc_table[CC_OP].compute_all();
2187 cf = eflags & CC_C;
2188 af = eflags & CC_A;
2189 al = EAX & 0xff;
2190
2191 eflags = 0;
2192 al1 = al;
2193 if (((al & 0x0f) > 9 ) || af) {
2194 eflags |= CC_A;
2195 if (al < 6 || cf)
2196 eflags |= CC_C;
2197 al = (al - 6) & 0xff;
2198 }
2199 if ((al1 > 0x99) || cf) {
2200 al = (al - 0x60) & 0xff;
2201 eflags |= CC_C;
2202 }
2203 EAX = (EAX & ~0xff) | al;
2204 /* well, speed is not an issue here, so we compute the flags by hand */
2205 eflags |= (al == 0) << 6; /* zf */
2206 eflags |= parity_table[al]; /* pf */
2207 eflags |= (al & 0x80); /* sf */
2208 CC_SRC = eflags;
2209 FORCE_RET();
2210}
2211
2212void helper_into(int next_eip_addend)
2213{
2214 int eflags;
2215 eflags = cc_table[CC_OP].compute_all();
2216 if (eflags & CC_O) {
2217 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2218 }
2219}
2220
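/* CMPXCHG8B: compare EDX:EAX with the 64-bit operand at a0; on a match store ECX:EBX and set ZF,
   otherwise load the operand into EDX:EAX and clear ZF. */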
2221void helper_cmpxchg8b(target_ulong a0)
2222{
2223 uint64_t d;
2224 int eflags;
2225
2226 eflags = cc_table[CC_OP].compute_all();
2227 d = ldq(a0);
2228 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2229 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2230 eflags |= CC_Z;
2231 } else {
2232 /* always do the store */
2233 stq(a0, d);
2234 EDX = (uint32_t)(d >> 32);
2235 EAX = (uint32_t)d;
2236 eflags &= ~CC_Z;
2237 }
2238 CC_SRC = eflags;
2239}
2240
2241#ifdef TARGET_X86_64
2242void helper_cmpxchg16b(target_ulong a0)
2243{
2244 uint64_t d0, d1;
2245 int eflags;
2246
2247 if ((a0 & 0xf) != 0)
2248 raise_exception(EXCP0D_GPF);
2249 eflags = cc_table[CC_OP].compute_all();
2250 d0 = ldq(a0);
2251 d1 = ldq(a0 + 8);
2252 if (d0 == EAX && d1 == EDX) {
2253 stq(a0, EBX);
2254 stq(a0 + 8, ECX);
2255 eflags |= CC_Z;
2256 } else {
2257 /* always do the store */
2258 stq(a0, d0);
2259 stq(a0 + 8, d1);
2260 EDX = d1;
2261 EAX = d0;
2262 eflags &= ~CC_Z;
2263 }
2264 CC_SRC = eflags;
2265}
2266#endif
2267
2268void helper_single_step(void)
2269{
2270 env->dr[6] |= 0x4000;
2271 raise_exception(EXCP01_SSTP);
2272}
2273
2274void helper_cpuid(void)
2275{
2276#ifndef VBOX
2277 uint32_t index;
2278
2279 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2280
2281 index = (uint32_t)EAX;
2282 /* test if maximum index reached */
2283 if (index & 0x80000000) {
2284 if (index > env->cpuid_xlevel)
2285 index = env->cpuid_level;
2286 } else {
2287 if (index > env->cpuid_level)
2288 index = env->cpuid_level;
2289 }
2290
2291 switch(index) {
2292 case 0:
2293 EAX = env->cpuid_level;
2294 EBX = env->cpuid_vendor1;
2295 EDX = env->cpuid_vendor2;
2296 ECX = env->cpuid_vendor3;
2297 break;
2298 case 1:
2299 EAX = env->cpuid_version;
2300 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2301 ECX = env->cpuid_ext_features;
2302 EDX = env->cpuid_features;
2303 break;
2304 case 2:
2305 /* cache info: needed for Pentium Pro compatibility */
2306 EAX = 1;
2307 EBX = 0;
2308 ECX = 0;
2309 EDX = 0x2c307d;
2310 break;
2311 case 4:
2312 /* cache info: needed for Core compatibility */
2313 switch (ECX) {
2314 case 0: /* L1 dcache info */
2315 EAX = 0x0000121;
2316 EBX = 0x1c0003f;
2317 ECX = 0x000003f;
2318 EDX = 0x0000001;
2319 break;
2320 case 1: /* L1 icache info */
2321 EAX = 0x0000122;
2322 EBX = 0x1c0003f;
2323 ECX = 0x000003f;
2324 EDX = 0x0000001;
2325 break;
2326 case 2: /* L2 cache info */
2327 EAX = 0x0000143;
2328 EBX = 0x3c0003f;
2329 ECX = 0x0000fff;
2330 EDX = 0x0000001;
2331 break;
2332 default: /* end of info */
2333 EAX = 0;
2334 EBX = 0;
2335 ECX = 0;
2336 EDX = 0;
2337 break;
2338 }
2339
2340 break;
2341 case 5:
2342 /* mwait info: needed for Core compatibility */
2343 EAX = 0; /* Smallest monitor-line size in bytes */
2344 EBX = 0; /* Largest monitor-line size in bytes */
2345 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2346 EDX = 0;
2347 break;
2348 case 6:
2349 /* Thermal and Power Leaf */
2350 EAX = 0;
2351 EBX = 0;
2352 ECX = 0;
2353 EDX = 0;
2354 break;
2355 case 9:
2356 /* Direct Cache Access Information Leaf */
2357 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2358 EBX = 0;
2359 ECX = 0;
2360 EDX = 0;
2361 break;
2362 case 0xA:
2363 /* Architectural Performance Monitoring Leaf */
2364 EAX = 0;
2365 EBX = 0;
2366 ECX = 0;
2367 EDX = 0;
2368 break;
2369 case 0x80000000:
2370 EAX = env->cpuid_xlevel;
2371 EBX = env->cpuid_vendor1;
2372 EDX = env->cpuid_vendor2;
2373 ECX = env->cpuid_vendor3;
2374 break;
2375 case 0x80000001:
2376 EAX = env->cpuid_features;
2377 EBX = 0;
2378 ECX = env->cpuid_ext3_features;
2379 EDX = env->cpuid_ext2_features;
2380 break;
2381 case 0x80000002:
2382 case 0x80000003:
2383 case 0x80000004:
2384 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2385 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2386 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2387 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2388 break;
2389 case 0x80000005:
2390 /* cache info (L1 cache) */
2391 EAX = 0x01ff01ff;
2392 EBX = 0x01ff01ff;
2393 ECX = 0x40020140;
2394 EDX = 0x40020140;
2395 break;
2396 case 0x80000006:
2397 /* cache info (L2 cache) */
2398 EAX = 0;
2399 EBX = 0x42004200;
2400 ECX = 0x02008140;
2401 EDX = 0;
2402 break;
2403 case 0x80000008:
2404 /* virtual & phys address size in low 2 bytes. */
2405/* XXX: This value must match the one used in the MMU code. */
2406 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2407 /* 64 bit processor */
2408#if defined(USE_KQEMU)
2409 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2410#else
2411/* XXX: The physical address space is limited to 42 bits in exec.c. */
2412 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2413#endif
2414 } else {
2415#if defined(USE_KQEMU)
2416 EAX = 0x00000020; /* 32 bits physical */
2417#else
2418 if (env->cpuid_features & CPUID_PSE36)
2419 EAX = 0x00000024; /* 36 bits physical */
2420 else
2421 EAX = 0x00000020; /* 32 bits physical */
2422#endif
2423 }
2424 EBX = 0;
2425 ECX = 0;
2426 EDX = 0;
2427 break;
2428 case 0x8000000A:
2429 EAX = 0x00000001;
2430 EBX = 0;
2431 ECX = 0;
2432 EDX = 0;
2433 break;
2434 default:
2435 /* reserved values: zero */
2436 EAX = 0;
2437 EBX = 0;
2438 ECX = 0;
2439 EDX = 0;
2440 break;
2441 }
2442#else /* VBOX */
2443 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2444#endif /* VBOX */
2445}
2446
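/* ENTER with a non-zero nesting level: copy level-1 outer frame pointers from the old frame,
   then push the new frame pointer (t1). */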
2447void helper_enter_level(int level, int data32, target_ulong t1)
2448{
2449 target_ulong ssp;
2450 uint32_t esp_mask, esp, ebp;
2451
2452 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2453 ssp = env->segs[R_SS].base;
2454 ebp = EBP;
2455 esp = ESP;
2456 if (data32) {
2457 /* 32 bit */
2458 esp -= 4;
2459 while (--level) {
2460 esp -= 4;
2461 ebp -= 4;
2462 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2463 }
2464 esp -= 4;
2465 stl(ssp + (esp & esp_mask), t1);
2466 } else {
2467 /* 16 bit */
2468 esp -= 2;
2469 while (--level) {
2470 esp -= 2;
2471 ebp -= 2;
2472 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2473 }
2474 esp -= 2;
2475 stw(ssp + (esp & esp_mask), t1);
2476 }
2477}
2478
2479#ifdef TARGET_X86_64
2480void helper_enter64_level(int level, int data64, target_ulong t1)
2481{
2482 target_ulong esp, ebp;
2483 ebp = EBP;
2484 esp = ESP;
2485
2486 if (data64) {
2487 /* 64 bit */
2488 esp -= 8;
2489 while (--level) {
2490 esp -= 8;
2491 ebp -= 8;
2492 stq(esp, ldq(ebp));
2493 }
2494 esp -= 8;
2495 stq(esp, t1);
2496 } else {
2497 /* 16 bit */
2498 esp -= 2;
2499 while (--level) {
2500 esp -= 2;
2501 ebp -= 2;
2502 stw(esp, lduw(ebp));
2503 }
2504 esp -= 2;
2505 stw(esp, t1);
2506 }
2507}
2508#endif
2509
2510void helper_lldt(int selector)
2511{
2512 SegmentCache *dt;
2513 uint32_t e1, e2;
2514#ifndef VBOX
2515 int index, entry_limit;
2516#else
2517 unsigned int index, entry_limit;
2518#endif
2519 target_ulong ptr;
2520
2521#ifdef VBOX
2522 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2523 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2524#endif
2525
2526 selector &= 0xffff;
2527 if ((selector & 0xfffc) == 0) {
2528 /* XXX: NULL selector case: invalid LDT */
2529 env->ldt.base = 0;
2530 env->ldt.limit = 0;
2531 } else {
2532 if (selector & 0x4)
2533 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2534 dt = &env->gdt;
2535 index = selector & ~7;
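    /* system descriptors (LDT/TSS) are 16 bytes in long mode, 8 bytes otherwise */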
2536#ifdef TARGET_X86_64
2537 if (env->hflags & HF_LMA_MASK)
2538 entry_limit = 15;
2539 else
2540#endif
2541 entry_limit = 7;
2542 if ((index + entry_limit) > dt->limit)
2543 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2544 ptr = dt->base + index;
2545 e1 = ldl_kernel(ptr);
2546 e2 = ldl_kernel(ptr + 4);
2547 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2548 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2549 if (!(e2 & DESC_P_MASK))
2550 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2551#ifdef TARGET_X86_64
2552 if (env->hflags & HF_LMA_MASK) {
2553 uint32_t e3;
2554 e3 = ldl_kernel(ptr + 8);
2555 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2556 env->ldt.base |= (target_ulong)e3 << 32;
2557 } else
2558#endif
2559 {
2560 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2561 }
2562 }
2563 env->ldt.selector = selector;
2564#ifdef VBOX
2565 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2566 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2567#endif
2568}
2569
2570void helper_ltr(int selector)
2571{
2572 SegmentCache *dt;
2573 uint32_t e1, e2;
2574#ifndef VBOX
2575 int index, type, entry_limit;
2576#else
2577 unsigned int index;
2578 int type, entry_limit;
2579#endif
2580 target_ulong ptr;
2581
2582#ifdef VBOX
2583 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2584 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2585 env->tr.flags, (RTSEL)(selector & 0xffff)));
2586#endif
2587 selector &= 0xffff;
2588 if ((selector & 0xfffc) == 0) {
2589 /* NULL selector case: invalid TR */
2590 env->tr.base = 0;
2591 env->tr.limit = 0;
2592 env->tr.flags = 0;
2593 } else {
2594 if (selector & 0x4)
2595 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2596 dt = &env->gdt;
2597 index = selector & ~7;
2598#ifdef TARGET_X86_64
2599 if (env->hflags & HF_LMA_MASK)
2600 entry_limit = 15;
2601 else
2602#endif
2603 entry_limit = 7;
2604 if ((index + entry_limit) > dt->limit)
2605 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2606 ptr = dt->base + index;
2607 e1 = ldl_kernel(ptr);
2608 e2 = ldl_kernel(ptr + 4);
2609 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2610 if ((e2 & DESC_S_MASK) ||
2611 (type != 1 && type != 9))
2612 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2613 if (!(e2 & DESC_P_MASK))
2614 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2615#ifdef TARGET_X86_64
2616 if (env->hflags & HF_LMA_MASK) {
2617 uint32_t e3, e4;
2618 e3 = ldl_kernel(ptr + 8);
2619 e4 = ldl_kernel(ptr + 12);
2620 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2621 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2622 load_seg_cache_raw_dt(&env->tr, e1, e2);
2623 env->tr.base |= (target_ulong)e3 << 32;
2624 } else
2625#endif
2626 {
2627 load_seg_cache_raw_dt(&env->tr, e1, e2);
2628 }
2629 e2 |= DESC_TSS_BUSY_MASK;
2630 stl_kernel(ptr + 4, e2);
2631 }
2632 env->tr.selector = selector;
2633#ifdef VBOX
2634 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2635 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2636 env->tr.flags, (RTSEL)(selector & 0xffff)));
2637#endif
2638}
2639
2640/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2641void helper_load_seg(int seg_reg, int selector)
2642{
2643 uint32_t e1, e2;
2644 int cpl, dpl, rpl;
2645 SegmentCache *dt;
2646#ifndef VBOX
2647 int index;
2648#else
2649 unsigned int index;
2650#endif
2651 target_ulong ptr;
2652
2653 selector &= 0xffff;
2654 cpl = env->hflags & HF_CPL_MASK;
2655#ifdef VBOX
2656
2657 /* Trying to load a selector with RPL=1 while the guest runs in raw ring 0? */
2658 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2659 {
2660 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2661 selector = selector & 0xfffc;
2662 }
2663#endif /* VBOX */
2664 if ((selector & 0xfffc) == 0) {
2665 /* null selector case */
2666 if (seg_reg == R_SS
2667#ifdef TARGET_X86_64
2668 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2669#endif
2670 )
2671 raise_exception_err(EXCP0D_GPF, 0);
2672 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2673 } else {
2674
2675 if (selector & 0x4)
2676 dt = &env->ldt;
2677 else
2678 dt = &env->gdt;
2679 index = selector & ~7;
2680 if ((index + 7) > dt->limit)
2681 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2682 ptr = dt->base + index;
2683 e1 = ldl_kernel(ptr);
2684 e2 = ldl_kernel(ptr + 4);
2685
2686 if (!(e2 & DESC_S_MASK))
2687 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2688 rpl = selector & 3;
2689 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2690 if (seg_reg == R_SS) {
2691 /* must be writable segment */
2692 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2693 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2694 if (rpl != cpl || dpl != cpl)
2695 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2696 } else {
2697 /* must be readable segment */
2698 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2699 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2700
2701 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2702 /* if not conforming code, test rights */
2703 if (dpl < cpl || dpl < rpl)
2704 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2705 }
2706 }
2707
2708 if (!(e2 & DESC_P_MASK)) {
2709 if (seg_reg == R_SS)
2710 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2711 else
2712 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2713 }
2714
2715 /* set the access bit if not already set */
2716 if (!(e2 & DESC_A_MASK)) {
2717 e2 |= DESC_A_MASK;
2718 stl_kernel(ptr + 4, e2);
2719 }
2720
2721 cpu_x86_load_seg_cache(env, seg_reg, selector,
2722 get_seg_base(e1, e2),
2723 get_seg_limit(e1, e2),
2724 e2);
2725#if 0
2726 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2727 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2728#endif
2729 }
2730}
2731
2732/* protected mode jump */
2733void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2734 int next_eip_addend)
2735{
2736 int gate_cs, type;
2737 uint32_t e1, e2, cpl, dpl, rpl, limit;
2738 target_ulong next_eip;
2739
2740#ifdef VBOX
2741 e1 = e2 = 0;
2742#endif
2743 if ((new_cs & 0xfffc) == 0)
2744 raise_exception_err(EXCP0D_GPF, 0);
2745 if (load_segment(&e1, &e2, new_cs) != 0)
2746 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2747 cpl = env->hflags & HF_CPL_MASK;
2748 if (e2 & DESC_S_MASK) {
2749 if (!(e2 & DESC_CS_MASK))
2750 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2751 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2752 if (e2 & DESC_C_MASK) {
2753 /* conforming code segment */
2754 if (dpl > cpl)
2755 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2756 } else {
2757 /* non conforming code segment */
2758 rpl = new_cs & 3;
2759 if (rpl > cpl)
2760 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2761 if (dpl != cpl)
2762 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2763 }
2764 if (!(e2 & DESC_P_MASK))
2765 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2766 limit = get_seg_limit(e1, e2);
2767 if (new_eip > limit &&
2768 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2769 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2770 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2771 get_seg_base(e1, e2), limit, e2);
2772 EIP = new_eip;
2773 } else {
2774 /* jump to call or task gate */
2775 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2776 rpl = new_cs & 3;
2777 cpl = env->hflags & HF_CPL_MASK;
2778 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2779 switch(type) {
2780 case 1: /* 286 TSS */
2781 case 9: /* 386 TSS */
2782 case 5: /* task gate */
2783 if (dpl < cpl || dpl < rpl)
2784 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2785 next_eip = env->eip + next_eip_addend;
2786 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2787 CC_OP = CC_OP_EFLAGS;
2788 break;
2789 case 4: /* 286 call gate */
2790 case 12: /* 386 call gate */
2791 if ((dpl < cpl) || (dpl < rpl))
2792 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2793 if (!(e2 & DESC_P_MASK))
2794 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2795 gate_cs = e1 >> 16;
2796 new_eip = (e1 & 0xffff);
2797 if (type == 12)
2798 new_eip |= (e2 & 0xffff0000);
2799 if (load_segment(&e1, &e2, gate_cs) != 0)
2800 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2801 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2802 /* must be code segment */
2803 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2804 (DESC_S_MASK | DESC_CS_MASK)))
2805 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2806 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2807 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2808 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2809 if (!(e2 & DESC_P_MASK))
2810#ifdef VBOX /* See page 3-514 of 253666.pdf */
2811 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2812#else
2813 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2814#endif
2815 limit = get_seg_limit(e1, e2);
2816 if (new_eip > limit)
2817 raise_exception_err(EXCP0D_GPF, 0);
2818 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2819 get_seg_base(e1, e2), limit, e2);
2820 EIP = new_eip;
2821 break;
2822 default:
2823 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2824 break;
2825 }
2826 }
2827}
2828
2829/* real mode call */
2830void helper_lcall_real(int new_cs, target_ulong new_eip1,
2831 int shift, int next_eip)
2832{
2833 int new_eip;
2834 uint32_t esp, esp_mask;
2835 target_ulong ssp;
2836
2837 new_eip = new_eip1;
2838 esp = ESP;
2839 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2840 ssp = env->segs[R_SS].base;
2841 if (shift) {
2842 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2843 PUSHL(ssp, esp, esp_mask, next_eip);
2844 } else {
2845 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2846 PUSHW(ssp, esp, esp_mask, next_eip);
2847 }
2848
2849 SET_ESP(esp, esp_mask);
2850 env->eip = new_eip;
2851 env->segs[R_CS].selector = new_cs;
2852 env->segs[R_CS].base = (new_cs << 4);
2853}
2854
2855/* protected mode call */
2856void helper_lcall_protected(int new_cs, target_ulong new_eip,
2857 int shift, int next_eip_addend)
2858{
2859 int new_stack, i;
2860 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2861 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2862 uint32_t val, limit, old_sp_mask;
2863 target_ulong ssp, old_ssp, next_eip;
2864
2865#ifdef VBOX
2866 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2867#endif
2868 next_eip = env->eip + next_eip_addend;
2869#ifdef DEBUG_PCALL
2870 if (loglevel & CPU_LOG_PCALL) {
2871 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2872 new_cs, (uint32_t)new_eip, shift);
2873 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2874 }
2875#endif
2876 if ((new_cs & 0xfffc) == 0)
2877 raise_exception_err(EXCP0D_GPF, 0);
2878 if (load_segment(&e1, &e2, new_cs) != 0)
2879 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2880 cpl = env->hflags & HF_CPL_MASK;
2881#ifdef DEBUG_PCALL
2882 if (loglevel & CPU_LOG_PCALL) {
2883 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2884 }
2885#endif
2886 if (e2 & DESC_S_MASK) {
2887 if (!(e2 & DESC_CS_MASK))
2888 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2889 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2890 if (e2 & DESC_C_MASK) {
2891 /* conforming code segment */
2892 if (dpl > cpl)
2893 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2894 } else {
2895 /* non conforming code segment */
2896 rpl = new_cs & 3;
2897 if (rpl > cpl)
2898 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2899 if (dpl != cpl)
2900 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2901 }
2902 if (!(e2 & DESC_P_MASK))
2903 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2904
2905#ifdef TARGET_X86_64
2906 /* XXX: check 16/32 bit cases in long mode */
2907 if (shift == 2) {
2908 target_ulong rsp;
2909 /* 64 bit case */
2910 rsp = ESP;
2911 PUSHQ(rsp, env->segs[R_CS].selector);
2912 PUSHQ(rsp, next_eip);
2913 /* from this point, not restartable */
2914 ESP = rsp;
2915 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2916 get_seg_base(e1, e2),
2917 get_seg_limit(e1, e2), e2);
2918 EIP = new_eip;
2919 } else
2920#endif
2921 {
2922 sp = ESP;
2923 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2924 ssp = env->segs[R_SS].base;
2925 if (shift) {
2926 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2927 PUSHL(ssp, sp, sp_mask, next_eip);
2928 } else {
2929 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2930 PUSHW(ssp, sp, sp_mask, next_eip);
2931 }
2932
2933 limit = get_seg_limit(e1, e2);
2934 if (new_eip > limit)
2935 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2936 /* from this point, not restartable */
2937 SET_ESP(sp, sp_mask);
2938 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2939 get_seg_base(e1, e2), limit, e2);
2940 EIP = new_eip;
2941 }
2942 } else {
2943 /* check gate type */
2944 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2945 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2946 rpl = new_cs & 3;
2947 switch(type) {
2948 case 1: /* available 286 TSS */
2949 case 9: /* available 386 TSS */
2950 case 5: /* task gate */
2951 if (dpl < cpl || dpl < rpl)
2952 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2953 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2954 CC_OP = CC_OP_EFLAGS;
2955 return;
2956 case 4: /* 286 call gate */
2957 case 12: /* 386 call gate */
2958 break;
2959 default:
2960 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2961 break;
2962 }
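    /* gate size from the descriptor type: 12 (386 call gate) >> 3 == 1 -> 32-bit pushes, 4 (286) -> 16-bit */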
2963 shift = type >> 3;
2964
2965 if (dpl < cpl || dpl < rpl)
2966 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2967 /* check valid bit */
2968 if (!(e2 & DESC_P_MASK))
2969 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2970 selector = e1 >> 16;
2971 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2972 param_count = e2 & 0x1f;
2973 if ((selector & 0xfffc) == 0)
2974 raise_exception_err(EXCP0D_GPF, 0);
2975
2976 if (load_segment(&e1, &e2, selector) != 0)
2977 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2978 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2979 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2980 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2981 if (dpl > cpl)
2982 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2983 if (!(e2 & DESC_P_MASK))
2984 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2985
2986 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2987 /* to inner privilege */
2988 get_ss_esp_from_tss(&ss, &sp, dpl);
2989#ifdef DEBUG_PCALL
2990 if (loglevel & CPU_LOG_PCALL)
2991 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2992 ss, sp, param_count, ESP);
2993#endif
2994 if ((ss & 0xfffc) == 0)
2995 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2996 if ((ss & 3) != dpl)
2997 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2998 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2999 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3000 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3001 if (ss_dpl != dpl)
3002 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3003 if (!(ss_e2 & DESC_S_MASK) ||
3004 (ss_e2 & DESC_CS_MASK) ||
3005 !(ss_e2 & DESC_W_MASK))
3006 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3007 if (!(ss_e2 & DESC_P_MASK))
3008#ifdef VBOX /* See page 3-99 of 253666.pdf */
3009 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3010#else
3011 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3012#endif
3013
3014 // push_size = ((param_count * 2) + 8) << shift;
3015
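    /* copy the param_count call-gate parameters from the outer (caller) stack onto the new inner stack */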
3016 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3017 old_ssp = env->segs[R_SS].base;
3018
3019 sp_mask = get_sp_mask(ss_e2);
3020 ssp = get_seg_base(ss_e1, ss_e2);
3021 if (shift) {
3022 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3023 PUSHL(ssp, sp, sp_mask, ESP);
3024 for(i = param_count - 1; i >= 0; i--) {
3025 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3026 PUSHL(ssp, sp, sp_mask, val);
3027 }
3028 } else {
3029 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3030 PUSHW(ssp, sp, sp_mask, ESP);
3031 for(i = param_count - 1; i >= 0; i--) {
3032 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3033 PUSHW(ssp, sp, sp_mask, val);
3034 }
3035 }
3036 new_stack = 1;
3037 } else {
3038 /* to same privilege */
3039 sp = ESP;
3040 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3041 ssp = env->segs[R_SS].base;
3042 // push_size = (4 << shift);
3043 new_stack = 0;
3044 }
3045
3046 if (shift) {
3047 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3048 PUSHL(ssp, sp, sp_mask, next_eip);
3049 } else {
3050 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3051 PUSHW(ssp, sp, sp_mask, next_eip);
3052 }
3053
3054 /* from this point, not restartable */
3055
3056 if (new_stack) {
3057 ss = (ss & ~3) | dpl;
3058 cpu_x86_load_seg_cache(env, R_SS, ss,
3059 ssp,
3060 get_seg_limit(ss_e1, ss_e2),
3061 ss_e2);
3062 }
3063
3064 selector = (selector & ~3) | dpl;
3065 cpu_x86_load_seg_cache(env, R_CS, selector,
3066 get_seg_base(e1, e2),
3067 get_seg_limit(e1, e2),
3068 e2);
3069 cpu_x86_set_cpl(env, dpl);
3070 SET_ESP(sp, sp_mask);
3071 EIP = offset;
3072 }
3073#ifdef USE_KQEMU
3074 if (kqemu_is_ok(env)) {
3075 env->exception_index = -1;
3076 cpu_loop_exit();
3077 }
3078#endif
3079}
3080
3081/* real and vm86 mode iret */
3082void helper_iret_real(int shift)
3083{
3084 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3085 target_ulong ssp;
3086 int eflags_mask;
3087#ifdef VBOX
3088 bool fVME = false;
3089
3090 remR3TrapClear(env->pVM);
3091#endif /* VBOX */
3092
3093 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3094 sp = ESP;
3095 ssp = env->segs[R_SS].base;
3096 if (shift == 1) {
3097 /* 32 bits */
3098 POPL(ssp, sp, sp_mask, new_eip);
3099 POPL(ssp, sp, sp_mask, new_cs);
3100 new_cs &= 0xffff;
3101 POPL(ssp, sp, sp_mask, new_eflags);
3102 } else {
3103 /* 16 bits */
3104 POPW(ssp, sp, sp_mask, new_eip);
3105 POPW(ssp, sp, sp_mask, new_cs);
3106 POPW(ssp, sp, sp_mask, new_eflags);
3107 }
3108#ifdef VBOX
3109 if ( (env->eflags & VM_MASK)
3110 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3111 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3112 {
3113 fVME = true;
3114 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3115 /* if TF will be set -> #GP */
3116 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3117 || (new_eflags & TF_MASK))
3118 raise_exception(EXCP0D_GPF);
3119 }
3120#endif /* VBOX */
3121 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3122 env->segs[R_CS].selector = new_cs;
3123 env->segs[R_CS].base = (new_cs << 4);
3124 env->eip = new_eip;
3125#ifdef VBOX
3126 if (fVME)
3127 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3128 else
3129#endif
3130 if (env->eflags & VM_MASK)
3131 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3132 else
3133 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3134 if (shift == 0)
3135 eflags_mask &= 0xffff;
3136 load_eflags(new_eflags, eflags_mask);
3137 env->hflags2 &= ~HF2_NMI_MASK;
3138#ifdef VBOX
3139 if (fVME)
3140 {
3141 if (new_eflags & IF_MASK)
3142 env->eflags |= VIF_MASK;
3143 else
3144 env->eflags &= ~VIF_MASK;
3145 }
3146#endif /* VBOX */
3147}
3148
3149static inline void validate_seg(int seg_reg, int cpl)
3150{
3151 int dpl;
3152 uint32_t e2;
3153
3154 /* XXX: on x86_64, we do not want to nullify FS and GS because
3155 they may still contain a valid base. I would be interested to
3156 know how a real x86_64 CPU behaves */
3157 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3158 (env->segs[seg_reg].selector & 0xfffc) == 0)
3159 return;
3160
3161 e2 = env->segs[seg_reg].flags;
3162 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3163 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3164 /* data or non conforming code segment */
3165 if (dpl < cpl) {
3166 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3167 }
3168 }
3169}
3170
3171/* protected mode iret */
3172static inline void helper_ret_protected(int shift, int is_iret, int addend)
3173{
3174 uint32_t new_cs, new_eflags, new_ss;
3175 uint32_t new_es, new_ds, new_fs, new_gs;
3176 uint32_t e1, e2, ss_e1, ss_e2;
3177 int cpl, dpl, rpl, eflags_mask, iopl;
3178 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3179
3180#ifdef VBOX
3181 ss_e1 = ss_e2 = e1 = e2 = 0;
3182#endif
3183
3184#ifdef TARGET_X86_64
3185 if (shift == 2)
3186 sp_mask = -1;
3187 else
3188#endif
3189 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3190 sp = ESP;
3191 ssp = env->segs[R_SS].base;
3192 new_eflags = 0; /* avoid warning */
3193#ifdef TARGET_X86_64
3194 if (shift == 2) {
3195 POPQ(sp, new_eip);
3196 POPQ(sp, new_cs);
3197 new_cs &= 0xffff;
3198 if (is_iret) {
3199 POPQ(sp, new_eflags);
3200 }
3201 } else
3202#endif
3203 if (shift == 1) {
3204 /* 32 bits */
3205 POPL(ssp, sp, sp_mask, new_eip);
3206 POPL(ssp, sp, sp_mask, new_cs);
3207 new_cs &= 0xffff;
3208 if (is_iret) {
3209 POPL(ssp, sp, sp_mask, new_eflags);
3210#if defined(VBOX) && defined(DEBUG)
3211 printf("iret: new CS %04X\n", new_cs);
3212 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3213 printf("iret: new EFLAGS %08X\n", new_eflags);
3214 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3215#endif
3216 if (new_eflags & VM_MASK)
3217 goto return_to_vm86;
3218 }
3219#ifdef VBOX
3220 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3221 {
3222#ifdef DEBUG
3223 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3224#endif
3225 new_cs = new_cs & 0xfffc;
3226 }
3227#endif
3228 } else {
3229 /* 16 bits */
3230 POPW(ssp, sp, sp_mask, new_eip);
3231 POPW(ssp, sp, sp_mask, new_cs);
3232 if (is_iret)
3233 POPW(ssp, sp, sp_mask, new_eflags);
3234 }
3235#ifdef DEBUG_PCALL
3236 if (loglevel & CPU_LOG_PCALL) {
3237 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3238 new_cs, new_eip, shift, addend);
3239 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3240 }
3241#endif
3242 if ((new_cs & 0xfffc) == 0)
3243 {
3244#if defined(VBOX) && defined(DEBUG)
3245 printf("(new_cs & 0xfffc) == 0\n");
3246#endif
3247 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3248 }
3249 if (load_segment(&e1, &e2, new_cs) != 0)
3250 {
3251#if defined(VBOX) && defined(DEBUG)
3252 printf("load_segment failed\n");
3253#endif
3254 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3255 }
3256 if (!(e2 & DESC_S_MASK) ||
3257 !(e2 & DESC_CS_MASK))
3258 {
3259#if defined(VBOX) && defined(DEBUG)
3260 printf("e2 mask %08x\n", e2);
3261#endif
3262 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3263 }
3264 cpl = env->hflags & HF_CPL_MASK;
3265 rpl = new_cs & 3;
3266 if (rpl < cpl)
3267 {
3268#if defined(VBOX) && defined(DEBUG)
3269 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3270#endif
3271 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3272 }
3273 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3274 if (e2 & DESC_C_MASK) {
3275 if (dpl > rpl)
3276 {
3277#if defined(VBOX) && defined(DEBUG)
3278 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3279#endif
3280 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3281 }
3282 } else {
3283 if (dpl != rpl)
3284 {
3285#if defined(VBOX) && defined(DEBUG)
3286 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3287#endif
3288 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3289 }
3290 }
3291 if (!(e2 & DESC_P_MASK))
3292 {
3293#if defined(VBOX) && defined(DEBUG)
3294 printf("DESC_P_MASK e2=%08x\n", e2);
3295#endif
3296 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3297 }
3298
3299 sp += addend;
3300 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3301 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3302 /* return to same privilege level */
3303 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3304 get_seg_base(e1, e2),
3305 get_seg_limit(e1, e2),
3306 e2);
3307 } else {
3308 /* return to different privilege level */
3309#ifdef TARGET_X86_64
3310 if (shift == 2) {
3311 POPQ(sp, new_esp);
3312 POPQ(sp, new_ss);
3313 new_ss &= 0xffff;
3314 } else
3315#endif
3316 if (shift == 1) {
3317 /* 32 bits */
3318 POPL(ssp, sp, sp_mask, new_esp);
3319 POPL(ssp, sp, sp_mask, new_ss);
3320 new_ss &= 0xffff;
3321 } else {
3322 /* 16 bits */
3323 POPW(ssp, sp, sp_mask, new_esp);
3324 POPW(ssp, sp, sp_mask, new_ss);
3325 }
3326#ifdef DEBUG_PCALL
3327 if (loglevel & CPU_LOG_PCALL) {
3328 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3329 new_ss, new_esp);
3330 }
3331#endif
3332 if ((new_ss & 0xfffc) == 0) {
3333#ifdef TARGET_X86_64
3334 /* NULL ss is allowed in long mode if cpl != 3 */
3335 /* XXX: test CS64 ? */
3336 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3337 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3338 0, 0xffffffff,
3339 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3340 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3341 DESC_W_MASK | DESC_A_MASK);
3342 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3343 } else
3344#endif
3345 {
3346 raise_exception_err(EXCP0D_GPF, 0);
3347 }
3348 } else {
3349 if ((new_ss & 3) != rpl)
3350 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3351 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3352 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3353 if (!(ss_e2 & DESC_S_MASK) ||
3354 (ss_e2 & DESC_CS_MASK) ||
3355 !(ss_e2 & DESC_W_MASK))
3356 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3357 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3358 if (dpl != rpl)
3359 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3360 if (!(ss_e2 & DESC_P_MASK))
3361 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3362 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3363 get_seg_base(ss_e1, ss_e2),
3364 get_seg_limit(ss_e1, ss_e2),
3365 ss_e2);
3366 }
3367
3368 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3369 get_seg_base(e1, e2),
3370 get_seg_limit(e1, e2),
3371 e2);
3372 cpu_x86_set_cpl(env, rpl);
3373 sp = new_esp;
3374#ifdef TARGET_X86_64
3375 if (env->hflags & HF_CS64_MASK)
3376 sp_mask = -1;
3377 else
3378#endif
3379 sp_mask = get_sp_mask(ss_e2);
3380
3381 /* validate data segments */
3382 validate_seg(R_ES, rpl);
3383 validate_seg(R_DS, rpl);
3384 validate_seg(R_FS, rpl);
3385 validate_seg(R_GS, rpl);
3386
3387 sp += addend;
3388 }
3389 SET_ESP(sp, sp_mask);
3390 env->eip = new_eip;
3391 if (is_iret) {
3392 /* NOTE: 'cpl' is the _old_ CPL */
3393 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3394 if (cpl == 0)
3395#ifdef VBOX
3396 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3397#else
3398 eflags_mask |= IOPL_MASK;
3399#endif
3400 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3401 if (cpl <= iopl)
3402 eflags_mask |= IF_MASK;
3403 if (shift == 0)
3404 eflags_mask &= 0xffff;
3405 load_eflags(new_eflags, eflags_mask);
3406 }
3407 return;
3408
3409 return_to_vm86:
3410 POPL(ssp, sp, sp_mask, new_esp);
3411 POPL(ssp, sp, sp_mask, new_ss);
3412 POPL(ssp, sp, sp_mask, new_es);
3413 POPL(ssp, sp, sp_mask, new_ds);
3414 POPL(ssp, sp, sp_mask, new_fs);
3415 POPL(ssp, sp, sp_mask, new_gs);
3416
3417 /* modify processor state */
3418 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3419 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3420 load_seg_vm(R_CS, new_cs & 0xffff);
3421 cpu_x86_set_cpl(env, 3);
3422 load_seg_vm(R_SS, new_ss & 0xffff);
3423 load_seg_vm(R_ES, new_es & 0xffff);
3424 load_seg_vm(R_DS, new_ds & 0xffff);
3425 load_seg_vm(R_FS, new_fs & 0xffff);
3426 load_seg_vm(R_GS, new_gs & 0xffff);
3427
3428 env->eip = new_eip & 0xffff;
3429 ESP = new_esp;
3430}
3431
3432void helper_iret_protected(int shift, int next_eip)
3433{
3434 int tss_selector, type;
3435 uint32_t e1, e2;
3436
3437#ifdef VBOX
3438 e1 = e2 = 0;
3439 remR3TrapClear(env->pVM);
3440#endif
3441
3442 /* specific case for TSS */
3443 if (env->eflags & NT_MASK) {
3444#ifdef TARGET_X86_64
3445 if (env->hflags & HF_LMA_MASK)
3446 raise_exception_err(EXCP0D_GPF, 0);
3447#endif
3448 tss_selector = lduw_kernel(env->tr.base + 0);
3449 if (tss_selector & 4)
3450 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3451 if (load_segment(&e1, &e2, tss_selector) != 0)
3452 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3453 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3454 /* NOTE: we check both segment and busy TSS */
3455 if (type != 3)
3456 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3457 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3458 } else {
3459 helper_ret_protected(shift, 1, 0);
3460 }
3461 env->hflags2 &= ~HF2_NMI_MASK;
3462#ifdef USE_KQEMU
3463 if (kqemu_is_ok(env)) {
3464 CC_OP = CC_OP_EFLAGS;
3465 env->exception_index = -1;
3466 cpu_loop_exit();
3467 }
3468#endif
3469}
3470
3471void helper_lret_protected(int shift, int addend)
3472{
3473 helper_ret_protected(shift, 0, addend);
3474#ifdef USE_KQEMU
3475 if (kqemu_is_ok(env)) {
3476 env->exception_index = -1;
3477 cpu_loop_exit();
3478 }
3479#endif
3480}
3481
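/* SYSENTER: switch to CPL 0 with flat CS/SS taken from IA32_SYSENTER_CS (SS = CS + 8),
   ESP and EIP from the SYSENTER_ESP/EIP MSRs. */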
3482void helper_sysenter(void)
3483{
3484 if (env->sysenter_cs == 0) {
3485 raise_exception_err(EXCP0D_GPF, 0);
3486 }
3487 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3488 cpu_x86_set_cpl(env, 0);
3489
3490#ifdef TARGET_X86_64
3491 if (env->hflags & HF_LMA_MASK) {
3492 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3493 0, 0xffffffff,
3494 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3495 DESC_S_MASK |
3496 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3497 } else
3498#endif
3499 {
3500 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3501 0, 0xffffffff,
3502 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3503 DESC_S_MASK |
3504 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3505 }
3506 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3507 0, 0xffffffff,
3508 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3509 DESC_S_MASK |
3510 DESC_W_MASK | DESC_A_MASK);
3511 ESP = env->sysenter_esp;
3512 EIP = env->sysenter_eip;
3513}
3514
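/* SYSEXIT: return to CPL 3; CS/SS are derived from IA32_SYSENTER_CS (+16/+24 legacy, +32/+40 in 64-bit mode),
   ESP is taken from ECX and EIP from EDX. */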
3515void helper_sysexit(int dflag)
3516{
3517 int cpl;
3518
3519 cpl = env->hflags & HF_CPL_MASK;
3520 if (env->sysenter_cs == 0 || cpl != 0) {
3521 raise_exception_err(EXCP0D_GPF, 0);
3522 }
3523 cpu_x86_set_cpl(env, 3);
3524#ifdef TARGET_X86_64
3525 if (dflag == 2) {
3526 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3527 0, 0xffffffff,
3528 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3529 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3530 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3531 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3532 0, 0xffffffff,
3533 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3534 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3535 DESC_W_MASK | DESC_A_MASK);
3536 } else
3537#endif
3538 {
3539 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3540 0, 0xffffffff,
3541 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3542 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3543 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3544 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3545 0, 0xffffffff,
3546 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3547 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3548 DESC_W_MASK | DESC_A_MASK);
3549 }
3550 ESP = ECX;
3551 EIP = EDX;
3552#ifdef USE_KQEMU
3553 if (kqemu_is_ok(env)) {
3554 env->exception_index = -1;
3555 cpu_loop_exit();
3556 }
3557#endif
3558}
3559
3560#if defined(CONFIG_USER_ONLY)
3561target_ulong helper_read_crN(int reg)
3562{
3563 return 0;
3564}
3565
3566void helper_write_crN(int reg, target_ulong t0)
3567{
3568}
3569#else
3570target_ulong helper_read_crN(int reg)
3571{
3572 target_ulong val;
3573
3574 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3575 switch(reg) {
3576 default:
3577 val = env->cr[reg];
3578 break;
3579 case 8:
3580 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3581 val = cpu_get_apic_tpr(env);
3582 } else {
3583 val = env->v_tpr;
3584 }
3585 break;
3586 }
3587 return val;
3588}
3589
3590void helper_write_crN(int reg, target_ulong t0)
3591{
3592 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3593 switch(reg) {
3594 case 0:
3595 cpu_x86_update_cr0(env, t0);
3596 break;
3597 case 3:
3598 cpu_x86_update_cr3(env, t0);
3599 break;
3600 case 4:
3601 cpu_x86_update_cr4(env, t0);
3602 break;
3603 case 8:
3604 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3605 cpu_set_apic_tpr(env, t0);
3606 }
3607 env->v_tpr = t0 & 0x0f;
3608 break;
3609 default:
3610 env->cr[reg] = t0;
3611 break;
3612 }
3613}
3614#endif
3615
3616void helper_lmsw(target_ulong t0)
3617{
3618 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3619 if already set to one. */
3620 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3621 helper_write_crN(0, t0);
3622}
3623
3624void helper_clts(void)
3625{
3626 env->cr[0] &= ~CR0_TS_MASK;
3627 env->hflags &= ~HF_TS_MASK;
3628}
3629
3630/* XXX: do more */
3631void helper_movl_drN_T0(int reg, target_ulong t0)
3632{
3633 env->dr[reg] = t0;
3634}
3635
3636void helper_invlpg(target_ulong addr)
3637{
3638 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3639 tlb_flush_page(env, addr);
3640}
3641
3642void helper_rdtsc(void)
3643{
3644 uint64_t val;
3645
3646 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3647 raise_exception(EXCP0D_GPF);
3648 }
3649 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3650
3651 val = cpu_get_tsc(env) + env->tsc_offset;
3652 EAX = (uint32_t)(val);
3653 EDX = (uint32_t)(val >> 32);
3654}
3655
3656#ifdef VBOX
3657void helper_rdtscp(void)
3658{
3659 uint64_t val;
3660 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3661 raise_exception(EXCP0D_GPF);
3662 }
3663
3664 val = cpu_get_tsc(env);
3665 EAX = (uint32_t)(val);
3666 EDX = (uint32_t)(val >> 32);
3667 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3668 ECX = (uint32_t)(val);
3669 else
3670 ECX = 0;
3671}
3672#endif /* VBOX */
3673
3674void helper_rdpmc(void)
3675{
3676#ifdef VBOX
3677 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3678 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3679 raise_exception(EXCP0D_GPF);
3680 }
3681 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3682 EAX = 0;
3683 EDX = 0;
3684#else /* !VBOX */
3685 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3686 raise_exception(EXCP0D_GPF);
3687 }
3688 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3689
3690 /* currently unimplemented */
3691 raise_exception_err(EXCP06_ILLOP, 0);
3692#endif /* !VBOX */
3693}
3694
3695#if defined(CONFIG_USER_ONLY)
3696void helper_wrmsr(void)
3697{
3698}
3699
3700void helper_rdmsr(void)
3701{
3702}
3703#else
3704void helper_wrmsr(void)
3705{
3706 uint64_t val;
3707
3708 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3709
3710 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3711
3712 switch((uint32_t)ECX) {
3713 case MSR_IA32_SYSENTER_CS:
3714 env->sysenter_cs = val & 0xffff;
3715 break;
3716 case MSR_IA32_SYSENTER_ESP:
3717 env->sysenter_esp = val;
3718 break;
3719 case MSR_IA32_SYSENTER_EIP:
3720 env->sysenter_eip = val;
3721 break;
3722 case MSR_IA32_APICBASE:
3723#ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3724 cpu_set_apic_base(env, val);
3725#endif
3726 break;
3727 case MSR_EFER:
3728 {
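            /* only EFER bits whose features are advertised by CPUID may be modified */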
3729 uint64_t update_mask;
3730 update_mask = 0;
3731 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3732 update_mask |= MSR_EFER_SCE;
3733 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3734 update_mask |= MSR_EFER_LME;
3735 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3736 update_mask |= MSR_EFER_FFXSR;
3737 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3738 update_mask |= MSR_EFER_NXE;
3739 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3740 update_mask |= MSR_EFER_SVME;
3741 cpu_load_efer(env, (env->efer & ~update_mask) |
3742 (val & update_mask));
3743 }
3744 break;
3745 case MSR_STAR:
3746 env->star = val;
3747 break;
3748 case MSR_PAT:
3749 env->pat = val;
3750 break;
3751 case MSR_VM_HSAVE_PA:
3752 env->vm_hsave = val;
3753 break;
3754#ifdef TARGET_X86_64
3755 case MSR_LSTAR:
3756 env->lstar = val;
3757 break;
3758 case MSR_CSTAR:
3759 env->cstar = val;
3760 break;
3761 case MSR_FMASK:
3762 env->fmask = val;
3763 break;
3764 case MSR_FSBASE:
3765 env->segs[R_FS].base = val;
3766 break;
3767 case MSR_GSBASE:
3768 env->segs[R_GS].base = val;
3769 break;
3770 case MSR_KERNELGSBASE:
3771 env->kernelgsbase = val;
3772 break;
3773#endif
3774 default:
3775#ifndef VBOX
3776 /* XXX: exception ? */
3777#endif
3778 break;
3779 }
3780
3781#ifdef VBOX
3782 /* call CPUM. */
3783 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3784 {
3785 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3786 }
3787#endif
3788}
3789
3790void helper_rdmsr(void)
3791{
3792 uint64_t val;
3793 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3794
3795 switch((uint32_t)ECX) {
3796 case MSR_IA32_SYSENTER_CS:
3797 val = env->sysenter_cs;
3798 break;
3799 case MSR_IA32_SYSENTER_ESP:
3800 val = env->sysenter_esp;
3801 break;
3802 case MSR_IA32_SYSENTER_EIP:
3803 val = env->sysenter_eip;
3804 break;
3805 case MSR_IA32_APICBASE:
3806 val = cpu_get_apic_base(env);
3807 break;
3808 case MSR_EFER:
3809 val = env->efer;
3810 break;
3811 case MSR_STAR:
3812 val = env->star;
3813 break;
3814 case MSR_PAT:
3815 val = env->pat;
3816 break;
3817 case MSR_VM_HSAVE_PA:
3818 val = env->vm_hsave;
3819 break;
3820#ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3821 case MSR_IA32_PERF_STATUS:
3822 /* tsc_increment_by_tick */
3823 val = 1000ULL;
3824 /* CPU multiplier */
3825 val |= (((uint64_t)4ULL) << 40);
3826 break;
3827#endif /* !VBOX */
3828#ifdef TARGET_X86_64
3829 case MSR_LSTAR:
3830 val = env->lstar;
3831 break;
3832 case MSR_CSTAR:
3833 val = env->cstar;
3834 break;
3835 case MSR_FMASK:
3836 val = env->fmask;
3837 break;
3838 case MSR_FSBASE:
3839 val = env->segs[R_FS].base;
3840 break;
3841 case MSR_GSBASE:
3842 val = env->segs[R_GS].base;
3843 break;
3844 case MSR_KERNELGSBASE:
3845 val = env->kernelgsbase;
3846 break;
3847#endif
3848#ifdef USE_KQEMU
3849 case MSR_QPI_COMMBASE:
3850 if (env->kqemu_enabled) {
3851 val = kqemu_comm_base;
3852 } else {
3853 val = 0;
3854 }
3855 break;
3856#endif
3857 default:
3858#ifndef VBOX
3859 /* XXX: exception ? */
3860 val = 0;
3861#else /* VBOX */
3862 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3863 {
3864 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3865 val = 0;
3866 }
3867#endif /* VBOX */
3868 break;
3869 }
3870 EAX = (uint32_t)(val);
3871 EDX = (uint32_t)(val >> 32);
3872
3873#ifdef VBOX_STRICT
3874 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3875 val = 0;
3876 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
3877#endif
3878}
3879#endif
3880
3881target_ulong helper_lsl(target_ulong selector1)
3882{
3883 unsigned int limit;
3884 uint32_t e1, e2, eflags, selector;
3885 int rpl, dpl, cpl, type;
3886
3887 selector = selector1 & 0xffff;
3888 eflags = cc_table[CC_OP].compute_all();
3889 if (load_segment(&e1, &e2, selector) != 0)
3890 goto fail;
3891 rpl = selector & 3;
3892 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3893 cpl = env->hflags & HF_CPL_MASK;
3894 if (e2 & DESC_S_MASK) {
3895 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3896 /* conforming */
3897 } else {
3898 if (dpl < cpl || dpl < rpl)
3899 goto fail;
3900 }
3901 } else {
3902 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
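        /* system descriptors for which LSL is defined: 1/3 = 286 TSS, 2 = LDT, 9/11 = 386 TSS */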
3903 switch(type) {
3904 case 1:
3905 case 2:
3906 case 3:
3907 case 9:
3908 case 11:
3909 break;
3910 default:
3911 goto fail;
3912 }
3913 if (dpl < cpl || dpl < rpl) {
3914 fail:
3915 CC_SRC = eflags & ~CC_Z;
3916 return 0;
3917 }
3918 }
3919 limit = get_seg_limit(e1, e2);
3920 CC_SRC = eflags | CC_Z;
3921 return limit;
3922}
3923
3924target_ulong helper_lar(target_ulong selector1)
3925{
3926 uint32_t e1, e2, eflags, selector;
3927 int rpl, dpl, cpl, type;
3928
3929 selector = selector1 & 0xffff;
3930 eflags = cc_table[CC_OP].compute_all();
3931 if ((selector & 0xfffc) == 0)
3932 goto fail;
3933 if (load_segment(&e1, &e2, selector) != 0)
3934 goto fail;
3935 rpl = selector & 3;
3936 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3937 cpl = env->hflags & HF_CPL_MASK;
3938 if (e2 & DESC_S_MASK) {
3939 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3940 /* conforming */
3941 } else {
3942 if (dpl < cpl || dpl < rpl)
3943 goto fail;
3944 }
3945 } else {
3946 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
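        /* LAR additionally accepts call gates (4, 12) and task gates (5) */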
3947 switch(type) {
3948 case 1:
3949 case 2:
3950 case 3:
3951 case 4:
3952 case 5:
3953 case 9:
3954 case 11:
3955 case 12:
3956 break;
3957 default:
3958 goto fail;
3959 }
3960 if (dpl < cpl || dpl < rpl) {
3961 fail:
3962 CC_SRC = eflags & ~CC_Z;
3963 return 0;
3964 }
3965 }
3966 CC_SRC = eflags | CC_Z;
3967 return e2 & 0x00f0ff00;
3968}
3969
3970void helper_verr(target_ulong selector1)
3971{
3972 uint32_t e1, e2, eflags, selector;
3973 int rpl, dpl, cpl;
3974
3975 selector = selector1 & 0xffff;
3976 eflags = cc_table[CC_OP].compute_all();
3977 if ((selector & 0xfffc) == 0)
3978 goto fail;
3979 if (load_segment(&e1, &e2, selector) != 0)
3980 goto fail;
3981 if (!(e2 & DESC_S_MASK))
3982 goto fail;
3983 rpl = selector & 3;
3984 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3985 cpl = env->hflags & HF_CPL_MASK;
3986 if (e2 & DESC_CS_MASK) {
3987 if (!(e2 & DESC_R_MASK))
3988 goto fail;
3989 if (!(e2 & DESC_C_MASK)) {
3990 if (dpl < cpl || dpl < rpl)
3991 goto fail;
3992 }
3993 } else {
3994 if (dpl < cpl || dpl < rpl) {
3995 fail:
3996 CC_SRC = eflags & ~CC_Z;
3997 return;
3998 }
3999 }
4000 CC_SRC = eflags | CC_Z;
4001}
4002
4003void helper_verw(target_ulong selector1)
4004{
4005 uint32_t e1, e2, eflags, selector;
4006 int rpl, dpl, cpl;
4007
4008 selector = selector1 & 0xffff;
4009 eflags = cc_table[CC_OP].compute_all();
4010 if ((selector & 0xfffc) == 0)
4011 goto fail;
4012 if (load_segment(&e1, &e2, selector) != 0)
4013 goto fail;
4014 if (!(e2 & DESC_S_MASK))
4015 goto fail;
4016 rpl = selector & 3;
4017 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4018 cpl = env->hflags & HF_CPL_MASK;
4019 if (e2 & DESC_CS_MASK) {
4020 goto fail;
4021 } else {
4022 if (dpl < cpl || dpl < rpl)
4023 goto fail;
4024 if (!(e2 & DESC_W_MASK)) {
4025 fail:
4026 CC_SRC = eflags & ~CC_Z;
4027 return;
4028 }
4029 }
4030 CC_SRC = eflags | CC_Z;
4031}
4032
4033/* x87 FPU helpers */
4034
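/* Note: fpu_set_exception() below merges the given exception flags into the FPU
   status word; if any pending exception is unmasked in the control word it also
   sets the error-summary (SE) and busy (B) bits, so that fpu_raise_exception()
   can later deliver the deferred #MF (or assert FERR when CR0.NE is clear). */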
4035static void fpu_set_exception(int mask)
4036{
4037 env->fpus |= mask;
4038 if (env->fpus & (~env->fpuc & FPUC_EM))
4039 env->fpus |= FPUS_SE | FPUS_B;
4040}
4041
4042static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4043{
4044 if (b == 0.0)
4045 fpu_set_exception(FPUS_ZE);
4046 return a / b;
4047}
4048
4049void fpu_raise_exception(void)
4050{
4051 if (env->cr[0] & CR0_NE_MASK) {
4052 raise_exception(EXCP10_COPR);
4053 }
4054#if !defined(CONFIG_USER_ONLY)
4055 else {
4056 cpu_set_ferr(env);
4057 }
4058#endif
4059}
4060
4061void helper_flds_FT0(uint32_t val)
4062{
4063 union {
4064 float32 f;
4065 uint32_t i;
4066 } u;
4067 u.i = val;
4068 FT0 = float32_to_floatx(u.f, &env->fp_status);
4069}
4070
4071void helper_fldl_FT0(uint64_t val)
4072{
4073 union {
4074 float64 f;
4075 uint64_t i;
4076 } u;
4077 u.i = val;
4078 FT0 = float64_to_floatx(u.f, &env->fp_status);
4079}
4080
4081void helper_fildl_FT0(int32_t val)
4082{
4083 FT0 = int32_to_floatx(val, &env->fp_status);
4084}
4085
4086void helper_flds_ST0(uint32_t val)
4087{
4088 int new_fpstt;
4089 union {
4090 float32 f;
4091 uint32_t i;
4092 } u;
4093 new_fpstt = (env->fpstt - 1) & 7;
4094 u.i = val;
4095 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4096 env->fpstt = new_fpstt;
4097 env->fptags[new_fpstt] = 0; /* validate stack entry */
4098}
4099
4100void helper_fldl_ST0(uint64_t val)
4101{
4102 int new_fpstt;
4103 union {
4104 float64 f;
4105 uint64_t i;
4106 } u;
4107 new_fpstt = (env->fpstt - 1) & 7;
4108 u.i = val;
4109 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4110 env->fpstt = new_fpstt;
4111 env->fptags[new_fpstt] = 0; /* validate stack entry */
4112}
4113
4114void helper_fildl_ST0(int32_t val)
4115{
4116 int new_fpstt;
4117 new_fpstt = (env->fpstt - 1) & 7;
4118 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4119 env->fpstt = new_fpstt;
4120 env->fptags[new_fpstt] = 0; /* validate stack entry */
4121}
4122
4123void helper_fildll_ST0(int64_t val)
4124{
4125 int new_fpstt;
4126 new_fpstt = (env->fpstt - 1) & 7;
4127 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4128 env->fpstt = new_fpstt;
4129 env->fptags[new_fpstt] = 0; /* validate stack entry */
4130}
4131
4132#ifndef VBOX
4133uint32_t helper_fsts_ST0(void)
4134#else
4135RTCCUINTREG helper_fsts_ST0(void)
4136#endif
4137{
4138 union {
4139 float32 f;
4140 uint32_t i;
4141 } u;
4142 u.f = floatx_to_float32(ST0, &env->fp_status);
4143 return u.i;
4144}
4145
4146uint64_t helper_fstl_ST0(void)
4147{
4148 union {
4149 float64 f;
4150 uint64_t i;
4151 } u;
4152 u.f = floatx_to_float64(ST0, &env->fp_status);
4153 return u.i;
4154}
4155
4156#ifndef VBOX
4157int32_t helper_fist_ST0(void)
4158#else
4159RTCCINTREG helper_fist_ST0(void)
4160#endif
4161{
4162 int32_t val;
4163 val = floatx_to_int32(ST0, &env->fp_status);
4164 if (val != (int16_t)val)
4165 val = -32768;
4166 return val;
4167}
4168
4169#ifndef VBOX
4170int32_t helper_fistl_ST0(void)
4171#else
4172RTCCINTREG helper_fistl_ST0(void)
4173#endif
4174{
4175 int32_t val;
4176 val = floatx_to_int32(ST0, &env->fp_status);
4177 return val;
4178}
4179
4180int64_t helper_fistll_ST0(void)
4181{
4182 int64_t val;
4183 val = floatx_to_int64(ST0, &env->fp_status);
4184 return val;
4185}
4186
4187#ifndef VBOX
4188int32_t helper_fistt_ST0(void)
4189#else
4190RTCCINTREG helper_fistt_ST0(void)
4191#endif
4192{
4193 int32_t val;
4194 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4195 if (val != (int16_t)val)
4196 val = -32768;
4197 return val;
4198}
4199
4200#ifndef VBOX
4201int32_t helper_fisttl_ST0(void)
4202#else
4203RTCCINTREG helper_fisttl_ST0(void)
4204#endif
4205{
4206 int32_t val;
4207 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4208 return val;
4209}
4210
4211int64_t helper_fisttll_ST0(void)
4212{
4213 int64_t val;
4214 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4215 return val;
4216}
4217
4218void helper_fldt_ST0(target_ulong ptr)
4219{
4220 int new_fpstt;
4221 new_fpstt = (env->fpstt - 1) & 7;
4222 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4223 env->fpstt = new_fpstt;
4224 env->fptags[new_fpstt] = 0; /* validate stack entry */
4225}
4226
4227void helper_fstt_ST0(target_ulong ptr)
4228{
4229 helper_fstt(ST0, ptr);
4230}
4231
4232void helper_fpush(void)
4233{
4234 fpush();
4235}
4236
4237void helper_fpop(void)
4238{
4239 fpop();
4240}
4241
4242void helper_fdecstp(void)
4243{
4244 env->fpstt = (env->fpstt - 1) & 7;
4245 env->fpus &= (~0x4700);
4246}
4247
4248void helper_fincstp(void)
4249{
4250 env->fpstt = (env->fpstt + 1) & 7;
4251 env->fpus &= (~0x4700);
4252}
4253
4254/* FPU move */
4255
4256void helper_ffree_STN(int st_index)
4257{
4258 env->fptags[(env->fpstt + st_index) & 7] = 1;
4259}
4260
4261void helper_fmov_ST0_FT0(void)
4262{
4263 ST0 = FT0;
4264}
4265
4266void helper_fmov_FT0_STN(int st_index)
4267{
4268 FT0 = ST(st_index);
4269}
4270
4271void helper_fmov_ST0_STN(int st_index)
4272{
4273 ST0 = ST(st_index);
4274}
4275
4276void helper_fmov_STN_ST0(int st_index)
4277{
4278 ST(st_index) = ST0;
4279}
4280
4281void helper_fxchg_ST0_STN(int st_index)
4282{
4283 CPU86_LDouble tmp;
4284 tmp = ST(st_index);
4285 ST(st_index) = ST0;
4286 ST0 = tmp;
4287}
4288
4289/* FPU operations */
4290
4291static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
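/* fcom_ccval[] is indexed by the floatx_compare() result + 1 and yields the x87
   condition code bits merged into FSW: less -> C0 (0x0100), equal -> C3 (0x4000),
   greater -> none, unordered -> C3|C2|C0 (0x4500), matching the FCOM/FUCOM flag
   settings. */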
4292
4293void helper_fcom_ST0_FT0(void)
4294{
4295 int ret;
4296
4297 ret = floatx_compare(ST0, FT0, &env->fp_status);
4298 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4299 FORCE_RET();
4300}
4301
4302void helper_fucom_ST0_FT0(void)
4303{
4304 int ret;
4305
4306 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4307 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4308 FORCE_RET();
4309}
4310
4311static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
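/* fcomi_ccval[] maps the same comparison results onto EFLAGS for FCOMI/FUCOMI:
   less -> CF, equal -> ZF, greater -> none, unordered -> ZF|PF|CF. */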
4312
4313void helper_fcomi_ST0_FT0(void)
4314{
4315 int eflags;
4316 int ret;
4317
4318 ret = floatx_compare(ST0, FT0, &env->fp_status);
4319 eflags = cc_table[CC_OP].compute_all();
4320 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4321 CC_SRC = eflags;
4322 FORCE_RET();
4323}
4324
4325void helper_fucomi_ST0_FT0(void)
4326{
4327 int eflags;
4328 int ret;
4329
4330 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4331 eflags = cc_table[CC_OP].compute_all();
4332 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4333 CC_SRC = eflags;
4334 FORCE_RET();
4335}
4336
4337void helper_fadd_ST0_FT0(void)
4338{
4339 ST0 += FT0;
4340}
4341
4342void helper_fmul_ST0_FT0(void)
4343{
4344 ST0 *= FT0;
4345}
4346
4347void helper_fsub_ST0_FT0(void)
4348{
4349 ST0 -= FT0;
4350}
4351
4352void helper_fsubr_ST0_FT0(void)
4353{
4354 ST0 = FT0 - ST0;
4355}
4356
4357void helper_fdiv_ST0_FT0(void)
4358{
4359 ST0 = helper_fdiv(ST0, FT0);
4360}
4361
4362void helper_fdivr_ST0_FT0(void)
4363{
4364 ST0 = helper_fdiv(FT0, ST0);
4365}
4366
4367/* fp operations between STN and ST0 */
4368
4369void helper_fadd_STN_ST0(int st_index)
4370{
4371 ST(st_index) += ST0;
4372}
4373
4374void helper_fmul_STN_ST0(int st_index)
4375{
4376 ST(st_index) *= ST0;
4377}
4378
4379void helper_fsub_STN_ST0(int st_index)
4380{
4381 ST(st_index) -= ST0;
4382}
4383
4384void helper_fsubr_STN_ST0(int st_index)
4385{
4386 CPU86_LDouble *p;
4387 p = &ST(st_index);
4388 *p = ST0 - *p;
4389}
4390
4391void helper_fdiv_STN_ST0(int st_index)
4392{
4393 CPU86_LDouble *p;
4394 p = &ST(st_index);
4395 *p = helper_fdiv(*p, ST0);
4396}
4397
4398void helper_fdivr_STN_ST0(int st_index)
4399{
4400 CPU86_LDouble *p;
4401 p = &ST(st_index);
4402 *p = helper_fdiv(ST0, *p);
4403}
4404
4405/* misc FPU operations */
4406void helper_fchs_ST0(void)
4407{
4408 ST0 = floatx_chs(ST0);
4409}
4410
4411void helper_fabs_ST0(void)
4412{
4413 ST0 = floatx_abs(ST0);
4414}
4415
4416void helper_fld1_ST0(void)
4417{
4418 ST0 = f15rk[1];
4419}
4420
4421void helper_fldl2t_ST0(void)
4422{
4423 ST0 = f15rk[6];
4424}
4425
4426void helper_fldl2e_ST0(void)
4427{
4428 ST0 = f15rk[5];
4429}
4430
4431void helper_fldpi_ST0(void)
4432{
4433 ST0 = f15rk[2];
4434}
4435
4436void helper_fldlg2_ST0(void)
4437{
4438 ST0 = f15rk[3];
4439}
4440
4441void helper_fldln2_ST0(void)
4442{
4443 ST0 = f15rk[4];
4444}
4445
4446void helper_fldz_ST0(void)
4447{
4448 ST0 = f15rk[0];
4449}
4450
4451void helper_fldz_FT0(void)
4452{
4453 FT0 = f15rk[0];
4454}
4455
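/* helper_fnstsw() returns the FPU status word with the current top-of-stack
   index (env->fpstt) packed into bits 11-13, as FNSTSW expects. */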
4456#ifndef VBOX
4457uint32_t helper_fnstsw(void)
4458#else
4459RTCCUINTREG helper_fnstsw(void)
4460#endif
4461{
4462 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4463}
4464
4465#ifndef VBOX
4466uint32_t helper_fnstcw(void)
4467#else
4468RTCCUINTREG helper_fnstcw(void)
4469#endif
4470{
4471 return env->fpuc;
4472}
4473
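/* update_fp_status() propagates the rounding-control (RC) field of the x87
   control word, and (when extended precision is compiled in) the precision-
   control field, into the softfloat status used by all FPU helpers. */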
4474static void update_fp_status(void)
4475{
4476 int rnd_type;
4477
4478 /* set rounding mode */
4479 switch(env->fpuc & RC_MASK) {
4480 default:
4481 case RC_NEAR:
4482 rnd_type = float_round_nearest_even;
4483 break;
4484 case RC_DOWN:
4485 rnd_type = float_round_down;
4486 break;
4487 case RC_UP:
4488 rnd_type = float_round_up;
4489 break;
4490 case RC_CHOP:
4491 rnd_type = float_round_to_zero;
4492 break;
4493 }
4494 set_float_rounding_mode(rnd_type, &env->fp_status);
4495#ifdef FLOATX80
4496 switch((env->fpuc >> 8) & 3) {
4497 case 0:
4498 rnd_type = 32;
4499 break;
4500 case 2:
4501 rnd_type = 64;
4502 break;
4503 case 3:
4504 default:
4505 rnd_type = 80;
4506 break;
4507 }
4508 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4509#endif
4510}
4511
4512void helper_fldcw(uint32_t val)
4513{
4514 env->fpuc = val;
4515 update_fp_status();
4516}
4517
4518void helper_fclex(void)
4519{
4520 env->fpus &= 0x7f00;
4521}
4522
4523void helper_fwait(void)
4524{
4525 if (env->fpus & FPUS_SE)
4526 fpu_raise_exception();
4527 FORCE_RET();
4528}
4529
4530void helper_fninit(void)
4531{
4532 env->fpus = 0;
4533 env->fpstt = 0;
4534 env->fpuc = 0x37f;
4535 env->fptags[0] = 1;
4536 env->fptags[1] = 1;
4537 env->fptags[2] = 1;
4538 env->fptags[3] = 1;
4539 env->fptags[4] = 1;
4540 env->fptags[5] = 1;
4541 env->fptags[6] = 1;
4542 env->fptags[7] = 1;
4543}
4544
4545/* BCD ops */
4546
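/* FBLD/FBST use the x87 packed BCD format: bytes 0-8 hold 18 decimal digits,
   two per byte with the low digit in the low nibble (byte 0 is least
   significant), and bit 7 of byte 9 is the sign. For example, decimal 1234 is
   stored as 34 12 00 ... 00 with a zero sign byte. */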
4547void helper_fbld_ST0(target_ulong ptr)
4548{
4549 CPU86_LDouble tmp;
4550 uint64_t val;
4551 unsigned int v;
4552 int i;
4553
4554 val = 0;
4555 for(i = 8; i >= 0; i--) {
4556 v = ldub(ptr + i);
4557 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4558 }
4559 tmp = val;
4560 if (ldub(ptr + 9) & 0x80)
4561 tmp = -tmp;
4562 fpush();
4563 ST0 = tmp;
4564}
4565
4566void helper_fbst_ST0(target_ulong ptr)
4567{
4568 int v;
4569 target_ulong mem_ref, mem_end;
4570 int64_t val;
4571
4572 val = floatx_to_int64(ST0, &env->fp_status);
4573 mem_ref = ptr;
4574 mem_end = mem_ref + 9;
4575 if (val < 0) {
4576 stb(mem_end, 0x80);
4577 val = -val;
4578 } else {
4579 stb(mem_end, 0x00);
4580 }
4581 while (mem_ref < mem_end) {
4582 if (val == 0)
4583 break;
4584 v = val % 100;
4585 val = val / 100;
4586 v = ((v / 10) << 4) | (v % 10);
4587 stb(mem_ref++, v);
4588 }
4589 while (mem_ref < mem_end) {
4590 stb(mem_ref++, 0);
4591 }
4592}
4593
4594void helper_f2xm1(void)
4595{
4596 ST0 = pow(2.0,ST0) - 1.0;
4597}
4598
4599void helper_fyl2x(void)
4600{
4601 CPU86_LDouble fptemp;
4602
4603 fptemp = ST0;
4604 if (fptemp>0.0){
4605 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4606 ST1 *= fptemp;
4607 fpop();
4608 } else {
4609 env->fpus &= (~0x4700);
4610 env->fpus |= 0x400;
4611 }
4612}
4613
4614void helper_fptan(void)
4615{
4616 CPU86_LDouble fptemp;
4617
4618 fptemp = ST0;
4619 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4620 env->fpus |= 0x400;
4621 } else {
4622 ST0 = tan(fptemp);
4623 fpush();
4624 ST0 = 1.0;
4625 env->fpus &= (~0x400); /* C2 <-- 0 */
4626 /* the above code is for |arg| < 2**52 only */
4627 }
4628}
4629
4630void helper_fpatan(void)
4631{
4632 CPU86_LDouble fptemp, fpsrcop;
4633
4634 fpsrcop = ST1;
4635 fptemp = ST0;
4636 ST1 = atan2(fpsrcop,fptemp);
4637 fpop();
4638}
4639
4640void helper_fxtract(void)
4641{
4642 CPU86_LDoubleU temp;
4643 unsigned int expdif;
4644
4645 temp.d = ST0;
4646 expdif = EXPD(temp) - EXPBIAS;
4647 /* DP exponent bias */
4648 ST0 = expdif;
4649 fpush();
4650 BIASEXPONENT(temp);
4651 ST0 = temp.d;
4652}
4653
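/* helper_fprem1() implements FPREM1 (IEEE remainder): when the exponent
   difference between ST0 and ST1 is below 53 the reduction completes and the
   low three quotient bits are reported in C0/C3/C1; otherwise only a partial
   reduction is performed and C2 is set to signal that the instruction must be
   repeated. helper_fprem() further below does the same for the truncating
   FPREM. */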
4654void helper_fprem1(void)
4655{
4656 CPU86_LDouble dblq, fpsrcop, fptemp;
4657 CPU86_LDoubleU fpsrcop1, fptemp1;
4658 int expdif;
4659 signed long long int q;
4660
4661#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4662 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4663#else
4664 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4665#endif
4666 ST0 = 0.0 / 0.0; /* NaN */
4667 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4668 return;
4669 }
4670
4671 fpsrcop = ST0;
4672 fptemp = ST1;
4673 fpsrcop1.d = fpsrcop;
4674 fptemp1.d = fptemp;
4675 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4676
4677 if (expdif < 0) {
4678 /* optimisation? taken from the AMD docs */
4679 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4680 /* ST0 is unchanged */
4681 return;
4682 }
4683
4684 if (expdif < 53) {
4685 dblq = fpsrcop / fptemp;
4686 /* round dblq towards nearest integer */
4687 dblq = rint(dblq);
4688 ST0 = fpsrcop - fptemp * dblq;
4689
4690 /* convert dblq to q by truncating towards zero */
4691 if (dblq < 0.0)
4692 q = (signed long long int)(-dblq);
4693 else
4694 q = (signed long long int)dblq;
4695
4696 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4697 /* (C0,C3,C1) <-- (q2,q1,q0) */
4698 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4699 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4700 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4701 } else {
4702 env->fpus |= 0x400; /* C2 <-- 1 */
4703 fptemp = pow(2.0, expdif - 50);
4704 fpsrcop = (ST0 / ST1) / fptemp;
4705 /* fpsrcop = integer obtained by chopping */
4706 fpsrcop = (fpsrcop < 0.0) ?
4707 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4708 ST0 -= (ST1 * fpsrcop * fptemp);
4709 }
4710}
4711
4712void helper_fprem(void)
4713{
4714 CPU86_LDouble dblq, fpsrcop, fptemp;
4715 CPU86_LDoubleU fpsrcop1, fptemp1;
4716 int expdif;
4717 signed long long int q;
4718
4719#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4720 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4721#else
4722 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4723#endif
4724 ST0 = 0.0 / 0.0; /* NaN */
4725 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4726 return;
4727 }
4728
4729 fpsrcop = (CPU86_LDouble)ST0;
4730 fptemp = (CPU86_LDouble)ST1;
4731 fpsrcop1.d = fpsrcop;
4732 fptemp1.d = fptemp;
4733 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4734
4735 if (expdif < 0) {
4736 /* optimisation? taken from the AMD docs */
4737 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4738 /* ST0 is unchanged */
4739 return;
4740 }
4741
4742 if ( expdif < 53 ) {
4743 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4744 /* round dblq towards zero */
4745 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4746 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4747
4748 /* convert dblq to q by truncating towards zero */
4749 if (dblq < 0.0)
4750 q = (signed long long int)(-dblq);
4751 else
4752 q = (signed long long int)dblq;
4753
4754 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4755 /* (C0,C3,C1) <-- (q2,q1,q0) */
4756 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4757 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4758 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4759 } else {
4760 int N = 32 + (expdif % 32); /* as per AMD docs */
4761 env->fpus |= 0x400; /* C2 <-- 1 */
4762 fptemp = pow(2.0, (double)(expdif - N));
4763 fpsrcop = (ST0 / ST1) / fptemp;
4764 /* fpsrcop = integer obtained by chopping */
4765 fpsrcop = (fpsrcop < 0.0) ?
4766 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4767 ST0 -= (ST1 * fpsrcop * fptemp);
4768 }
4769}
4770
4771void helper_fyl2xp1(void)
4772{
4773 CPU86_LDouble fptemp;
4774
4775 fptemp = ST0;
4776 if ((fptemp+1.0)>0.0) {
4777 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4778 ST1 *= fptemp;
4779 fpop();
4780 } else {
4781 env->fpus &= (~0x4700);
4782 env->fpus |= 0x400;
4783 }
4784}
4785
4786void helper_fsqrt(void)
4787{
4788 CPU86_LDouble fptemp;
4789
4790 fptemp = ST0;
4791 if (fptemp<0.0) {
4792 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4793 env->fpus |= 0x400;
4794 }
4795 ST0 = sqrt(fptemp);
4796}
4797
4798void helper_fsincos(void)
4799{
4800 CPU86_LDouble fptemp;
4801
4802 fptemp = ST0;
4803 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4804 env->fpus |= 0x400;
4805 } else {
4806 ST0 = sin(fptemp);
4807 fpush();
4808 ST0 = cos(fptemp);
4809 env->fpus &= (~0x400); /* C2 <-- 0 */
4810 /* the above code is for |arg| < 2**63 only */
4811 }
4812}
4813
4814void helper_frndint(void)
4815{
4816 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4817}
4818
4819void helper_fscale(void)
4820{
4821 ST0 = ldexp (ST0, (int)(ST1));
4822}
4823
4824void helper_fsin(void)
4825{
4826 CPU86_LDouble fptemp;
4827
4828 fptemp = ST0;
4829 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4830 env->fpus |= 0x400;
4831 } else {
4832 ST0 = sin(fptemp);
4833 env->fpus &= (~0x400); /* C2 <-- 0 */
4834 /* the above code is for |arg| < 2**53 only */
4835 }
4836}
4837
4838void helper_fcos(void)
4839{
4840 CPU86_LDouble fptemp;
4841
4842 fptemp = ST0;
4843 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4844 env->fpus |= 0x400;
4845 } else {
4846 ST0 = cos(fptemp);
4847 env->fpus &= (~0x400); /* C2 <-- 0 */
4848 /* the above code is for |arg| < 2**63 only */
4849 }
4850}
4851
4852void helper_fxam_ST0(void)
4853{
4854 CPU86_LDoubleU temp;
4855 int expdif;
4856
4857 temp.d = ST0;
4858
4859 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4860 if (SIGND(temp))
4861 env->fpus |= 0x200; /* C1 <-- 1 */
4862
4863 /* XXX: test fptags too */
4864 expdif = EXPD(temp);
4865 if (expdif == MAXEXPD) {
4866#ifdef USE_X86LDOUBLE
4867 if (MANTD(temp) == 0x8000000000000000ULL)
4868#else
4869 if (MANTD(temp) == 0)
4870#endif
4871 env->fpus |= 0x500 /*Infinity*/;
4872 else
4873 env->fpus |= 0x100 /*NaN*/;
4874 } else if (expdif == 0) {
4875 if (MANTD(temp) == 0)
4876 env->fpus |= 0x4000 /*Zero*/;
4877 else
4878 env->fpus |= 0x4400 /*Denormal*/;
4879 } else {
4880 env->fpus |= 0x400;
4881 }
4882}
4883
4884void helper_fstenv(target_ulong ptr, int data32)
4885{
4886 int fpus, fptag, exp, i;
4887 uint64_t mant;
4888 CPU86_LDoubleU tmp;
4889
4890 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4891 fptag = 0;
4892 for (i=7; i>=0; i--) {
4893 fptag <<= 2;
4894 if (env->fptags[i]) {
4895 fptag |= 3;
4896 } else {
4897 tmp.d = env->fpregs[i].d;
4898 exp = EXPD(tmp);
4899 mant = MANTD(tmp);
4900 if (exp == 0 && mant == 0) {
4901 /* zero */
4902 fptag |= 1;
4903 } else if (exp == 0 || exp == MAXEXPD
4904#ifdef USE_X86LDOUBLE
4905 || (mant & (1LL << 63)) == 0
4906#endif
4907 ) {
4908 /* NaNs, infinity, denormal */
4909 fptag |= 2;
4910 }
4911 }
4912 }
4913 if (data32) {
4914 /* 32 bit */
4915 stl(ptr, env->fpuc);
4916 stl(ptr + 4, fpus);
4917 stl(ptr + 8, fptag);
4918 stl(ptr + 12, 0); /* fpip */
4919 stl(ptr + 16, 0); /* fpcs */
4920 stl(ptr + 20, 0); /* fpoo */
4921 stl(ptr + 24, 0); /* fpos */
4922 } else {
4923 /* 16 bit */
4924 stw(ptr, env->fpuc);
4925 stw(ptr + 2, fpus);
4926 stw(ptr + 4, fptag);
4927 stw(ptr + 6, 0);
4928 stw(ptr + 8, 0);
4929 stw(ptr + 10, 0);
4930 stw(ptr + 12, 0);
4931 }
4932}
4933
4934void helper_fldenv(target_ulong ptr, int data32)
4935{
4936 int i, fpus, fptag;
4937
4938 if (data32) {
4939 env->fpuc = lduw(ptr);
4940 fpus = lduw(ptr + 4);
4941 fptag = lduw(ptr + 8);
4942 }
4943 else {
4944 env->fpuc = lduw(ptr);
4945 fpus = lduw(ptr + 2);
4946 fptag = lduw(ptr + 4);
4947 }
4948 env->fpstt = (fpus >> 11) & 7;
4949 env->fpus = fpus & ~0x3800;
4950 for(i = 0;i < 8; i++) {
4951 env->fptags[i] = ((fptag & 3) == 3);
4952 fptag >>= 2;
4953 }
4954}
4955
4956void helper_fsave(target_ulong ptr, int data32)
4957{
4958 CPU86_LDouble tmp;
4959 int i;
4960
4961 helper_fstenv(ptr, data32);
4962
4963 ptr += (14 << data32);
4964 for(i = 0;i < 8; i++) {
4965 tmp = ST(i);
4966 helper_fstt(tmp, ptr);
4967 ptr += 10;
4968 }
4969
4970 /* fninit */
4971 env->fpus = 0;
4972 env->fpstt = 0;
4973 env->fpuc = 0x37f;
4974 env->fptags[0] = 1;
4975 env->fptags[1] = 1;
4976 env->fptags[2] = 1;
4977 env->fptags[3] = 1;
4978 env->fptags[4] = 1;
4979 env->fptags[5] = 1;
4980 env->fptags[6] = 1;
4981 env->fptags[7] = 1;
4982}
4983
4984void helper_frstor(target_ulong ptr, int data32)
4985{
4986 CPU86_LDouble tmp;
4987 int i;
4988
4989 helper_fldenv(ptr, data32);
4990 ptr += (14 << data32);
4991
4992 for(i = 0;i < 8; i++) {
4993 tmp = helper_fldt(ptr);
4994 ST(i) = tmp;
4995 ptr += 10;
4996 }
4997}
4998
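/* helper_fxsave() stores the FPU/SSE state in the FXSAVE layout: FCW at +0x00,
   FSW at +0x02, the abridged one-bit-per-register tag word at +0x04, the eight
   ST registers in 16-byte slots starting at +0x20, and, when CR4.OSFXSR is set,
   MXCSR at +0x18 and the XMM registers starting at +0xa0. helper_fxrstor()
   below reads the same layout back. */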
4999void helper_fxsave(target_ulong ptr, int data64)
5000{
5001 int fpus, fptag, i, nb_xmm_regs;
5002 CPU86_LDouble tmp;
5003 target_ulong addr;
5004
5005 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5006 fptag = 0;
5007 for(i = 0; i < 8; i++) {
5008 fptag |= (env->fptags[i] << i);
5009 }
5010 stw(ptr, env->fpuc);
5011 stw(ptr + 2, fpus);
5012 stw(ptr + 4, fptag ^ 0xff);
5013#ifdef TARGET_X86_64
5014 if (data64) {
5015 stq(ptr + 0x08, 0); /* rip */
5016 stq(ptr + 0x10, 0); /* rdp */
5017 } else
5018#endif
5019 {
5020 stl(ptr + 0x08, 0); /* eip */
5021 stl(ptr + 0x0c, 0); /* sel */
5022 stl(ptr + 0x10, 0); /* dp */
5023 stl(ptr + 0x14, 0); /* sel */
5024 }
5025
5026 addr = ptr + 0x20;
5027 for(i = 0;i < 8; i++) {
5028 tmp = ST(i);
5029 helper_fstt(tmp, addr);
5030 addr += 16;
5031 }
5032
5033 if (env->cr[4] & CR4_OSFXSR_MASK) {
5034 /* XXX: finish it */
5035 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5036 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5037 if (env->hflags & HF_CS64_MASK)
5038 nb_xmm_regs = 16;
5039 else
5040 nb_xmm_regs = 8;
5041 addr = ptr + 0xa0;
5042 for(i = 0; i < nb_xmm_regs; i++) {
5043 stq(addr, env->xmm_regs[i].XMM_Q(0));
5044 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5045 addr += 16;
5046 }
5047 }
5048}
5049
5050void helper_fxrstor(target_ulong ptr, int data64)
5051{
5052 int i, fpus, fptag, nb_xmm_regs;
5053 CPU86_LDouble tmp;
5054 target_ulong addr;
5055
5056 env->fpuc = lduw(ptr);
5057 fpus = lduw(ptr + 2);
5058 fptag = lduw(ptr + 4);
5059 env->fpstt = (fpus >> 11) & 7;
5060 env->fpus = fpus & ~0x3800;
5061 fptag ^= 0xff;
5062 for(i = 0;i < 8; i++) {
5063 env->fptags[i] = ((fptag >> i) & 1);
5064 }
5065
5066 addr = ptr + 0x20;
5067 for(i = 0;i < 8; i++) {
5068 tmp = helper_fldt(addr);
5069 ST(i) = tmp;
5070 addr += 16;
5071 }
5072
5073 if (env->cr[4] & CR4_OSFXSR_MASK) {
5074 /* XXX: finish it */
5075 env->mxcsr = ldl(ptr + 0x18);
5076 //ldl(ptr + 0x1c);
5077 if (env->hflags & HF_CS64_MASK)
5078 nb_xmm_regs = 16;
5079 else
5080 nb_xmm_regs = 8;
5081 addr = ptr + 0xa0;
5082 for(i = 0; i < nb_xmm_regs; i++) {
5083#if !defined(VBOX) || __GNUC__ < 4
5084 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5085 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5086#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5087# if 1
5088 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5089 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5090 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5091 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5092# else
5093 /* this works fine on Mac OS X, gcc 4.0.1 */
5094 uint64_t u64 = ldq(addr);
5095 env->xmm_regs[i].XMM_Q(0) = u64;
5096 u64 = ldq(addr + 8);
5097 env->xmm_regs[i].XMM_Q(1) = u64;
5098# endif
5099#endif
5100 addr += 16;
5101 }
5102 }
5103}
5104
5105#ifndef USE_X86LDOUBLE
5106
5107void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5108{
5109 CPU86_LDoubleU temp;
5110 int e;
5111
5112 temp.d = f;
5113 /* mantissa */
5114 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5115 /* exponent + sign */
5116 e = EXPD(temp) - EXPBIAS + 16383;
5117 e |= SIGND(temp) >> 16;
5118 *pexp = e;
5119}
5120
5121CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5122{
5123 CPU86_LDoubleU temp;
5124 int e;
5125 uint64_t ll;
5126
5127 /* XXX: handle overflow ? */
5128 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5129 e |= (upper >> 4) & 0x800; /* sign */
5130 ll = (mant >> 11) & ((1LL << 52) - 1);
5131#ifdef __arm__
5132 temp.l.upper = (e << 20) | (ll >> 32);
5133 temp.l.lower = ll;
5134#else
5135 temp.ll = ll | ((uint64_t)e << 52);
5136#endif
5137 return temp.d;
5138}
5139
5140#else
5141
5142void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5143{
5144 CPU86_LDoubleU temp;
5145
5146 temp.d = f;
5147 *pmant = temp.l.lower;
5148 *pexp = temp.l.upper;
5149}
5150
5151CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5152{
5153 CPU86_LDoubleU temp;
5154
5155 temp.l.upper = upper;
5156 temp.l.lower = mant;
5157 return temp.d;
5158}
5159#endif
5160
5161#ifdef TARGET_X86_64
5162
5163//#define DEBUG_MULDIV
5164
5165static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5166{
5167 *plow += a;
5168 /* carry test */
5169 if (*plow < a)
5170 (*phigh)++;
5171 *phigh += b;
5172}
5173
5174static void neg128(uint64_t *plow, uint64_t *phigh)
5175{
5176 *plow = ~ *plow;
5177 *phigh = ~ *phigh;
5178 add128(plow, phigh, 1, 0);
5179}
5180
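/* div64() divides the 128-bit value {*phigh:*plow} by b, leaving the quotient
   in *plow and the remainder in *phigh. When the high half is non-zero it falls
   back to a 64-iteration shift-and-subtract loop; a quotient that does not fit
   in 64 bits is reported as overflow. */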
5181/* return TRUE if overflow */
5182static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5183{
5184 uint64_t q, r, a1, a0;
5185 int i, qb, ab;
5186
5187 a0 = *plow;
5188 a1 = *phigh;
5189 if (a1 == 0) {
5190 q = a0 / b;
5191 r = a0 % b;
5192 *plow = q;
5193 *phigh = r;
5194 } else {
5195 if (a1 >= b)
5196 return 1;
5197 /* XXX: use a better algorithm */
5198 for(i = 0; i < 64; i++) {
5199 ab = a1 >> 63;
5200 a1 = (a1 << 1) | (a0 >> 63);
5201 if (ab || a1 >= b) {
5202 a1 -= b;
5203 qb = 1;
5204 } else {
5205 qb = 0;
5206 }
5207 a0 = (a0 << 1) | qb;
5208 }
5209#if defined(DEBUG_MULDIV)
5210 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5211 *phigh, *plow, b, a0, a1);
5212#endif
5213 *plow = a0;
5214 *phigh = a1;
5215 }
5216 return 0;
5217}
5218
5219/* return TRUE if overflow */
5220static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5221{
5222 int sa, sb;
5223 sa = ((int64_t)*phigh < 0);
5224 if (sa)
5225 neg128(plow, phigh);
5226 sb = (b < 0);
5227 if (sb)
5228 b = -b;
5229 if (div64(plow, phigh, b) != 0)
5230 return 1;
5231 if (sa ^ sb) {
5232 if (*plow > (1ULL << 63))
5233 return 1;
5234 *plow = - *plow;
5235 } else {
5236 if (*plow >= (1ULL << 63))
5237 return 1;
5238 }
5239 if (sa)
5240 *phigh = - *phigh;
5241 return 0;
5242}
5243
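/* The helpers below implement the 64-bit MUL/IMUL/DIV/IDIV forms: the multiply
   helpers leave the 128-bit product in EDX:EAX and record the high half (or,
   for IMUL, whether it is a significant extension of the low half) in CC_SRC
   for the later CF/OF computation, while the divide helpers raise #DE on
   division by zero or quotient overflow. */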
5244void helper_mulq_EAX_T0(target_ulong t0)
5245{
5246 uint64_t r0, r1;
5247
5248 mulu64(&r0, &r1, EAX, t0);
5249 EAX = r0;
5250 EDX = r1;
5251 CC_DST = r0;
5252 CC_SRC = r1;
5253}
5254
5255void helper_imulq_EAX_T0(target_ulong t0)
5256{
5257 uint64_t r0, r1;
5258
5259 muls64(&r0, &r1, EAX, t0);
5260 EAX = r0;
5261 EDX = r1;
5262 CC_DST = r0;
5263 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5264}
5265
5266target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5267{
5268 uint64_t r0, r1;
5269
5270 muls64(&r0, &r1, t0, t1);
5271 CC_DST = r0;
5272 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5273 return r0;
5274}
5275
5276void helper_divq_EAX(target_ulong t0)
5277{
5278 uint64_t r0, r1;
5279 if (t0 == 0) {
5280 raise_exception(EXCP00_DIVZ);
5281 }
5282 r0 = EAX;
5283 r1 = EDX;
5284 if (div64(&r0, &r1, t0))
5285 raise_exception(EXCP00_DIVZ);
5286 EAX = r0;
5287 EDX = r1;
5288}
5289
5290void helper_idivq_EAX(target_ulong t0)
5291{
5292 uint64_t r0, r1;
5293 if (t0 == 0) {
5294 raise_exception(EXCP00_DIVZ);
5295 }
5296 r0 = EAX;
5297 r1 = EDX;
5298 if (idiv64(&r0, &r1, t0))
5299 raise_exception(EXCP00_DIVZ);
5300 EAX = r0;
5301 EDX = r1;
5302}
5303#endif
5304
5305static void do_hlt(void)
5306{
5307 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5308 env->halted = 1;
5309 env->exception_index = EXCP_HLT;
5310 cpu_loop_exit();
5311}
5312
5313void helper_hlt(int next_eip_addend)
5314{
5315 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5316 EIP += next_eip_addend;
5317
5318 do_hlt();
5319}
5320
5321void helper_monitor(target_ulong ptr)
5322{
5323#ifdef VBOX
5324 if ((uint32_t)ECX > 1)
5325 raise_exception(EXCP0D_GPF);
5326#else /* !VBOX */
5327 if ((uint32_t)ECX != 0)
5328 raise_exception(EXCP0D_GPF);
5329#endif /* !VBOX */
5330 /* XXX: store address ? */
5331 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5332}
5333
5334void helper_mwait(int next_eip_addend)
5335{
5336 if ((uint32_t)ECX != 0)
5337 raise_exception(EXCP0D_GPF);
5338#ifdef VBOX
5339 helper_hlt(next_eip_addend);
5340#else /* !VBOX */
5341 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5342 EIP += next_eip_addend;
5343
5344 /* XXX: not complete but not completely erroneous */
5345 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5346 /* more than one CPU: do not sleep because another CPU may
5347 wake this one */
5348 } else {
5349 do_hlt();
5350 }
5351#endif /* !VBOX */
5352}
5353
5354void helper_debug(void)
5355{
5356 env->exception_index = EXCP_DEBUG;
5357 cpu_loop_exit();
5358}
5359
5360void helper_raise_interrupt(int intno, int next_eip_addend)
5361{
5362 raise_interrupt(intno, 1, 0, next_eip_addend);
5363}
5364
5365void helper_raise_exception(int exception_index)
5366{
5367 raise_exception(exception_index);
5368}
5369
5370void helper_cli(void)
5371{
5372 env->eflags &= ~IF_MASK;
5373}
5374
5375void helper_sti(void)
5376{
5377 env->eflags |= IF_MASK;
5378}
5379
5380#ifdef VBOX
5381void helper_cli_vme(void)
5382{
5383 env->eflags &= ~VIF_MASK;
5384}
5385
5386void helper_sti_vme(void)
5387{
5388 /* First check, then change eflags according to the AMD manual */
5389 if (env->eflags & VIP_MASK) {
5390 raise_exception(EXCP0D_GPF);
5391 }
5392 env->eflags |= VIF_MASK;
5393}
5394#endif /* VBOX */
5395
5396#if 0
5397/* vm86plus instructions */
5398void helper_cli_vm(void)
5399{
5400 env->eflags &= ~VIF_MASK;
5401}
5402
5403void helper_sti_vm(void)
5404{
5405 env->eflags |= VIF_MASK;
5406 if (env->eflags & VIP_MASK) {
5407 raise_exception(EXCP0D_GPF);
5408 }
5409}
5410#endif
5411
5412void helper_set_inhibit_irq(void)
5413{
5414 env->hflags |= HF_INHIBIT_IRQ_MASK;
5415}
5416
5417void helper_reset_inhibit_irq(void)
5418{
5419 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5420}
5421
5422void helper_boundw(target_ulong a0, int v)
5423{
5424 int low, high;
5425 low = ldsw(a0);
5426 high = ldsw(a0 + 2);
5427 v = (int16_t)v;
5428 if (v < low || v > high) {
5429 raise_exception(EXCP05_BOUND);
5430 }
5431 FORCE_RET();
5432}
5433
5434void helper_boundl(target_ulong a0, int v)
5435{
5436 int low, high;
5437 low = ldl(a0);
5438 high = ldl(a0 + 4);
5439 if (v < low || v > high) {
5440 raise_exception(EXCP05_BOUND);
5441 }
5442 FORCE_RET();
5443}
5444
5445static float approx_rsqrt(float a)
5446{
5447 return 1.0 / sqrt(a);
5448}
5449
5450static float approx_rcp(float a)
5451{
5452 return 1.0 / a;
5453}
5454
5455#if !defined(CONFIG_USER_ONLY)
5456
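/* Each inclusion of softmmu_template.h below instantiates the slow-path MMU
   load/store helpers for one access size: SHIFT 0 = byte, 1 = word, 2 = long,
   3 = quad. */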
5457#define MMUSUFFIX _mmu
5458
5459#define SHIFT 0
5460#include "softmmu_template.h"
5461
5462#define SHIFT 1
5463#include "softmmu_template.h"
5464
5465#define SHIFT 2
5466#include "softmmu_template.h"
5467
5468#define SHIFT 3
5469#include "softmmu_template.h"
5470
5471#endif
5472
5473#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5474/* This code assumes that a real physical address always fits into a host CPU
5475 register, which is wrong in general but true for our current use cases. */
5476RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5477{
5478 return remR3PhysReadS8(addr);
5479}
5480RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5481{
5482 return remR3PhysReadU8(addr);
5483}
5484void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5485{
5486 remR3PhysWriteU8(addr, val);
5487}
5488RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5489{
5490 return remR3PhysReadS16(addr);
5491}
5492RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5493{
5494 return remR3PhysReadU16(addr);
5495}
5496void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5497{
5498 remR3PhysWriteU16(addr, val);
5499}
5500RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5501{
5502 return remR3PhysReadS32(addr);
5503}
5504RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5505{
5506 return remR3PhysReadU32(addr);
5507}
5508void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5509{
5510 remR3PhysWriteU32(addr, val);
5511}
5512uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5513{
5514 return remR3PhysReadU64(addr);
5515}
5516void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5517{
5518 remR3PhysWriteU64(addr, val);
5519}
5520#endif /* VBOX */
5521
5522/* try to fill the TLB and return an exception if error. If retaddr is
5523 NULL, it means that the function was called in C code (i.e. not
5524 from generated code or from helper.c) */
5525/* XXX: fix it to restore all registers */
5526void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5527{
5528 TranslationBlock *tb;
5529 int ret;
5530 unsigned long pc;
5531 CPUX86State *saved_env;
5532
5533 /* XXX: hack to restore env in all cases, even if not called from
5534 generated code */
5535 saved_env = env;
5536 env = cpu_single_env;
5537
5538 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5539 if (ret) {
5540 if (retaddr) {
5541 /* now we have a real cpu fault */
5542 pc = (unsigned long)retaddr;
5543 tb = tb_find_pc(pc);
5544 if (tb) {
5545 /* the PC is inside the translated code. It means that we have
5546 a virtual CPU fault */
5547 cpu_restore_state(tb, env, pc, NULL);
5548 }
5549 }
5550 raise_exception_err(env->exception_index, env->error_code);
5551 }
5552 env = saved_env;
5553}
5554
5555#ifdef VBOX
5556
5557/**
5558 * Correctly computes the eflags.
5559 * @returns eflags.
5560 * @param env1 CPU environment.
5561 */
5562uint32_t raw_compute_eflags(CPUX86State *env1)
5563{
5564 CPUX86State *savedenv = env;
5565 uint32_t efl;
5566 env = env1;
5567 efl = compute_eflags();
5568 env = savedenv;
5569 return efl;
5570}
5571
5572/**
5573 * Reads byte from virtual address in guest memory area.
5574 * XXX: is it working for any addresses? swapped out pages?
5575 * @returns read data byte.
5576 * @param env1 CPU environment.
5577 * @param pvAddr GC Virtual address.
5578 */
5579uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5580{
5581 CPUX86State *savedenv = env;
5582 uint8_t u8;
5583 env = env1;
5584 u8 = ldub_kernel(addr);
5585 env = savedenv;
5586 return u8;
5587}
5588
5589/**
5590 * Reads word from virtual address in guest memory area.
5591 * XXX: is it working for any addresses? swapped out pages?
5592 * @returns read data word.
5593 * @param env1 CPU environment.
5594 * @param pvAddr GC Virtual address.
5595 */
5596uint16_t read_word(CPUX86State *env1, target_ulong addr)
5597{
5598 CPUX86State *savedenv = env;
5599 uint16_t u16;
5600 env = env1;
5601 u16 = lduw_kernel(addr);
5602 env = savedenv;
5603 return u16;
5604}
5605
5606/**
5607 * Reads dword from virtual address in guest memory area.
5608 * XXX: is it working for any addresses? swapped out pages?
5609 * @returns read data dword.
5610 * @param env1 CPU environment.
5611 * @param pvAddr GC Virtual address.
5612 */
5613uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5614{
5615 CPUX86State *savedenv = env;
5616 uint32_t u32;
5617 env = env1;
5618 u32 = ldl_kernel(addr);
5619 env = savedenv;
5620 return u32;
5621}
5622
5623/**
5624 * Writes byte to virtual address in guest memory area.
5625 * XXX: is it working for any addresses? swapped out pages?
5627 * @param env1 CPU environment.
5628 * @param pvAddr GC Virtual address.
5629 * @param val byte value
5630 */
5631void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5632{
5633 CPUX86State *savedenv = env;
5634 env = env1;
5635 stb(addr, val);
5636 env = savedenv;
5637}
5638
5639void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5640{
5641 CPUX86State *savedenv = env;
5642 env = env1;
5643 stw(addr, val);
5644 env = savedenv;
5645}
5646
5647void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5648{
5649 CPUX86State *savedenv = env;
5650 env = env1;
5651 stl(addr, val);
5652 env = savedenv;
5653}
5654
5655/**
5656 * Correctly loads selector into segment register with updating internal
5657 * qemu data/caches.
5658 * @param env1 CPU environment.
5659 * @param seg_reg Segment register.
5660 * @param selector Selector to load.
5661 */
5662void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5663{
5664 CPUX86State *savedenv = env;
5665#ifdef FORCE_SEGMENT_SYNC
5666 jmp_buf old_buf;
5667#endif
5668
5669 env = env1;
5670
5671 if ( env->eflags & X86_EFL_VM
5672 || !(env->cr[0] & X86_CR0_PE))
5673 {
5674 load_seg_vm(seg_reg, selector);
5675
5676 env = savedenv;
5677
5678 /* Successful sync. */
5679 env1->segs[seg_reg].newselector = 0;
5680 }
5681 else
5682 {
5683 /* For some reason this works even without saving/restoring the jump buffer; as this
5684 code is time critical, let's not do that. */
5685#ifdef FORCE_SEGMENT_SYNC
5686 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5687#endif
5688 if (setjmp(env1->jmp_env) == 0)
5689 {
5690 if (seg_reg == R_CS)
5691 {
5692 uint32_t e1, e2;
5693 e1 = e2 = 0;
5694 load_segment(&e1, &e2, selector);
5695 cpu_x86_load_seg_cache(env, R_CS, selector,
5696 get_seg_base(e1, e2),
5697 get_seg_limit(e1, e2),
5698 e2);
5699 }
5700 else
5701 helper_load_seg(seg_reg, selector);
5702 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5703 loading 0 selectors, which in turn led to subtle problems like #3588 */
5704
5705 env = savedenv;
5706
5707 /* Successful sync. */
5708 env1->segs[seg_reg].newselector = 0;
5709 }
5710 else
5711 {
5712 env = savedenv;
5713
5714 /* Postpone sync until the guest uses the selector. */
5715 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5716 env1->segs[seg_reg].newselector = selector;
5717 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5718 env1->exception_index = -1;
5719 env1->error_code = 0;
5720 env1->old_exception = -1;
5721 }
5722#ifdef FORCE_SEGMENT_SYNC
5723 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5724#endif
5725 }
5726
5727}
5728
5729DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5730{
5731 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5732}
5733
5734
5735int emulate_single_instr(CPUX86State *env1)
5736{
5737 TranslationBlock *tb;
5738 TranslationBlock *current;
5739 int flags;
5740 uint8_t *tc_ptr;
5741 target_ulong old_eip;
5742
5743 /* ensures env is loaded! */
5744 CPUX86State *savedenv = env;
5745 env = env1;
5746
5747 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5748
5749 current = env->current_tb;
5750 env->current_tb = NULL;
5751 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5752
5753 /*
5754 * Translate only one instruction.
5755 */
5756 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5757 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5758 env->segs[R_CS].base, flags, 0);
5759
5760 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5761
5762
5763 /* tb_link_phys: */
5764 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5765 tb->jmp_next[0] = NULL;
5766 tb->jmp_next[1] = NULL;
5767 Assert(tb->jmp_next[0] == NULL);
5768 Assert(tb->jmp_next[1] == NULL);
5769 if (tb->tb_next_offset[0] != 0xffff)
5770 tb_reset_jump(tb, 0);
5771 if (tb->tb_next_offset[1] != 0xffff)
5772 tb_reset_jump(tb, 1);
5773
5774 /*
5775 * Execute it using emulation
5776 */
5777 old_eip = env->eip;
5778 env->current_tb = tb;
5779
5780 /*
5781 * eip remains the same for repeated instructions; it is unclear why qemu doesn't do a jump
5782 * inside the generated code. Perhaps not a very safe hack.
5783 */
5784 while(old_eip == env->eip)
5785 {
5786 tc_ptr = tb->tc_ptr;
5787
5788#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5789 int fake_ret;
5790 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5791#else
5792 tcg_qemu_tb_exec(tc_ptr);
5793#endif
5794 /*
5795 * Exit once we detect an external interrupt and interrupts are enabled
5796 */
5797 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5798 ( (env->eflags & IF_MASK) &&
5799 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5800 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5801 {
5802 break;
5803 }
5804 }
5805 env->current_tb = current;
5806
5807 tb_phys_invalidate(tb, -1);
5808 tb_free(tb);
5809/*
5810 Assert(tb->tb_next_offset[0] == 0xffff);
5811 Assert(tb->tb_next_offset[1] == 0xffff);
5812 Assert(tb->tb_next[0] == 0xffff);
5813 Assert(tb->tb_next[1] == 0xffff);
5814 Assert(tb->jmp_next[0] == NULL);
5815 Assert(tb->jmp_next[1] == NULL);
5816 Assert(tb->jmp_first == NULL); */
5817
5818 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5819
5820 /*
5821 * Execute the next instruction when we encounter instruction fusing.
5822 */
5823 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5824 {
5825 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5826 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5827 emulate_single_instr(env);
5828 }
5829
5830 env = savedenv;
5831 return 0;
5832}
5833
5834/**
5835 * Correctly loads a new ldtr selector.
5836 *
5837 * @param env1 CPU environment.
5838 * @param selector Selector to load.
5839 */
5840void sync_ldtr(CPUX86State *env1, int selector)
5841{
5842 CPUX86State *saved_env = env;
5843 if (setjmp(env1->jmp_env) == 0)
5844 {
5845 env = env1;
5846 helper_lldt(selector);
5847 env = saved_env;
5848 }
5849 else
5850 {
5851 env = saved_env;
5852#ifdef VBOX_STRICT
5853 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5854#endif
5855 }
5856}
5857
5858int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5859 uint32_t *esp_ptr, int dpl)
5860{
5861 int type, index, shift;
5862
5863 CPUX86State *savedenv = env;
5864 env = env1;
5865
5866 if (!(env->tr.flags & DESC_P_MASK))
5867 cpu_abort(env, "invalid tss");
5868 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5869 if ((type & 7) != 1)
5870 cpu_abort(env, "invalid tss type %d", type);
5871 shift = type >> 3;
5872 index = (dpl * 4 + 2) << shift;
5873 if (index + (4 << shift) - 1 > env->tr.limit)
5874 {
5875 env = savedenv;
5876 return 0;
5877 }
5878 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5879
5880 if (shift == 0) {
5881 *esp_ptr = lduw_kernel(env->tr.base + index);
5882 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5883 } else {
5884 *esp_ptr = ldl_kernel(env->tr.base + index);
5885 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5886 }
5887
5888 env = savedenv;
5889 return 1;
5890}
5891
5892//*****************************************************************************
5893// Needs to be at the bottom of the file (overriding macros)
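// The stw/stl/stq macros (and, further down, lduw/ldl/ldq) are redefined here as
// plain host-memory accesses because restore_raw_fp_state() and
// save_raw_fp_state() operate on a raw host buffer rather than on guest virtual
// addresses.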
5894
5895static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5896{
5897 return *(CPU86_LDouble *)ptr;
5898}
5899
5900static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5901{
5902 *(CPU86_LDouble *)ptr = f;
5903}
5904
5905#undef stw
5906#undef stl
5907#undef stq
5908#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5909#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5910#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5911
5912//*****************************************************************************
5913void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5914{
5915 int fpus, fptag, i, nb_xmm_regs;
5916 CPU86_LDouble tmp;
5917 uint8_t *addr;
5918 int data64 = !!(env->hflags & HF_LMA_MASK);
5919
5920 if (env->cpuid_features & CPUID_FXSR)
5921 {
5922 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5923 fptag = 0;
5924 for(i = 0; i < 8; i++) {
5925 fptag |= (env->fptags[i] << i);
5926 }
5927 stw(ptr, env->fpuc);
5928 stw(ptr + 2, fpus);
5929 stw(ptr + 4, fptag ^ 0xff);
5930
5931 addr = ptr + 0x20;
5932 for(i = 0;i < 8; i++) {
5933 tmp = ST(i);
5934 helper_fstt_raw(tmp, addr);
5935 addr += 16;
5936 }
5937
5938 if (env->cr[4] & CR4_OSFXSR_MASK) {
5939 /* XXX: finish it */
5940 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5941 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5942 nb_xmm_regs = 8 << data64;
5943 addr = ptr + 0xa0;
5944 for(i = 0; i < nb_xmm_regs; i++) {
5945#if __GNUC__ < 4
5946 stq(addr, env->xmm_regs[i].XMM_Q(0));
5947 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5948#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5949 stl(addr, env->xmm_regs[i].XMM_L(0));
5950 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
5951 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
5952 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
5953#endif
5954 addr += 16;
5955 }
5956 }
5957 }
5958 else
5959 {
5960 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
5961 int fptag;
5962
5963 fp->FCW = env->fpuc;
5964 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5965 fptag = 0;
5966 for (i=7; i>=0; i--) {
5967 fptag <<= 2;
5968 if (env->fptags[i]) {
5969 fptag |= 3;
5970 } else {
5971 /* the FPU automatically computes it */
5972 }
5973 }
5974 fp->FTW = fptag;
5975
5976 for(i = 0;i < 8; i++) {
5977 tmp = ST(i);
5978 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
5979 }
5980 }
5981}
5982
5983//*****************************************************************************
5984#undef lduw
5985#undef ldl
5986#undef ldq
5987#define lduw(a) *(uint16_t *)(a)
5988#define ldl(a) *(uint32_t *)(a)
5989#define ldq(a) *(uint64_t *)(a)
5990//*****************************************************************************
5991void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5992{
5993 int i, fpus, fptag, nb_xmm_regs;
5994 CPU86_LDouble tmp;
5995 uint8_t *addr;
5996 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
5997
5998 if (env->cpuid_features & CPUID_FXSR)
5999 {
6000 env->fpuc = lduw(ptr);
6001 fpus = lduw(ptr + 2);
6002 fptag = lduw(ptr + 4);
6003 env->fpstt = (fpus >> 11) & 7;
6004 env->fpus = fpus & ~0x3800;
6005 fptag ^= 0xff;
6006 for(i = 0;i < 8; i++) {
6007 env->fptags[i] = ((fptag >> i) & 1);
6008 }
6009
6010 addr = ptr + 0x20;
6011 for(i = 0;i < 8; i++) {
6012 tmp = helper_fldt_raw(addr);
6013 ST(i) = tmp;
6014 addr += 16;
6015 }
6016
6017 if (env->cr[4] & CR4_OSFXSR_MASK) {
6018 /* XXX: finish it, endianness */
6019 env->mxcsr = ldl(ptr + 0x18);
6020 //ldl(ptr + 0x1c);
6021 nb_xmm_regs = 8 << data64;
6022 addr = ptr + 0xa0;
6023 for(i = 0; i < nb_xmm_regs; i++) {
6024#if HC_ARCH_BITS == 32
6025 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6026 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6027 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6028 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6029 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6030#else
6031 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6032 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6033#endif
6034 addr += 16;
6035 }
6036 }
6037 }
6038 else
6039 {
6040 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6041 int fptag, j;
6042
6043 env->fpuc = fp->FCW;
6044 env->fpstt = (fp->FSW >> 11) & 7;
6045 env->fpus = fp->FSW & ~0x3800;
6046 fptag = fp->FTW;
6047 for(i = 0;i < 8; i++) {
6048 env->fptags[i] = ((fptag & 3) == 3);
6049 fptag >>= 2;
6050 }
6051 j = env->fpstt;
6052 for(i = 0;i < 8; i++) {
6053 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6054 ST(i) = tmp;
6055 }
6056 }
6057}
6058//*****************************************************************************
6059//*****************************************************************************
6060
6061#endif /* VBOX */
6062
6063/* Secure Virtual Machine helpers */
6064
6065#if defined(CONFIG_USER_ONLY)
6066
6067void helper_vmrun(int aflag, int next_eip_addend)
6068{
6069}
6070void helper_vmmcall(void)
6071{
6072}
6073void helper_vmload(int aflag)
6074{
6075}
6076void helper_vmsave(int aflag)
6077{
6078}
6079void helper_stgi(void)
6080{
6081}
6082void helper_clgi(void)
6083{
6084}
6085void helper_skinit(void)
6086{
6087}
6088void helper_invlpga(int aflag)
6089{
6090}
6091void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6092{
6093}
6094void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6095{
6096}
6097
6098void helper_svm_check_io(uint32_t port, uint32_t param,
6099 uint32_t next_eip_addend)
6100{
6101}
6102#else
6103
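/* svm_save_seg()/svm_load_seg() convert between the CPU's cached descriptor
   flags and the packed attribute format used by the VMCB segment registers:
   flag bits 8-15 map to attrib bits 0-7 and flag bits 20-23 to attrib bits
   8-11. */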
6104static inline void svm_save_seg(target_phys_addr_t addr,
6105 const SegmentCache *sc)
6106{
6107 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6108 sc->selector);
6109 stq_phys(addr + offsetof(struct vmcb_seg, base),
6110 sc->base);
6111 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6112 sc->limit);
6113 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6114 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6115}
6116
6117static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6118{
6119 unsigned int flags;
6120
6121 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6122 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6123 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6124 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6125 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6126}
6127
6128static inline void svm_load_seg_cache(target_phys_addr_t addr,
6129 CPUState *env, int seg_reg)
6130{
6131 SegmentCache sc1, *sc = &sc1;
6132 svm_load_seg(addr, sc);
6133 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6134 sc->base, sc->limit, sc->flags);
6135}
6136
6137void helper_vmrun(int aflag, int next_eip_addend)
6138{
6139 target_ulong addr;
6140 uint32_t event_inj;
6141 uint32_t int_ctl;
6142
6143 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6144
6145 if (aflag == 2)
6146 addr = EAX;
6147 else
6148 addr = (uint32_t)EAX;
6149
6150 if (loglevel & CPU_LOG_TB_IN_ASM)
6151 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6152
6153 env->vm_vmcb = addr;
6154
6155 /* save the current CPU state in the hsave page */
6156 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6157 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6158
6159 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6160 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6161
6162 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6163 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6164 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6165 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6166 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6167 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6168
6169 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6170 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6171
6172 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6173 &env->segs[R_ES]);
6174 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6175 &env->segs[R_CS]);
6176 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6177 &env->segs[R_SS]);
6178 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6179 &env->segs[R_DS]);
6180
6181 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6182 EIP + next_eip_addend);
6183 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6184 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6185
6186 /* load the interception bitmaps so we do not need to access the
6187 vmcb in svm mode */
6188 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6189 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6190 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6191 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6192 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6193 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6194
6195 /* enable intercepts */
6196 env->hflags |= HF_SVMI_MASK;
6197
6198 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6199
6200 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6201 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6202
6203 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6204 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6205
6206 /* clear exit_info_2 so we behave like the real hardware */
6207 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6208
6209 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6210 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6211 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6212 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6213 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6214 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6215 if (int_ctl & V_INTR_MASKING_MASK) {
6216 env->v_tpr = int_ctl & V_TPR_MASK;
6217 env->hflags2 |= HF2_VINTR_MASK;
6218 if (env->eflags & IF_MASK)
6219 env->hflags2 |= HF2_HIF_MASK;
6220 }
6221
6222 cpu_load_efer(env,
6223 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6224 env->eflags = 0;
6225 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6226 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6227 CC_OP = CC_OP_EFLAGS;
6228
6229 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6230 env, R_ES);
6231 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6232 env, R_CS);
6233 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6234 env, R_SS);
6235 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6236 env, R_DS);
6237
6238 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6239 env->eip = EIP;
6240 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6241 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6242 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6243 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6244 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6245
6246 /* FIXME: guest state consistency checks */
6247
6248 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6249 case TLB_CONTROL_DO_NOTHING:
6250 break;
6251 case TLB_CONTROL_FLUSH_ALL_ASID:
6252 /* FIXME: this is not 100% correct but should work for now */
6253 tlb_flush(env, 1);
6254 break;
6255 }
6256
6257 env->hflags2 |= HF2_GIF_MASK;
6258
6259 if (int_ctl & V_IRQ_MASK) {
6260 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6261 }
6262
6263 /* maybe we need to inject an event */
6264 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6265 if (event_inj & SVM_EVTINJ_VALID) {
6266 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6267 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6268 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6269 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6270
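        /* Hedged note: the event described by EVENTINJ is delivered as if it
           had occurred immediately after VMRUN.  The VALID bit was consumed
           above, and the type field decides whether it is treated as an
           external interrupt, an NMI, a hardware exception or a software
           interrupt, which also determines exception_is_int and the saved
           next EIP in the cases below. */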
6271 if (loglevel & CPU_LOG_TB_IN_ASM)
6272 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6273 /* FIXME: need to implement valid_err */
6274 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6275 case SVM_EVTINJ_TYPE_INTR:
6276 env->exception_index = vector;
6277 env->error_code = event_inj_err;
6278 env->exception_is_int = 0;
6279 env->exception_next_eip = -1;
6280 if (loglevel & CPU_LOG_TB_IN_ASM)
6281 fprintf(logfile, "INTR");
6282            /* XXX: is it always correct? */
6283 do_interrupt(vector, 0, 0, 0, 1);
6284 break;
6285 case SVM_EVTINJ_TYPE_NMI:
6286 env->exception_index = EXCP02_NMI;
6287 env->error_code = event_inj_err;
6288 env->exception_is_int = 0;
6289 env->exception_next_eip = EIP;
6290 if (loglevel & CPU_LOG_TB_IN_ASM)
6291 fprintf(logfile, "NMI");
6292 cpu_loop_exit();
6293 break;
6294 case SVM_EVTINJ_TYPE_EXEPT:
6295 env->exception_index = vector;
6296 env->error_code = event_inj_err;
6297 env->exception_is_int = 0;
6298 env->exception_next_eip = -1;
6299 if (loglevel & CPU_LOG_TB_IN_ASM)
6300 fprintf(logfile, "EXEPT");
6301 cpu_loop_exit();
6302 break;
6303 case SVM_EVTINJ_TYPE_SOFT:
6304 env->exception_index = vector;
6305 env->error_code = event_inj_err;
6306 env->exception_is_int = 1;
6307 env->exception_next_eip = EIP;
6308 if (loglevel & CPU_LOG_TB_IN_ASM)
6309 fprintf(logfile, "SOFT");
6310 cpu_loop_exit();
6311 break;
6312 }
6313 if (loglevel & CPU_LOG_TB_IN_ASM)
6314 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6315 }
6316}
6317
6318void helper_vmmcall(void)
6319{
6320 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6321 raise_exception(EXCP06_ILLOP);
6322}
6323
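/* VMLOAD/VMSAVE transfer the part of the guest state that VMRUN/#VMEXIT do
   not switch automatically: FS, GS, TR and LDTR including their hidden
   descriptor state, KernelGSBase, the STAR/LSTAR/CSTAR/SFMASK syscall MSRs
   and the SYSENTER MSRs.  The two helpers below mirror that list. */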
6324void helper_vmload(int aflag)
6325{
6326 target_ulong addr;
6327 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6328
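    /* Hedged note: aflag carries the effective address size from the
       translator (2 = 64-bit); with a smaller address size the VMCB physical
       address taken from RAX is truncated to 32 bits. */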
6329 if (aflag == 2)
6330 addr = EAX;
6331 else
6332 addr = (uint32_t)EAX;
6333
6334 if (loglevel & CPU_LOG_TB_IN_ASM)
6335 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6336 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6337 env->segs[R_FS].base);
6338
6339 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6340 env, R_FS);
6341 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6342 env, R_GS);
6343 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6344 &env->tr);
6345 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6346 &env->ldt);
6347
6348#ifdef TARGET_X86_64
6349 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6350 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6351 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6352 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6353#endif
6354 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6355 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6356 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6357 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6358}
6359
6360void helper_vmsave(int aflag)
6361{
6362 target_ulong addr;
6363 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6364
6365 if (aflag == 2)
6366 addr = EAX;
6367 else
6368 addr = (uint32_t)EAX;
6369
6370 if (loglevel & CPU_LOG_TB_IN_ASM)
6371 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6372 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6373 env->segs[R_FS].base);
6374
6375 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6376 &env->segs[R_FS]);
6377 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6378 &env->segs[R_GS]);
6379 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6380 &env->tr);
6381 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6382 &env->ldt);
6383
6384#ifdef TARGET_X86_64
6385 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6386 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6387 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6388 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6389#endif
6390 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6391 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6392 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6393 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6394}
6395
6396void helper_stgi(void)
6397{
6398 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6399 env->hflags2 |= HF2_GIF_MASK;
6400}
6401
6402void helper_clgi(void)
6403{
6404 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6405 env->hflags2 &= ~HF2_GIF_MASK;
6406}
6407
6408void helper_skinit(void)
6409{
6410 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6411 /* XXX: not implemented */
6412 raise_exception(EXCP06_ILLOP);
6413}
6414
6415void helper_invlpga(int aflag)
6416{
6417 target_ulong addr;
6418 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6419
6420 if (aflag == 2)
6421 addr = EAX;
6422 else
6423 addr = (uint32_t)EAX;
6424
6425    /* XXX: could use the ASID to decide whether the flush is
6426       actually needed */
6427 tlb_flush_page(env, addr);
6428}
6429
6430void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6431{
6432 if (likely(!(env->hflags & HF_SVMI_MASK)))
6433 return;
6434#ifndef VBOX
6435 switch(type) {
6436 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6437 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6438 helper_vmexit(type, param);
6439 }
6440 break;
6441 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6442 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6443 helper_vmexit(type, param);
6444 }
6445 break;
6446 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6447 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6448 helper_vmexit(type, param);
6449 }
6450 break;
6451 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6452 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6453 helper_vmexit(type, param);
6454 }
6455 break;
6456 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6457 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6458 helper_vmexit(type, param);
6459 }
6460 break;
6461 case SVM_EXIT_MSR:
6462 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6463 /* FIXME: this should be read in at vmrun (faster this way?) */
6464 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6465 uint32_t t0, t1;
6466 switch((uint32_t)ECX) {
6467 case 0 ... 0x1fff:
6468 t0 = (ECX * 2) % 8;
6469                t1 = (ECX * 2) / 8; /* two permission bits per MSR, so scale the byte index too */
6470 break;
6471 case 0xc0000000 ... 0xc0001fff:
6472 t0 = (8192 + ECX - 0xc0000000) * 2;
6473 t1 = (t0 / 8);
6474 t0 %= 8;
6475 break;
6476 case 0xc0010000 ... 0xc0011fff:
6477 t0 = (16384 + ECX - 0xc0010000) * 2;
6478 t1 = (t0 / 8);
6479 t0 %= 8;
6480 break;
6481 default:
6482 helper_vmexit(type, param);
6483 t0 = 0;
6484 t1 = 0;
6485 break;
6486 }
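            /* t1 is the byte offset into the MSR permission bitmap and t0 the
               bit offset within that byte; each MSR owns two adjacent bits
               (even bit = read intercept, odd bit = write intercept) and
               param selects between them (0 = read, 1 = write).
               Worked example: ECX = 0xc0000080 (EFER) lies in the 0xc0000000
               range, so t0 = (8192 + 0x80) * 2 = 16640, giving byte 2080 and
               bit 0; a read test checks bit 0, a write test bit 1. */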
6487 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6488 helper_vmexit(type, param);
6489 }
6490 break;
6491 default:
6492 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6493 helper_vmexit(type, param);
6494 }
6495 break;
6496 }
6497#else /* VBOX */
6498    AssertMsgFailed(("We shouldn't be here; HWACCM handles this differently!"));
6499#endif /* VBOX */
6500}
6501
6502void helper_svm_check_io(uint32_t port, uint32_t param,
6503 uint32_t next_eip_addend)
6504{
6505 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6506 /* FIXME: this should be read in at vmrun (faster this way?) */
6507 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6508 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
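        /* Hedged note: the IOPM holds one bit per I/O port; (param >> 4) & 7
           is the access size in bytes from the IOIO exit information, so the
           mask covers every port touched by a multi-byte access.  lduw_phys
           is used because the shifted mask may straddle a byte boundary. */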
6509 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6510 /* next EIP */
6511 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6512 env->eip + next_eip_addend);
6513 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6514 }
6515 }
6516}
6517
6518/* Note: currently only 32 bits of exit_code are used */
6519void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6520{
6521 uint32_t int_ctl;
6522
6523 if (loglevel & CPU_LOG_TB_IN_ASM)
6524 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6525 exit_code, exit_info_1,
6526 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6527 EIP);
6528
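    /* Record the guest's interrupt shadow (the one-instruction STI/MOV SS
       window) in the VMCB for the hypervisor, and drop it from the emulated
       CPU state. */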
6529 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6530 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6531 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6532 } else {
6533 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6534 }
6535
6536 /* Save the VM state in the vmcb */
6537 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6538 &env->segs[R_ES]);
6539 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6540 &env->segs[R_CS]);
6541 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6542 &env->segs[R_SS]);
6543 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6544 &env->segs[R_DS]);
6545
6546 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6547 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6548
6549 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6550 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6551
6552 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6553 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6554 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6555 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6556 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6557
6558 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6559 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6560 int_ctl |= env->v_tpr & V_TPR_MASK;
6561 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6562 int_ctl |= V_IRQ_MASK;
6563 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6564
6565 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6566 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6567 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6568 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6569 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6570 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6571 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6572
6573 /* Reload the host state from vm_hsave */
6574 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6575 env->hflags &= ~HF_SVMI_MASK;
6576 env->intercept = 0;
6577 env->intercept_exceptions = 0;
6578 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6579 env->tsc_offset = 0;
6580
6581 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6582 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6583
6584 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6585 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6586
6587 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6588 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6589 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6590    /* we need to set EFER after the CRs so the hidden flags get
6591       set properly */
6592 cpu_load_efer(env,
6593 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6594 env->eflags = 0;
6595 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6596 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6597 CC_OP = CC_OP_EFLAGS;
6598
6599 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6600 env, R_ES);
6601 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6602 env, R_CS);
6603 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6604 env, R_SS);
6605 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6606 env, R_DS);
6607
6608 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6609 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6610 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6611
6612 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6613 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6614
6615 /* other setups */
6616 cpu_x86_set_cpl(env, 0);
6617 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6618 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6619
6620 env->hflags2 &= ~HF2_GIF_MASK;
6621 /* FIXME: Resets the current ASID register to zero (host ASID). */
6622
6623 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6624
6625 /* Clears the TSC_OFFSET inside the processor. */
6626
6627 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6628       from the page table indicated by the host's CR3. If the PDPEs contain
6629 illegal state, the processor causes a shutdown. */
6630
6631 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6632 env->cr[0] |= CR0_PE_MASK;
6633 env->eflags &= ~VM_MASK;
6634
6635 /* Disables all breakpoints in the host DR7 register. */
6636
6637 /* Checks the reloaded host state for consistency. */
6638
6639 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6640 host's code segment or non-canonical (in the case of long mode), a
6641       #GP fault is delivered inside the host. */
6642
6643 /* remove any pending exception */
6644 env->exception_index = -1;
6645 env->error_code = 0;
6646 env->old_exception = -1;
6647
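    /* cpu_loop_exit() longjmps back to the main cpu_exec() loop, so this
       helper never returns to the translated guest code. */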
6648 cpu_loop_exit();
6649}
6650
6651#endif
6652
6653/* MMX/SSE */
6654/* XXX: optimize by storing fptt and fptags in the static cpu state */
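/* In this FPU model a tag value of 0 means "valid" and 1 means "empty".
   MMX instructions alias the x87 register file, so entering MMX mode marks
   all eight registers valid and resets the stack top, while EMMS tags them
   all empty again (hence the 0x01010101 pattern written as two 32-bit
   stores). */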
6655void helper_enter_mmx(void)
6656{
6657 env->fpstt = 0;
6658 *(uint32_t *)(env->fptags) = 0;
6659 *(uint32_t *)(env->fptags + 4) = 0;
6660}
6661
6662void helper_emms(void)
6663{
6664 /* set to empty state */
6665 *(uint32_t *)(env->fptags) = 0x01010101;
6666 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6667}
6668
6669/* XXX: suppress */
6670void helper_movq(uint64_t *d, uint64_t *s)
6671{
6672 *d = *s;
6673}
6674
6675#define SHIFT 0
6676#include "ops_sse.h"
6677
6678#define SHIFT 1
6679#include "ops_sse.h"
6680
6681#define SHIFT 0
6682#include "helper_template.h"
6683#undef SHIFT
6684
6685#define SHIFT 1
6686#include "helper_template.h"
6687#undef SHIFT
6688
6689#define SHIFT 2
6690#include "helper_template.h"
6691#undef SHIFT
6692
6693#ifdef TARGET_X86_64
6694
6695#define SHIFT 3
6696#include "helper_template.h"
6697#undef SHIFT
6698
6699#endif
6700
6701/* bit operations */
6702target_ulong helper_bsf(target_ulong t0)
6703{
6704 int count;
6705 target_ulong res;
6706
6707 res = t0;
6708 count = 0;
6709 while ((res & 1) == 0) {
6710 count++;
6711 res >>= 1;
6712 }
6713 return count;
6714}
6715
6716target_ulong helper_bsr(target_ulong t0)
6717{
6718 int count;
6719 target_ulong res, mask;
6720
6721 res = t0;
6722 count = TARGET_LONG_BITS - 1;
6723 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6724 while ((res & mask) == 0) {
6725 count--;
6726 res <<= 1;
6727 }
6728 return count;
6729}
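/* Hedged note: both helpers assume a non-zero source operand; the translated
   code handles the zero case (ZF = 1) before calling them, otherwise the
   loops above would never terminate.  A host ctz/clz from host-utils.h could
   compute the same results without looping. */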
6730
6731
6732static int compute_all_eflags(void)
6733{
6734 return CC_SRC;
6735}
6736
6737static int compute_c_eflags(void)
6738{
6739 return CC_SRC & CC_C;
6740}
6741
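/* Lazy condition-code evaluation: the translator records the last
   flag-setting operation in CC_OP and its operands in CC_SRC/CC_DST, and the
   flags are only materialised on demand.  Each entry below supplies two
   evaluators: compute_all_* rebuilds O, S, Z, A, P and C, while compute_c_*
   produces just the carry flag, the common case for ADC/SBB and CF-based
   conditional branches. */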
6742CCTable cc_table[CC_OP_NB] = {
6743 [CC_OP_DYNAMIC] = { /* should never happen */ },
6744
6745 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6746
6747 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6748 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6749 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6750
6751 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6752 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6753 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6754
6755 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6756 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6757 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6758
6759 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6760 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6761 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6762
6763 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6764 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6765 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6766
6767 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6768 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6769 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6770
6771 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6772 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6773 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6774
6775 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6776 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6777 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6778
6779 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6780 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6781 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6782
6783 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6784 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6785 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6786
6787#ifdef TARGET_X86_64
6788 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6789
6790 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6791
6792 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6793
6794 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6795
6796 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6797
6798 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6799
6800 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6801
6802 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6803
6804 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6805
6806 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6807#endif
6808};
6809