/* cpu_jit_op.c */

/*
 * $Id: cpu_jit_op.c,v 1.96 2009-01-22 16:30:17 potyra Exp $
 *
 * Derived from QEMU sources.
 * Modified for FAUmachine by Volkmar Sieh.
 *
 * Copyright (c) 2005-2009 FAUmachine Team <info@faumachine.org>.
 * Copyright (c) 2003 Fabrice Bellard.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
 * USA
 */
 
#define FAST

#define ASM_SOFTMMU

#define _ISOC99_SOURCE
#include <fenv.h>
#include <math.h>

#include "exec.h"
#include "arch_gen_cpu_x86_state.h"
#include "arch_gen_cpu_x86_fpu_fast.h"
#include "arch_gen_cpu_x86_io.h"
#include "cpu_jit.h"

#include "floatx.h"

/* Some of the defines that follow are in dyngen-exec.h or exec-all.h in
 * QEMU */

/* force GCC to generate only one epilog at the end of the function */
#if defined(__i386__) || defined(__x86_64__)
/* Also add 4 bytes of padding so that we can replace the ret with a jmp.  */
#define FORCE_RET() asm volatile ("nop;nop;nop;nop");
#else
#define FORCE_RET() __asm__ __volatile__("" : : : "memory");
#endif

#ifdef __alpha__
/* the symbols are considered non exported so a br immediate is generated */
#define __hidden __attribute__((visibility("hidden")))
#else
#define __hidden
#endif

#if defined(__alpha__)
/* Suggested by Richard Henderson. This will result in code like
     ldah $0,__op_param1($29)        !gprelhigh
     lda $0,__op_param1($0)          !gprellow
   We can then conveniently change $29 to $31 and adapt the offsets to
   emit the appropriate constant.  */
extern int __op_param1 __hidden;
extern int __op_param2 __hidden;
#define PARAM1 ({ int _r; asm("" : "=r"(_r) : "0" (&__op_param1)); _r; })
#define PARAM2 ({ int _r; asm("" : "=r"(_r) : "0" (&__op_param2)); _r; })
#else
#if defined(__APPLE__)
static int __op_param1, __op_param2;
#else
extern int __op_param1, __op_param2;
#endif
#define PARAM1 ((long)(&__op_param1))
#define PARAM2 ((long)(&__op_param2))
#endif /* !defined(__alpha__) */

extern int __op_jmp0, __op_jmp1, __op_jmp2, __op_jmp3;

#ifdef __i386__
#define EXIT_TB() asm volatile ("hlt")
#if defined(__APPLE__)
/* XXX Different relocations are generated for MacOS X for Intel
 *    (please as from cctools).  */
#define GOTO_LABEL_PARAM(n) \
        asm volatile ("cli;.long " ASM_NAME(__op_gen_label) #n)
#else
#define GOTO_LABEL_PARAM(n) \
        asm volatile ("cli;.long " ASM_NAME(__op_gen_label) #n " - 1f;1:")
#endif
#endif
#ifdef __x86_64__
#define EXIT_TB() asm volatile ("hlt")
#define GOTO_LABEL_PARAM(n) \
        asm volatile ("cli;.long " ASM_NAME(__op_gen_label) #n " - 1f;1:")
#endif
#ifdef __powerpc__
#define EXIT_TB() asm volatile ("blr")
#define GOTO_LABEL_PARAM(n) asm volatile ("b " ASM_NAME(__op_gen_label) #n)
#endif
#ifdef __s390__
#define EXIT_TB() asm volatile ("br %r14")
#define GOTO_LABEL_PARAM(n) asm volatile ("b " ASM_NAME(__op_gen_label) #n)
#endif
#ifdef __alpha__
#define EXIT_TB() asm volatile ("ret")
#endif
#ifdef __ia64__
#define EXIT_TB() asm volatile ("br.ret.sptk.many b0;;")
#define GOTO_LABEL_PARAM(n) asm volatile ("br.sptk.many " \
                                ASM_NAME(__op_gen_label) #n)
#endif
#ifdef __sparc__
#define EXIT_TB() asm volatile ("jmpl %i0 + 8, %g0\n" \
                        "nop")
#endif
#ifdef __arm__
#define EXIT_TB() asm volatile ("b exec_loop")
#define GOTO_LABEL_PARAM(n) asm volatile ("b " ASM_NAME(__op_gen_label) #n)
#endif
#ifdef __mc68000
#define EXIT_TB() asm volatile ("rts")
#endif

#define ASM_OP_LABEL_NAME(n, opname) \
    ASM_NAME(__op_label) #n "." ASM_NAME(opname)


#if defined(__powerpc__)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (ASM_DATA_SECTION\
              ASM_OP_LABEL_NAME(n, opname) ":\n"\
              ".long 1f\n"\
              ASM_PREVIOUS_SECTION \
                  "b " ASM_NAME(__op_jmp) #n "\n"\
              "1:\n");\
} while (0)

#elif (defined(__i386__) || defined(__x86_64__)) && defined(USE_DIRECT_JUMP)

/* we patch the jump instruction directly.  Use sti in place of the actual
   jmp instruction so that dyngen can patch in the correct result.  */
#if defined(__APPLE__)
/* XXX Different relocations are generated for MacOS X for Intel
   (please as from cctools).  */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (ASM_DATA_SECTION\
              ASM_OP_LABEL_NAME(n, opname) ":\n"\
              ".long 1f\n"\
              ASM_PREVIOUS_SECTION \
                  "sti;.long " ASM_NAME(__op_jmp) #n "\n"\
              "1:\n");\
} while (0)
#else
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (ASM_DATA_SECTION\
              ASM_OP_LABEL_NAME(n, opname) ":\n"\
              ".long 1f\n"\
              ASM_PREVIOUS_SECTION \
                  "sti;.long " ASM_NAME(__op_jmp) #n " - 1f\n"\
              "1:\n");\
} while (0)
#endif

#else

/* jump to next block operations (more portable code, does not need
   cache flushing, but slower because of indirect jump) */
#define GOTO_TB(opname, tbparam, n)\
do {\
    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
    static void __attribute__((unused)) *__op_label ## n \
        __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n: ;\
dummy_label ## n: ;\
} while (0)

#endif /* __powerpc__ else */

#if defined(DARWIN) && CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
/* NOTE(review): pops one 32-bit slot off the host stack; appears to be
 * needed only for the Darwin long-mode build — confirm against the
 * code generator's calling convention before changing. */
#define STACK_CLEANUP() asm volatile ("addl $0x04,%esp");
#else
#define STACK_CLEANUP()
#endif

/* FIXME: parity_table is defined in another translation unit; the
 * extern declaration belongs in a shared header. */
extern uint8_t NAME_(parity_table)[256];

/* modulo 17 table: a 16-bit rotate-through-carry cycles with period
 * 16 data bits + 1 carry bit, so RCL/RCR counts are reduced mod 17. */
const uint8_t NAME_(rclw_table)[32] = {
      0, 1, 2, 3, 4, 5, 6, 7,
      8, 9,10,11,12,13,14,15,
      16, 0, 1, 2, 3, 4, 5, 6,
      7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table: same idea for 8-bit rotate-through-carry (8 + 1). */
const uint8_t NAME_(rclb_table)[32] = {
      0, 1, 2, 3, 4, 5, 6, 7,
      8, 0, 1, 2, 3, 4, 5, 6,
      7, 8, 0, 1, 2, 3, 4, 5,
      6, 7, 8, 0, 1, 2, 3, 4,
};

/* constants for the x87 constant-load instructions
 * (fldz, fld1, fldpi, fldlg2, fldln2, fldl2e, fldl2t) */
const CPU86_LDouble NAME_(f15rk)[7] =
{
      0.00000000000000000000L,
      1.00000000000000000000L,
      3.14159265358979323851L,      /*pi*/
      0.30102999566398119523L,      /*lg2*/
      0.69314718055994530943L,      /*ln2*/
      1.44269504088896340739L,      /*l2e*/
      3.32192809488736234781L,      /*l2t*/
};

/* we define the various pieces of code used by the JIT */

#define REG EAX
#define REGNAME _EAX
#include "cpu_jit_opreg_template.h"
#undef REG
#undef REGNAME

#define REG ECX
#define REGNAME _ECX
#include "cpu_jit_opreg_template.h"
#undef REG
#undef REGNAME

#define REG EDX
#define REGNAME _EDX
#include "cpu_jit_opreg_template.h"
#undef REG
#undef REGNAME

#define REG EBX
#define REGNAME _EBX
#include "cpu_jit_opreg_template.h"
#undef REG
#undef REGNAME

#define REG ESP
#define REGNAME _ESP
#include "cpu_jit_opreg_template.h"
#undef REG
#undef REGNAME

#define REG EBP
#define REGNAME _EBP
#include "cpu_jit_opreg_template.h"
#undef REG
#undef REGNAME

#define REG ESI
#define REGNAME _ESI
#include "cpu_jit_opreg_template.h"
#undef REG
#undef REGNAME

#define REG EDI
#define REGNAME _EDI
#include "cpu_jit_opreg_template.h"
#undef REG
#undef REGNAME

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT

#define REG (env->regs[8])
#define REGNAME _R8
#include "cpu_jit_opreg_template.h"
#undef REG
#undef REGNAME

#define REG (env->regs[9])
#define REGNAME _R9
#include "cpu_jit_opreg_template.h"
#undef REG
#undef REGNAME

#define REG (env->regs[10])
#define REGNAME _R10
#include "cpu_jit_opreg_template.h"
#undef REG
#undef REGNAME

#define REG (env->regs[11])
#define REGNAME _R11
#include "cpu_jit_opreg_template.h"
#undef REG
#undef REGNAME

#define REG (env->regs[12])
#define REGNAME _R12
#include "cpu_jit_opreg_template.h"
#undef REG
#undef REGNAME

#define REG (env->regs[13])
#define REGNAME _R13
#include "cpu_jit_opreg_template.h"
#undef REG
#undef REGNAME

#define REG (env->regs[14])
#define REGNAME _R14
#include "cpu_jit_opreg_template.h"
#undef REG
#undef REGNAME

#define REG (env->regs[15])
#define REGNAME _R15
#include "cpu_jit_opreg_template.h"
#undef REG
#undef REGNAME

#endif

#ifdef WORDS_BIGENDIAN
/* 64-bit value overlay; fields are ordered so that v0 always names the
 * least-significant unit regardless of host byte order. */
typedef union UREG64 {
    struct { uint16_t v3, v2, v1, v0; } w;
    struct { uint32_t v1, v0; } l;
    uint64_t q;
} UREG64;
#else /* ! WORDS_BIGENDIAN */
/* little-endian layout: field order reversed, v0 still the low unit */
typedef union UREG64 {
    struct { uint16_t v0, v1, v2, v3; } w;
    struct { uint32_t v0, v1; } l;
    uint64_t q;
} UREG64;
#endif /* ! WORDS_BIGENDIAN */

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT

/* Assemble a 64-bit immediate from the two 32-bit op parameters:
 * PARAM1 supplies the high half, PARAM2 the low half. */
#define PARAMQ1 \
({\
    UREG64 __p;\
    __p.l.v1 = PARAM1;\
    __p.l.v0 = PARAM2;\
    __p.q;\
})

#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */

/* operations with flags */

/* update flags with T0 and T1 (add/sub case) */
/* update flags with T0 and T1 (add/sub case) */
void NAME_(op_update2_cc)(void)
{
    CC_SRC = T1;
    CC_DST = T0;
}

/* update flags with T0 (logic operation case) */
void NAME_(op_update1_cc)(void)
{
    CC_DST = T0;
}

/* NEG: latch the negated operand so the flag helpers can recover it */
void NAME_(op_update_neg_cc)(void)
{
    CC_SRC = -T0;
    CC_DST = T0;
}

/* CMP: flags come from T0 - T1; the difference itself is discarded */
void NAME_(op_cmpl_T0_T1_cc)(void)
{
    CC_SRC = T1;
    CC_DST = T0 - T1;
}

/* INC/DEC preserve the carry flag: capture the current carry into
 * CC_SRC before recording the result */
void NAME_(op_update_inc_cc)(void)
{
    CC_SRC = NAME_(cc_table)[CC_OP].compute_c();
    CC_DST = T0;
}

/* TEST: flags from T0 & T1, result discarded */
void NAME_(op_testl_T0_T1_cc)(void)
{
    CC_DST = T0 & T1;
}

/* operations without flags */

/* ALU ops on the T0/T1 scratch registers.  Condition codes are
 * produced separately by the op_update*_cc ops above. */
void NAME_(op_addl_T0_T1)(void)
{
    T0 += T1;
}

void NAME_(op_orl_T0_T1)(void)
{
    T0 |= T1;
}

void NAME_(op_andl_T0_T1)(void)
{
    T0 &= T1;
}

void NAME_(op_subl_T0_T1)(void)
{
    T0 -= T1;
}

void NAME_(op_xorl_T0_T1)(void)
{
    T0 ^= T1;
}

void NAME_(op_negl_T0)(void)
{
    T0 = -T0;
}

void NAME_(op_incl_T0)(void)
{
    T0++;
}

void NAME_(op_decl_T0)(void)
{
    T0--;
}

void NAME_(op_notl_T0)(void)
{
    T0 = ~T0;
}
#if CONFIG_CPU >= 80486
/* BSWAP is a 486+ instruction */
void NAME_(op_bswapl_T0)(void)
{
    T0 = bswap32(T0);
}
#endif
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
/* 64-bit byte swap is implemented in a C helper */
void NAME_(op_bswapq_T0)(void)
{
    NAME_(helper_bswapq_T0)();
}
#endif

/* multiply/divide */

/* XXX: add eflags optimizations */
/* XXX: add non P4 style flags */

/* 8-bit MUL: AX = AL * T0.  CC_SRC gets the high byte of the product
 * (nonzero when the result does not fit in AL). */
void NAME_(op_mulb_AL_T0)(void)
{
    unsigned int res;
    res = (uint8_t)EAX * (uint8_t)T0;
    EAX = (EAX & ~0xffff) | res;
    CC_DST = res;
    CC_SRC = (res & 0xff00);
}

/* 8-bit IMUL: CC_SRC flags "product does not fit in a signed byte" */
void NAME_(op_imulb_AL_T0)(void)
{
    int res;
    res = (int8_t)EAX * (int8_t)T0;
    EAX = (EAX & ~0xffff) | (res & 0xffff);
    CC_DST = res;
    CC_SRC = (res != (int8_t)res);
}

/* 16-bit MUL: DX:AX = AX * T0 */
void NAME_(op_mulw_AX_T0)(void)
{
    unsigned int res;
    res = (uint16_t)EAX * (uint16_t)T0;
    EAX = (EAX & ~0xffff) | (res & 0xffff);
    EDX = (EDX & ~0xffff) | ((res >> 16) & 0xffff);
    CC_DST = res;
    CC_SRC = res >> 16;
}

void NAME_(op_imulw_AX_T0)(void)
{
    int res;
    res = (int16_t)EAX * (int16_t)T0;
    EAX = (EAX & ~0xffff) | (res & 0xffff);
    EDX = (EDX & ~0xffff) | ((res >> 16) & 0xffff);
    CC_DST = res;
    CC_SRC = (res != (int16_t)res);
}
#if CONFIG_CPU >= 80386
/* 32-bit MUL: EDX:EAX = EAX * T0 */
void NAME_(op_mull_EAX_T0)(void)
{
    uint64_t res;
    res = (uint64_t)((uint32_t)EAX) * (uint64_t)((uint32_t)T0);
    EAX = (uint32_t)res;
    EDX = (uint32_t)(res >> 32);
    CC_DST = (uint32_t)res;
    CC_SRC = (uint32_t)(res >> 32);
}

void NAME_(op_imull_EAX_T0)(void)
{
    int64_t res;
    res = (int64_t)((int32_t)EAX) * (int64_t)((int32_t)T0);
    EAX = (uint32_t)(res);
    EDX = (uint32_t)(res >> 32);
    CC_DST = res;
    CC_SRC = (res != (int32_t)res);
}
#endif
/* two-operand IMUL variants: result goes back into T0 */
void NAME_(op_imulw_T0_T1)(void)
{
    int res;
    res = (int16_t)T0 * (int16_t)T1;
    T0 = res;
    CC_DST = res;
    CC_SRC = (res != (int16_t)res);
}
#if CONFIG_CPU >= 80386
void NAME_(op_imull_T0_T1)(void)
{
    int64_t res;
    res = (int64_t)((int32_t)T0) * (int64_t)((int32_t)T1);
    T0 = res;
    CC_DST = res;
    CC_SRC = (res != (int32_t)res);
}
#endif
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
/* 64-bit multiplies are implemented in C helpers */
void NAME_(op_mulq_EAX_T0)(void)
{
    NAME_(helper_mulq_EAX_T0)();
}

void NAME_(op_imulq_EAX_T0)(void)
{
    NAME_(helper_imulq_EAX_T0)();
}

void NAME_(op_imulq_T0_T1)(void)
{
    NAME_(helper_imulq_T0_T1)();
}
#endif

/* division, flags are undefined */

/* 8-bit DIV: AL = AX / T0, AH = AX % T0.
 * Raises #DE on divide-by-zero and on quotient overflow. */
void NAME_(op_divb_AL_T0)(void)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (T0 & 0xff);
    if (den == 0) {
        NAME_(raise_exception)(CPU_FAULT_DE);
    }
    q = (num / den);
    if (q > 0xff)
        NAME_(raise_exception)(CPU_FAULT_DE);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

/* 8-bit IDIV: overflow check is "quotient fits in a signed byte" */
void NAME_(op_idivb_AL_T0)(void)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)T0;
    if (den == 0) {
        NAME_(raise_exception)(CPU_FAULT_DE);
    }
    q = (num / den);
    if (q != (int8_t)q)
        NAME_(raise_exception)(CPU_FAULT_DE);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

/* 16-bit DIV: AX = DX:AX / T0, DX = remainder */
void NAME_(op_divw_AX_T0)(void)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (T0 & 0xffff);
    if (den == 0) {
        NAME_(raise_exception)(CPU_FAULT_DE);
    }
    q = (num / den);
    if (q > 0xffff)
        NAME_(raise_exception)(CPU_FAULT_DE);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void NAME_(op_idivw_AX_T0)(void)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)T0;
    if (den == 0) {
        NAME_(raise_exception)(CPU_FAULT_DE);
    }
    q = (num / den);
    if (q != (int16_t)q)
        NAME_(raise_exception)(CPU_FAULT_DE);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}
#if CONFIG_CPU >= 80386
/* 32-bit DIV: EAX = EDX:EAX / T0, EDX = remainder */
void NAME_(op_divl_EAX_T0)(void)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        NAME_(raise_exception)(CPU_FAULT_DE);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        NAME_(raise_exception)(CPU_FAULT_DE);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void NAME_(op_idivl_EAX_T0)(void)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        NAME_(raise_exception)(CPU_FAULT_DE);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        NAME_(raise_exception)(CPU_FAULT_DE);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
#endif
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
/* 64-bit divides are implemented in C helpers */
void NAME_(op_divq_EAX_T0)(void)
{
    NAME_(helper_divq_EAX_T0)();
}

void NAME_(op_idivq_EAX_T0)(void)
{
    NAME_(helper_idivq_EAX_T0)();
}
#endif

/* constant load & misc op */

/* XXX: consistent names */
/* Immediate loads: PARAM1/PARAM2 are placeholder constants patched in
 * by the code generator at translation time.  "imu" variants
 * zero-extend the 32-bit immediate, "im" variants sign-extend. */
void NAME_(op_movl_T0_imu)(void)
{
    T0 = (uint32_t)PARAM1;
}

void NAME_(op_movl_T0_im)(void)
{
    T0 = (int32_t)PARAM1;
}

void NAME_(op_addl_T0_im)(void)
{
    T0 += PARAM1;
}

void NAME_(op_andl_T0_ffff)(void)
{
    T0 = T0 & 0xffff;
}

void NAME_(op_andl_T0_im)(void)
{
    T0 = T0 & PARAM1;
}

void NAME_(op_movl_T0_T1)(void)
{
    T0 = T1;
}

void NAME_(op_movl_T1_imu)(void)
{
    T1 = (uint32_t)PARAM1;
}

void NAME_(op_movl_T1_im)(void)
{
    T1 = (int32_t)PARAM1;
}

void NAME_(op_addl_T1_im)(void)
{
    T1 += PARAM1;
}

void NAME_(op_movl_T1_A0)(void)
{
    T1 = A0;
}

/* A0 is the address scratch register; the 32-bit ops truncate the
 * result to 32 bits, the _q variants below keep the full width. */
void NAME_(op_movl_A0_im)(void)
{
    A0 = (uint32_t)PARAM1;
}

void NAME_(op_addl_A0_im)(void)
{
    A0 = (uint32_t)(A0 + PARAM1);
}

/* PARAM1 is a byte offset into *env locating a segment base value */
void NAME_(op_movl_A0_seg)(void)
{
    A0 = (uint32_t)*(target_ulong *)((char *)env + PARAM1);
}

void NAME_(op_addl_A0_seg)(void)
{
    A0 = (uint32_t)(A0 + *(target_ulong *)((char *)env + PARAM1));
}

/* used by XLAT: add AL to the address */
void NAME_(op_addl_A0_AL)(void)
{
    A0 = (uint32_t)(A0 + (EAX & 0xff));
}

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT

/* 64-bit immediates are built from both op parameters (PARAMQ1) */
void NAME_(op_movq_T0_im64)(void)
{
    T0 = PARAMQ1;
}

void NAME_(op_movq_T1_im64)(void)
{
    T1 = PARAMQ1;
}

void NAME_(op_movq_A0_im)(void)
{
    A0 = (int32_t)PARAM1;
}

void NAME_(op_movq_A0_im64)(void)
{
    A0 = PARAMQ1;
}

void NAME_(op_addq_A0_im)(void)
{
    A0 = (A0 + (int32_t)PARAM1);
}

void NAME_(op_addq_A0_im64)(void)
{
    A0 = (A0 + PARAMQ1);
}

void NAME_(op_movq_A0_seg)(void)
{
    A0 = *(target_ulong *)((char *)env + PARAM1);
}

void NAME_(op_addq_A0_seg)(void)
{
    A0 += *(target_ulong *)((char *)env + PARAM1);
}

void NAME_(op_addq_A0_AL)(void)
{
    A0 = (A0 + (EAX & 0xff));
}

#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */

/* 16-bit addressing: wrap the effective address to 64K */
void NAME_(op_andl_A0_ffff)(void)
{
    A0 = A0 & 0xffff;
}

/* memory access */

#define MEMSUFFIX _kernel
#include "cpu_jit_ops_mem.h"

#define MEMSUFFIX _user
#include "cpu_jit_ops_mem.h"

/* Indirect jump: target address comes from T0. */
void NAME_(op_jmp_T0)(void)
{
      EIP = T0;
}

/* direct jump to a translation-time immediate */
void NAME_(op_movl_eip_im)(void)
{
    EIP = (uint32_t)PARAM1;
}

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
void NAME_(op_movq_eip_im)(void)
{
    EIP = (int32_t)PARAM1;
}

void NAME_(op_movq_eip_im64)(void)
{
    EIP = PARAMQ1;
}
#endif

/* HLT: park the CPU until the next interrupt */
void NAME_(op_hlt)(void)
{
      env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* Needed if sti is just before. */
      env->hflags |= HF_HALTED_MASK;
      env->exception_index = -1;
}

/* INT n: PARAM1 = vector, PARAM2 = length of the INT instruction
 * (added to EIP for the return address).  Never returns. */
void __attribute__((__noreturn__)) NAME_(op_raise_interrupt)(void)
{
      int intno, next_eip_addend;

      intno = PARAM1;
      next_eip_addend = PARAM2;
      NAME_(raise_interrupt)(intno, 1, 0, next_eip_addend);
}

/* raise the exception given by PARAM1; never returns */
void __attribute__((__noreturn__)) NAME_(op_raise_exception)(void)
{
      int exception_index;

      exception_index = PARAM1;
      NAME_(raise_exception)(exception_index);
}

/* INTO: raise #OF interrupt if the overflow flag is set */
void NAME_(op_into)(void)
{
      int eflags;

      eflags = NAME_(cc_table)[CC_OP].compute_all();
      if (eflags & CC_O) {
            NAME_(raise_interrupt)(CPU_FAULT_OF, 1, 0, PARAM1);
      }
      FORCE_RET();
}

void NAME_(op_cli)(void)
{
      env->eflags &= ~CPU_IF_MASK;
}

void NAME_(op_sti)(void)
{
      env->eflags |= CPU_IF_MASK;
}

/* delay interrupt delivery for one instruction (MOV SS / STI shadow) */
void NAME_(op_set_inhibit_irq)(void)
{
      env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void NAME_(op_reset_inhibit_irq)(void)
{
      env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

#if 0
/* vm86plus instructions */
void NAME_(op_cli_vm)(void)
{
    env->eflags &= ~VIF_MASK;
}

void NAME_(op_sti_vm)(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        EIP = PARAM1;
        NAME_(raise_exception)(CPU_FAULT_GP);
    }
    FORCE_RET();
}
#endif

/* BOUND: raise #BR if the signed index in T0 is outside [low, high]
 * read from memory at A0 */
void NAME_(op_boundw)(void)
{
    int low, high, v;
    low = ldsw(A0);
    high = ldsw(A0 + 2);
    v = (int16_t)T0;
    if (v < low || v > high) {
        NAME_(raise_exception)(CPU_FAULT_BR);
    }
    FORCE_RET();
}

void NAME_(op_boundl)(void)
{
    int low, high, v;
    low = ldl(A0);
    high = ldl(A0 + 4);
    v = T0;
    if (v < low || v > high) {
        NAME_(raise_exception)(CPU_FAULT_BR);
    }
    FORCE_RET();
}
#if CONFIG_CPU >= 80586
void NAME_(op_cmpxchg8b)(void)
{
    NAME_(helper_cmpxchg8b)();
}
#endif
void NAME_(op_movl_T0_0)(void)
{
    T0 = 0;
}

/* account PARAM1 executed instructions (no limit check) */
void NAME_(op_flushtsc)(void)
{
      env->process.inst_cnt += PARAM1;
}

/* account instructions and leave the translation block once the
 * instruction budget is exhausted */
void NAME_(op_inctsc)(void)
{
      env->process.inst_cnt += PARAM1;
      if (env->process.inst_limit <= env->process.inst_cnt) {
            EXIT_TB();
      }
}

void NAME_(op_exit_tb)(void)
{
    EXIT_TB();
}

/* multiple size ops */

#define ldul ldl

#define SHIFT 0
#include "cpu_jit_ops_template.h"
#undef SHIFT

#define SHIFT 1
#include "cpu_jit_ops_template.h"
#undef SHIFT

#if CONFIG_CPU >= 80386
#define SHIFT 2
#include "cpu_jit_ops_template.h"
#undef SHIFT
#endif

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT

#define SHIFT 3
#include "cpu_jit_ops_template.h"
#undef SHIFT

#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */

/* sign extend */

/* sign extension (movs*) and zero extension (movz*) of T0 */
void NAME_(op_movsbl_T0_T0)(void)
{
    T0 = (int8_t)T0;
}

void NAME_(op_movzbl_T0_T0)(void)
{
    T0 = (uint8_t)T0;
}

void NAME_(op_movswl_T0_T0)(void)
{
    T0 = (int16_t)T0;
}

void NAME_(op_movzwl_T0_T0)(void)
{
    T0 = (uint16_t)T0;
}

/* CWDE: sign-extend AX into EAX */
void NAME_(op_movswl_EAX_AX)(void)
{
    EAX = (uint32_t) (int16_t) EAX;
}

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
void NAME_(op_movslq_T0_T0)(void)
{
    T0 = (int32_t)T0;
}

/* CDQE: sign-extend EAX into RAX */
void NAME_(op_movslq_RAX_EAX)(void)
{
    EAX = (int32_t)EAX;
}
#endif

/* CBW: sign-extend AL into AX */
void NAME_(op_movsbw_AX_AL)(void)
{
    EAX = (EAX & ~0xffff) | ((int8_t)EAX & 0xffff);
}
#if CONFIG_CPU >= 80386
/* CDQ: fill EDX with the sign of EAX */
void NAME_(op_movslq_EDX_EAX)(void)
{
    EDX = (uint32_t) ((int32_t) EAX >> 31);
}
#endif /* CONFIG_CPU >= 80386 */
/* CWD: fill DX with the sign of AX */
void NAME_(op_movswl_DX_AX)(void)
{
    EDX = (EDX & ~0xffff) | (((int16_t)EAX >> 15) & 0xffff);
}

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
/* CQO: fill RDX with the sign of RAX */
void NAME_(op_movsqo_RDX_RAX)(void)
{
    EDX = (int64_t)EAX >> 63;
}
#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */

/* string ops helpers */

/* advance the string-op index registers by T0 (the signed element
 * size, already negated for the DF=1 case by the caller) */
void NAME_(op_addl_ESI_T0)(void)
{
    ESI = (uint32_t)(ESI + T0);
}

/* 16-bit address size: only the low word of ESI is updated */
void NAME_(op_addw_ESI_T0)(void)
{
    ESI = (ESI & ~0xffff) | ((ESI + T0) & 0xffff);
}

void NAME_(op_addl_EDI_T0)(void)
{
    EDI = (uint32_t)(EDI + T0);
}

void NAME_(op_addw_EDI_T0)(void)
{
    EDI = (EDI & ~0xffff) | ((EDI + T0) & 0xffff);
}

/* REP prefix loop counter */
void NAME_(op_decl_ECX)(void)
{
    ECX = (uint32_t)(ECX - 1);
}

void NAME_(op_decw_ECX)(void)
{
    ECX = (ECX & ~0xffff) | ((ECX - 1) & 0xffff);
}

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
void NAME_(op_addq_ESI_T0)(void)
{
    ESI = (ESI + T0);
}

void NAME_(op_addq_EDI_T0)(void)
{
    EDI = (EDI + T0);
}

void NAME_(op_decq_ECX)(void)
{
    ECX--;
}
#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */

/* push/pop utils */

/* stack accesses go through the SS segment base */
void NAME_(op_addl_A0_SS)(void)
{
    A0 = (uint32_t)(A0 + env->segs[R_SS].base);
}

void NAME_(op_subl_A0_2)(void)
{
    A0 = (uint32_t)(A0 - 2);
}

void NAME_(op_subl_A0_4)(void)
{
    A0 = (uint32_t)(A0 - 4);
}

void NAME_(op_addl_ESP_4)(void)
{
    ESP = (uint32_t)(ESP + 4);
}

void NAME_(op_addl_ESP_2)(void)
{
    ESP = (uint32_t)(ESP + 2);
}

/* 16-bit stack: only the low word of ESP changes */
void NAME_(op_addw_ESP_4)(void)
{
    ESP = (ESP & ~0xffff) | ((ESP + 4) & 0xffff);
}

void NAME_(op_addw_ESP_2)(void)
{
    ESP = (ESP & ~0xffff) | ((ESP + 2) & 0xffff);
}

void NAME_(op_addl_ESP_im)(void)
{
    ESP = (uint32_t)(ESP + PARAM1);
}

void NAME_(op_addw_ESP_im)(void)
{
    ESP = (ESP & ~0xffff) | ((ESP + PARAM1) & 0xffff);
}

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
void NAME_(op_subq_A0_2)(void)
{
    A0 -= 2;
}

void NAME_(op_subq_A0_8)(void)
{
    A0 -= 8;
}

void NAME_(op_addq_ESP_8)(void)
{
    ESP += 8;
}

void NAME_(op_addq_ESP_im)(void)
{
    ESP += PARAM1;
}
#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */
#if CONFIG_CPU >= 80586
/* thin wrappers: complex system instructions live in C helpers */
void NAME_(op_rdtsc)(void)
{
    NAME_(helper_rdtsc)();
}
#endif /* CONFIG_CPU >= 80586 */
#if CONFIG_CPU >= 80486
void NAME_(op_cpuid)(void)
{
    NAME_(helper_cpuid)();
}
#endif /* CONFIG_CPU >= 80486 */
/* ENTER: PARAM1 = frame size operand, PARAM2 = nesting level */
void NAME_(op_enter_level)(void)
{
    NAME_(helper_enter_level)(PARAM1, PARAM2);
}

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
void NAME_(op_enter64_level)(void)
{
    NAME_(helper_enter64_level)(PARAM1, PARAM2);
}
#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */

#if 80486 <= CONFIG_CPU && CONFIG_CPU_SEP_SUPPORT
void NAME_(op_sysenter)(void)
{
    NAME_(helper_sysenter)();
}

void NAME_(op_sysexit)(void)
{
    NAME_(helper_sysexit)();
}
#endif

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
/* PARAM1 = next instruction's EIP offset for the return address */
void NAME_(op_syscall)(void)
{
    NAME_(helper_syscall)(PARAM1);
}

void NAME_(op_sysret)(void)
{
    NAME_(helper_sysret)(PARAM1);
}
#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */

#if 80486 <= CONFIG_CPU && CONFIG_CPU_MSR_SUPPORT
void NAME_(op_rdmsr)(void)
{
    NAME_(helper_rdmsr)();
}

void NAME_(op_wrmsr)(void)
{
    NAME_(helper_wrmsr)();
}
#endif /* 80486 <= CONFIG_CPU && CONFIG_CPU_MSR_SUPPORT */

/* bcd */

/* XXX: exception */
void NAME_(op_aam)(void)
{
    int base = PARAM1;
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

/* AAD: ASCII adjust AX before divide.
 * AL = (AH * base + AL) & 0xff, AH = 0; base is the immediate (PARAM1). */
void NAME_(op_aad)(void)
{
    int imm = PARAM1;
    int lo = EAX & 0xff;
    int hi = (EAX >> 8) & 0xff;

    lo = (hi * imm + lo) & 0xff;
    EAX = (EAX & ~0xffff) | lo;   /* AH is cleared along with the merge */
    CC_DST = lo;
}

/* AAA: ASCII adjust AL after addition (unpacked BCD). */
void NAME_(op_aaa)(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = NAME_(cc_table)[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    /* would "al + 6" carry out of the byte? */
    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

/* AAS: ASCII adjust AL after subtraction. */
void NAME_(op_aas)(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = NAME_(cc_table)[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    /* would "al - 6" borrow out of the byte? */
    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

/* DAA: decimal adjust AL after addition (packed BCD). */
void NAME_(op_daa)(void)
{
    int al, af, cf;
    int eflags;

    eflags = NAME_(cc_table)[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= NAME_(parity_table)[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

/* DAS: decimal adjust AL after subtraction. */
void NAME_(op_das)(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = NAME_(cc_table)[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al; /* keep the unadjusted value for the second test */
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= NAME_(parity_table)[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

/* segment handling */

/* never use it with R_CS */
void NAME_(op_movl_seg_T0)(void)
{
    NAME_(load_seg)(PARAM1, T0);
}

/* faster VM86 version */
void NAME_(op_movl_seg_T0_vm)(void)
{
    int selector;
    SegmentCache *sc;
    
    selector = T0 & 0xffff;
    /* env->segs[] access */
    sc = (SegmentCache *)((char *)env + PARAM1);
    sc->selector = selector;
    sc->base = (selector << 4);
}

void NAME_(op_movl_T0_seg)(void)
{
    T0 = env->segs[PARAM1].selector;
}
#if CONFIG_CPU >= 80286
void NAME_(op_lsl)(void)
{
    NAME_(helper_lsl)();
}

void NAME_(op_lar)(void)
{
    NAME_(helper_lar)();
}

void NAME_(op_verr)(void)
{
    NAME_(helper_verr)();
}

void NAME_(op_verw)(void)
{
    NAME_(helper_verw)();
}

void NAME_(op_arpl)(void)
{
    if ((T0 & 3) < (T1 & 3)) {
        /* XXX: emulate bug or 0xff3f0000 oring as in bochs ? */
        T0 = (T0 & ~3) | (T1 & 3);
        T1 = CC_Z;
   } else {
        T1 = 0;
    }
    FORCE_RET();
}
            
void NAME_(op_arpl_update)(void)
{
    int eflags;
    eflags = NAME_(cc_table)[CC_OP].compute_all();
    CC_SRC = (eflags & ~CC_Z) | T1;
}
#endif    
/* far control transfers: T0 = segment selector, T1 = target eip;
 * PARAM1/PARAM2 carry next-eip / shift information for the helpers */
void NAME_(op_ljmp_protected_T0_T1)(void)
{
    NAME_(helper_ljmp_protected_T0_T1)(PARAM1);
}

void NAME_(op_lcall_real_T0_T1)(void)
{
    NAME_(helper_lcall_real_T0_T1)(PARAM1, PARAM2);
}

void NAME_(op_lcall_protected_T0_T1)(void)
{
    NAME_(helper_lcall_protected_T0_T1)(PARAM1, PARAM2);
}

void NAME_(op_iret_real)(void)
{
    NAME_(helper_iret_real)(PARAM1);
}

void NAME_(op_iret_protected)(void)
{
    NAME_(helper_iret_protected)(PARAM1, PARAM2);
}

void NAME_(op_lret_protected)(void)
{
    NAME_(helper_lret_protected)(PARAM1, PARAM2);
}
#if CONFIG_CPU >= 80286
void NAME_(op_lldt_T0)(void)
{
    NAME_(helper_lldt_T0)();
}

void NAME_(op_ltr_T0)(void)
{
    NAME_(helper_ltr_T0)();
}
#endif
#if CONFIG_CPU >= 80386
/* CR registers access: PARAM1 selects the control register */
void NAME_(op_movl_crN_T0)(void)
{
    NAME_(helper_movl_crN_T0)(PARAM1);
}

/* DR registers access: PARAM1 selects the debug register */
void NAME_(op_movl_drN_T0)(void)
{
    NAME_(helper_movl_drN_T0)(PARAM1);
}
#endif
#if CONFIG_CPU >= 80286
void NAME_(op_lmsw_T0)(void)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    T0 = (env->cr[0] & ~0xe) | (T0 & 0xf);
    NAME_(helper_movl_crN_T0)(0);
}
#endif
#if CONFIG_CPU >= 80486
void NAME_(op_invlpg_A0)(void)
{
    NAME_(helper_invlpg)(A0);
}
#endif

/* transfers between the scratch registers and fields of *env;
 * PARAM1 is the byte offset of the field inside the CPU state.
 * The _movl_ variants are 32-bit, _movtl_ are target-word sized. */
void NAME_(op_movl_T0_env)(void)
{
    T0 = *(uint32_t *)((char *)env + PARAM1);
}

void NAME_(op_movl_env_T0)(void)
{
    *(uint32_t *)((char *)env + PARAM1) = T0;
}

void NAME_(op_movl_env_T1)(void)
{
    *(uint32_t *)((char *)env + PARAM1) = T1;
}

void NAME_(op_movtl_T0_env)(void)
{
    T0 = *(target_ulong *)((char *)env + PARAM1);
}

void NAME_(op_movtl_env_T0)(void)
{
    *(target_ulong *)((char *)env + PARAM1) = T0;
}

void NAME_(op_movtl_T1_env)(void)
{
    T1 = *(target_ulong *)((char *)env + PARAM1);
}

void NAME_(op_movtl_env_T1)(void)
{
    *(target_ulong *)((char *)env + PARAM1) = T1;
}
#if CONFIG_CPU >= 80286
/* CLTS: clear the task-switched bit in CR0 and its hflags mirror */
void NAME_(op_clts)(void)
{
    env->cr[0] &= ~CPU_CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}
#endif
/* flags handling */

/* chain to the next translation block; the jump target is patched
 * in place by the code generator (see GOTO_TB above) */
void NAME_(op_goto_tb0)(void)
{
    GOTO_TB(op_goto_tb0, PARAM1, 0);
}

void NAME_(op_goto_tb1)(void)
{
    GOTO_TB(op_goto_tb1, PARAM1, 1);
}

/* intra-TB jumps to a generator-resolved label */
void NAME_(op_jmp_label)(void)
{
    GOTO_LABEL_PARAM(1);
}

void NAME_(op_jnz_T0_label)(void)
{
    if (T0)
        GOTO_LABEL_PARAM(1);
    FORCE_RET();
}

void NAME_(op_jz_T0_label)(void)
{
    if (!T0)
        GOTO_LABEL_PARAM(1);
    FORCE_RET();
}

/* slow set cases (compute x86 flags) */
/* slow set cases (compute x86 flags) */
void NAME_(op_seto_T0_cc)(void)
{
    int eflags;
    eflags = NAME_(cc_table)[CC_OP].compute_all();
    T0 = (eflags >> 11) & 1; /* OF is eflags bit 11 */
}

void NAME_(op_setb_T0_cc)(void)
{
    T0 = NAME_(cc_table)[CC_OP].compute_c();
}

void NAME_(op_setz_T0_cc)(void)
{
    int eflags;
    eflags = NAME_(cc_table)[CC_OP].compute_all();
    T0 = (eflags >> 6) & 1; /* ZF is eflags bit 6 */
}

void NAME_(op_setbe_T0_cc)(void)
{
    int eflags;
    eflags = NAME_(cc_table)[CC_OP].compute_all();
    T0 = (eflags & (CC_Z | CC_C)) != 0;
}

void NAME_(op_sets_T0_cc)(void)
{
    int eflags;
    eflags = NAME_(cc_table)[CC_OP].compute_all();
    T0 = (eflags >> 7) & 1; /* SF is eflags bit 7 */
}

void NAME_(op_setp_T0_cc)(void)
{
    int eflags;
    eflags = NAME_(cc_table)[CC_OP].compute_all();
    T0 = (eflags >> 2) & 1; /* PF is eflags bit 2 */
}

/* "less": SF != OF.  Shifting by 4 aligns OF (bit 11) with SF (bit 7). */
void NAME_(op_setl_T0_cc)(void)
{
    int eflags;
    eflags = NAME_(cc_table)[CC_OP].compute_all();
    T0 = ((eflags ^ (eflags >> 4)) >> 7) & 1;
}

/* "less or equal": (SF != OF) || ZF */
void NAME_(op_setle_T0_cc)(void)
{
    int eflags;
    eflags = NAME_(cc_table)[CC_OP].compute_all();
    T0 = (((eflags ^ (eflags >> 4)) & 0x80) || (eflags & CC_Z)) != 0;
}

/* invert a set-condition result (for the negated setcc forms) */
void NAME_(op_xor_T0_1)(void)
{
    T0 ^= 1;
}

void NAME_(op_set_cc_op)(void)
{
    CC_OP = PARAM1;
}

void NAME_(op_mov_T0_cc)(void)
{
    T0 = NAME_(cc_table)[CC_OP].compute_all();
}

/* XXX: clear VIF/VIP in all ops ? */

/* On CPUs up to the 80386 the AC and ID flags do not exist: force
   their masks to zero so load_eflags can never set those bits. */
#if CONFIG_CPU <= 80386
#undef CPU_ID_MASK
#define CPU_ID_MASK     (0 << CPU_ID_SHIFT)
#undef CPU_AC_MASK
#define CPU_AC_MASK     (0 << CPU_AC_SHIFT)
#endif

/* The six variants below differ only in the mask of system bits that
   the guest is allowed to modify (second load_eflags argument) and in
   whether the write is 32-bit or 16-bit (mask & 0xffff). */

/* 32-bit EFLAGS load; TF/AC/ID/NT may change, IF and IOPL may not. */
void NAME_(op_movl_eflags_T0)(void)
{
    load_eflags(T0, (CPU_TF_MASK | CPU_AC_MASK | CPU_ID_MASK | CPU_NT_MASK));
}

/* 16-bit variant of the above (upper EFLAGS half untouched). */
void NAME_(op_movw_eflags_T0)(void)
{
    load_eflags(T0, (CPU_TF_MASK | CPU_AC_MASK | CPU_ID_MASK | CPU_NT_MASK) & 0xffff);
}

/* Variant that additionally lets IF change (used when the privilege
   check allowed interrupt-flag access — presumably IOPL >= CPL). */
void NAME_(op_movl_eflags_T0_io)(void)
{
    load_eflags(T0, (CPU_TF_MASK | CPU_AC_MASK | CPU_ID_MASK | CPU_NT_MASK | CPU_IF_MASK));
}

void NAME_(op_movw_eflags_T0_io)(void)
{
    load_eflags(T0, (CPU_TF_MASK | CPU_AC_MASK | CPU_ID_MASK | CPU_NT_MASK | CPU_IF_MASK) & 0xffff);
}

/* CPL 0 variant: IF and IOPL may both change. */
void NAME_(op_movl_eflags_T0_cpl0)(void)
{
    load_eflags(T0, (CPU_TF_MASK | CPU_AC_MASK | CPU_ID_MASK | CPU_NT_MASK | CPU_IF_MASK | CPU_IOPL_MASK));
}

void NAME_(op_movw_eflags_T0_cpl0)(void)
{
    load_eflags(T0, (CPU_TF_MASK | CPU_AC_MASK | CPU_ID_MASK | CPU_NT_MASK | CPU_IF_MASK | CPU_IOPL_MASK) & 0xffff);
}

/* vm86plus EFLAGS writers — disabled (#if 0): kept for reference.
   They mirror the IF bit into the virtual VIF and raise #GP when an
   interrupt is pending (VIP set). */
#if 0
/* vm86plus version */
void NAME_(op_movw_eflags_T0_vm)(void)
{
    int eflags;
    eflags = T0;
    CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((eflags >> 10) & 1));
    /* we also update some system flags as in user mode */
    env->eflags = (env->eflags & ~(FL_UPDATE_MASK16 | VIF_MASK)) |
        (eflags & FL_UPDATE_MASK16);
    if (eflags & CPU_IF_MASK) {
        env->eflags |= VIF_MASK;
        if (env->eflags & VIP_MASK) {
            EIP = PARAM1;
            NAME_(raise_exception)(CPU_FAULT_GP);
        }
    }
    FORCE_RET();
}

void NAME_(op_movl_eflags_T0_vm)(void)
{
    int eflags;
    eflags = T0;
    CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((eflags >> 10) & 1));
    /* we also update some system flags as in user mode */
    env->eflags = (env->eflags & ~(FL_UPDATE_MASK32 | VIF_MASK)) |
        (eflags & FL_UPDATE_MASK32);
    if (eflags & CPU_IF_MASK) {
        env->eflags |= VIF_MASK;
        if (env->eflags & VIP_MASK) {
            EIP = PARAM1;
            NAME_(raise_exception)(CPU_FAULT_GP);
        }
    }
    FORCE_RET();
}
#endif

/* XXX: compute only O flag */
/* SAHF-style write: take SF/ZF/AF/PF/CF from T0 but preserve the
   recomputed OF, then store the combination as the new flag source. */
void NAME_(op_movb_eflags_T0)(void)
{
    int of;
    of = NAME_(cc_table)[CC_OP].compute_all() & CC_O;
    CC_SRC = (T0 & (CC_S | CC_Z | CC_A | CC_P | CC_C)) | of;
}

/* T0 = full EFLAGS image: lazy arithmetic flags + DF + the stored
   system bits, with VM and RF masked out of the visible value. */
void NAME_(op_movl_T0_eflags)(void)
{
    int eflags;
    eflags = NAME_(cc_table)[CC_OP].compute_all();
    eflags |= (DF & CPU_DF_MASK);
    eflags |= env->eflags & ~(CPU_VM_MASK | CPU_RF_MASK);
    T0 = eflags;
}

/* vm86plus version */
#if 0
/* Disabled vm86plus reader: reports VIF as IF. */
void NAME_(op_movl_T0_eflags_vm)(void)
{
    int eflags;
    eflags = NAME_(cc_table)[CC_OP].compute_all();
    eflags |= (DF & CPU_DF_MASK);
    eflags |= env->eflags & ~(CPU_VM_MASK | CPU_RF_MASK | CPU_IF_MASK);
    if (env->eflags & VIF_MASK)
        eflags |= CPU_IF_MASK;
    T0 = eflags;
}
#endif

/* CLD: DF holds the string-op increment directly (+1 = forward). */
void NAME_(op_cld)(void)
{
    DF = 1;
}

/* STD: -1 = backward string operations. */
void NAME_(op_std)(void)
{
    DF = -1;
}

/* CLC: clear carry, keeping all other computed flags. */
void NAME_(op_clc)(void)
{
    int eflags;
    eflags = NAME_(cc_table)[CC_OP].compute_all();
    eflags &= ~CC_C;
    CC_SRC = eflags;
}

/* STC: set carry. */
void NAME_(op_stc)(void)
{
    int eflags;
    eflags = NAME_(cc_table)[CC_OP].compute_all();
    eflags |= CC_C;
    CC_SRC = eflags;
}

/* CMC: complement carry. */
void NAME_(op_cmc)(void)
{
    int eflags;
    eflags = NAME_(cc_table)[CC_OP].compute_all();
    eflags ^= CC_C;
    CC_SRC = eflags;
}

/* SALC (undocumented): AL = CF ? 0xff : 0x00. */
void NAME_(op_salc)(void)
{
    int cf;
    cf = NAME_(cc_table)[CC_OP].compute_c();
    EAX = (EAX & ~0xff) | ((-cf) & 0xff);
}

/* floating point support. Some of the code for complicated x87
   functions comes from the LGPL'ed x86 emulator found in the Willows
   TWIN windows emulator. */

/* fp load FT0 */

/* Load a 32-bit float from guest address A0 into the FT0 temporary.
   The FP_CONVERT union reinterprets the loaded integer bits as a
   float without a pointer cast. */
void NAME_(op_flds_FT0_A0)(void)
{
#ifdef USE_FP_CONVERT
    FP_CONVERT.i32 = ldl(A0);
    FT0 = FP_CONVERT.f;
#else
    FT0 = ldfl(A0);
#endif
}

/* Load a 64-bit double from A0 into FT0. */
void NAME_(op_fldl_FT0_A0)(void)
{
#ifdef USE_FP_CONVERT
    FP_CONVERT.i64 = ldq(A0);
    FT0 = FP_CONVERT.d;
#else
    FT0 = ldfq(A0);
#endif
}

/* helpers are needed to avoid static constant reference. XXX: find a better way */
#ifdef USE_INT_TO_FLOAT_HELPERS

/* fild (16-bit): signed word -> extended float in FT0. */
void NAME_(helper_fild_FT0_A0)(void)
{
    FT0 = (CPU86_LDouble)ldsw(A0);
}

/* fildl (32-bit): signed dword -> FT0. */
void NAME_(helper_fildl_FT0_A0)(void)
{
    FT0 = (CPU86_LDouble)((int32_t)ldl(A0));
}

/* fildll (64-bit): signed qword -> FT0. */
void NAME_(helper_fildll_FT0_A0)(void)
{
    FT0 = (CPU86_LDouble)((int64_t)ldq(A0));
}

/* Op wrappers: calling out-of-line helpers keeps FP constants out of
   the copied op template (see comment above). */
void NAME_(op_fild_FT0_A0)(void)
{
    NAME_(helper_fild_FT0_A0)();
}

void NAME_(op_fildl_FT0_A0)(void)
{
    NAME_(helper_fildl_FT0_A0)();
}

void NAME_(op_fildll_FT0_A0)(void)
{
    NAME_(helper_fildll_FT0_A0)();
}

#else

/* Inline variants used when int->float helpers are not required. */
void NAME_(op_fild_FT0_A0)(void)
{
#ifdef USE_FP_CONVERT
    FP_CONVERT.i32 = ldsw(A0);
    FT0 = (CPU86_LDouble)FP_CONVERT.i32;
#else
    FT0 = (CPU86_LDouble)ldsw(A0);
#endif
}

void NAME_(op_fildl_FT0_A0)(void)
{
#ifdef USE_FP_CONVERT
    FP_CONVERT.i32 = (int32_t) ldl(A0);
    FT0 = (CPU86_LDouble)FP_CONVERT.i32;
#else
    FT0 = (CPU86_LDouble)((int32_t)ldl(A0));
#endif
}

void NAME_(op_fildll_FT0_A0)(void)
{
#ifdef USE_FP_CONVERT
    FP_CONVERT.i64 = (int64_t) ldq(A0);
    FT0 = (CPU86_LDouble)FP_CONVERT.i64;
#else
    FT0 = (CPU86_LDouble)((int64_t)ldq(A0));
#endif
}
#endif

/* fp load ST0 */

/* All loads below push onto the x87 register stack: the new top is
   (fpstt - 1) & 7, and clearing the tag (0) marks the slot valid.
   fpstt is only committed after the load so a fault from the memory
   access leaves the stack pointer unchanged. */

/* FLD m32: push a 32-bit float. */
void NAME_(op_flds_ST0_A0)(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
#ifdef USE_FP_CONVERT
    FP_CONVERT.i32 = ldl(A0);
    env->fpregs[new_fpstt].d = FP_CONVERT.f;
#else
    env->fpregs[new_fpstt].d = ldfl(A0);
#endif
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* FLD m64: push a 64-bit double. */
void NAME_(op_fldl_ST0_A0)(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
#ifdef USE_FP_CONVERT
    FP_CONVERT.i64 = ldq(A0);
    env->fpregs[new_fpstt].d = FP_CONVERT.d;
#else
    env->fpregs[new_fpstt].d = ldfq(A0);
#endif
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* FLD m80: 80-bit extended load, done out of line. */
void NAME_(op_fldt_ST0_A0)(void)
{
    NAME_(helper_fldt_ST0_A0)();
}

/* helpers are needed to avoid static constant reference. XXX: find a better way */
#ifdef USE_INT_TO_FLOAT_HELPERS

/* FILD m16int: push a signed 16-bit integer. */
void NAME_(helper_fild_ST0_A0)(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = (CPU86_LDouble)ldsw(A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* FILD m32int. */
void NAME_(helper_fildl_ST0_A0)(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = (CPU86_LDouble)((int32_t)ldl(A0));
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* FILD m64int. */
void NAME_(helper_fildll_ST0_A0)(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = (CPU86_LDouble)((int64_t)ldq(A0));
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* Op wrappers around the helpers above. */
void NAME_(op_fild_ST0_A0)(void)
{
    NAME_(helper_fild_ST0_A0)();
}

void NAME_(op_fildl_ST0_A0)(void)
{
    NAME_(helper_fildl_ST0_A0)();
}

void NAME_(op_fildll_ST0_A0)(void)
{
    NAME_(helper_fildll_ST0_A0)();
}

#else

/* Inline integer-push variants. */
void NAME_(op_fild_ST0_A0)(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
#ifdef USE_FP_CONVERT
    FP_CONVERT.i32 = ldsw(A0);
    env->fpregs[new_fpstt].d = (CPU86_LDouble)FP_CONVERT.i32;
#else
    env->fpregs[new_fpstt].d = (CPU86_LDouble)ldsw(A0);
#endif
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void NAME_(op_fildl_ST0_A0)(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
#ifdef USE_FP_CONVERT
    FP_CONVERT.i32 = (int32_t) ldl(A0);
    env->fpregs[new_fpstt].d = (CPU86_LDouble)FP_CONVERT.i32;
#else
    env->fpregs[new_fpstt].d = (CPU86_LDouble)((int32_t)ldl(A0));
#endif
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void NAME_(op_fildll_ST0_A0)(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
#ifdef USE_FP_CONVERT
    FP_CONVERT.i64 = (int64_t) ldq(A0);
    env->fpregs[new_fpstt].d = (CPU86_LDouble)FP_CONVERT.i64;
#else
    env->fpregs[new_fpstt].d = (CPU86_LDouble)((int64_t)ldq(A0));
#endif
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

#endif

/* fp store */

/* FST m32: store ST0 to A0, narrowed to single precision. */
void NAME_(op_fsts_ST0_A0)(void)
{
#ifdef USE_FP_CONVERT
    FP_CONVERT.f = (float)ST0;
    stfl(A0, FP_CONVERT.f);
#else
    stfl(A0, (float)ST0);
#endif
    FORCE_RET();
}

/* FST m64: store ST0 as a double. */
void NAME_(op_fstl_ST0_A0)(void)
{
    stfq(A0, (double)ST0);
    FORCE_RET();
}

/* FSTP m80: 80-bit store, done out of line. */
void NAME_(op_fstt_ST0_A0)(void)
{
    NAME_(helper_fstt_ST0_A0)();
}

/* FIST m16int: convert ST0 using the current rounding mode; values
   that do not fit in 16 bits are replaced by the integer indefinite
   value -32768 (0x8000). */
void NAME_(op_fist_ST0_A0)(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
    register CPU86_LDouble d asm("o0");
#else
    CPU86_LDouble d;
#endif
    int val;

    d = ST0;
    val = floatx_to_int32(d, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    stw(A0, val);
    FORCE_RET();
}

/* FIST m32int. */
void NAME_(op_fistl_ST0_A0)(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
    register CPU86_LDouble d asm("o0");
#else
    CPU86_LDouble d;
#endif
    int val;

    d = ST0;
    val = floatx_to_int32(d, &env->fp_status);
    stl(A0, val);
    FORCE_RET();
}

/* FISTP m64int. */
void NAME_(op_fistll_ST0_A0)(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
    register CPU86_LDouble d asm("o0");
#else
    CPU86_LDouble d;
#endif
    int64_t val;

    d = ST0;
    val = floatx_to_int64(d, &env->fp_status);
    stq(A0, val);
    FORCE_RET();
}

/* FISTTP (SSE3, truncating) variants — disabled until the round-to-
   zero conversions exist in cpu_jit_compile.c / floatx.h. */
#if 0 /* FIXME fox - these are still missing in cpu_jit_compile.c and floatx.h */
void NAME_(op_fistt_ST0_A0)(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
    register CPU86_LDouble d asm("o0");
#else
    CPU86_LDouble d;
#endif
    int val;

    d = ST0;
    val = floatx_to_int32_round_to_zero(d, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    stw(A0, val);
    FORCE_RET();
}

void NAME_(op_fisttl_ST0_A0)(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
    register CPU86_LDouble d asm("o0");
#else
    CPU86_LDouble d;
#endif
    int val;

    d = ST0;
    val = floatx_to_int32_round_to_zero(d, &env->fp_status);
    stl(A0, val);
    FORCE_RET();
}

void NAME_(op_fisttll_ST0_A0)(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
    register CPU86_LDouble d asm("o0");
#else
    CPU86_LDouble d;
#endif
    int64_t val;

    d = ST0;
    val = floatx_to_int64_round_to_zero(d, &env->fp_status);
    stq(A0, val);
    FORCE_RET();
}
#endif /* 0 FIXME */

/* FBLD/FBSTP: packed BCD load/store, done out of line. */
void NAME_(op_fbld_ST0_A0)(void)
{
    NAME_(helper_fbld_ST0_A0)();
}

void NAME_(op_fbst_ST0_A0)(void)
{
    NAME_(helper_fbst_ST0_A0)();
}

/* FPU move */

/* Push a slot onto the x87 stack (decrements TOP, marks slot valid). */
void NAME_(op_fpush)(void)
{
    fpush();
}

/* Pop the top slot (marks it empty, increments TOP). */
void NAME_(op_fpop)(void)
{
    fpop();
}

/* FDECSTP: rotate TOP down; 0x4700 clears C0-C3 in the status word. */
void NAME_(op_fdecstp)(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

/* FINCSTP: rotate TOP up. */
void NAME_(op_fincstp)(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FFREE ST(i): tag value 1 marks the register empty. */
void NAME_(op_ffree_STN)(void)
{
    env->fptags[(env->fpstt + PARAM1) & 7] = 1;
}

void NAME_(op_fmov_ST0_FT0)(void)
{
    ST0 = FT0;
}

void NAME_(op_fmov_FT0_STN)(void)
{
    FT0 = ST(PARAM1);
}

void NAME_(op_fmov_ST0_STN)(void)
{
    ST0 = ST(PARAM1);
}

void NAME_(op_fmov_STN_ST0)(void)
{
    ST(PARAM1) = ST0;
}

/* FXCH ST(i). */
void NAME_(op_fxchg_ST0_STN)(void)
{
    CPU86_LDouble tmp;
    tmp = ST(PARAM1);
    ST(PARAM1) = ST0;
    ST0 = tmp;
}

/* FPU operations */

/* Condition-code patterns for FCOM, indexed by compare result + 1:
   0x0100 = C0 ("less"), 0x4000 = C3 ("equal"), 0x0000 ("greater"),
   0x4500 = C3|C2|C0 ("unordered"). */
const int NAME_(fcom_ccval)[4] = {0x0100, 0x4000, 0x0000, 0x4500};

/* FCOM: signaling compare of ST0 with FT0, result in C0/C2/C3. */
void NAME_(op_fcom_ST0_FT0)(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | NAME_(fcom_ccval)[ret + 1];
    FORCE_RET();
}

/* FUCOM: quiet compare (no exception for quiet NaNs). */
void NAME_(op_fucom_ST0_FT0)(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | NAME_(fcom_ccval)[ret+ 1];
    FORCE_RET();
}

/* EFLAGS patterns for FCOMI, same index convention as fcom_ccval. */
const int NAME_(fcomi_ccval)[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

/* FCOMI: compare and set ZF/PF/CF directly in EFLAGS. */
void NAME_(op_fcomi_ST0_FT0)(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = NAME_(cc_table)[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | NAME_(fcomi_ccval)[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

/* FUCOMI: quiet variant of FCOMI. */
void NAME_(op_fucomi_ST0_FT0)(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = NAME_(cc_table)[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | NAME_(fcomi_ccval)[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

/* FCMOVcc: conditional move; T0 holds the precomputed condition. */
void NAME_(op_fcmov_ST0_STN_T0)(void)
{
    if (T0) {
        ST0 = ST(PARAM1);
    }
    FORCE_RET();
}

/* Arithmetic between ST0 and the FT0 temporary. */
void NAME_(op_fadd_ST0_FT0)(void)
{
    ST0 += FT0;
}

void NAME_(op_fmul_ST0_FT0)(void)
{
    ST0 *= FT0;
}

void NAME_(op_fsub_ST0_FT0)(void)
{
    ST0 -= FT0;
}

void NAME_(op_fsubr_ST0_FT0)(void)
{
    ST0 = FT0 - ST0;
}

/* Division goes through helper_fdiv (handles divide-by-zero faults). */
void NAME_(op_fdiv_ST0_FT0)(void)
{
    ST0 = NAME_(helper_fdiv)(ST0, FT0);
}

void NAME_(op_fdivr_ST0_FT0)(void)
{
    ST0 = NAME_(helper_fdiv)(FT0, ST0);
}

/* fp operations between STN and ST0 */

void NAME_(op_fadd_STN_ST0)(void)
{
    ST(PARAM1) += ST0;
}

void NAME_(op_fmul_STN_ST0)(void)
{
    ST(PARAM1) *= ST0;
}

void NAME_(op_fsub_STN_ST0)(void)
{
    ST(PARAM1) -= ST0;
}

void NAME_(op_fsubr_STN_ST0)(void)
{
    CPU86_LDouble *p;
    p = &ST(PARAM1);
    *p = ST0 - *p;
}

void NAME_(op_fdiv_STN_ST0)(void)
{
    CPU86_LDouble *p;
    p = &ST(PARAM1);
    *p = NAME_(helper_fdiv)(*p, ST0);
}

void NAME_(op_fdivr_STN_ST0)(void)
{
    CPU86_LDouble *p;
    p = &ST(PARAM1);
    *p = NAME_(helper_fdiv)(ST0, *p);
}

/* misc FPU operations */

/* FCHS: flip the sign bit of ST0 (works on NaNs/infinities too). */
void NAME_(op_fchs_ST0)(void)
{
    ST0 = floatx_chs(ST0);
}

/* FABS: clear the sign bit. */
void NAME_(op_fabs_ST0)(void)
{
    ST0 = floatx_abs(ST0);
}

/* FXAM: classify ST0 into C0-C3, done out of line. */
void NAME_(op_fxam_ST0)(void)
{
    NAME_(helper_fxam_ST0)();
}

/* FLD constants; f15rk holds the x87 constant ROM values and the op
   name identifies which index is which (1.0, log2(10), log2(e), pi,
   log10(2), ln(2), 0.0). */
void NAME_(op_fld1_ST0)(void)
{
    ST0 = NAME_(f15rk)[1];
}

void NAME_(op_fldl2t_ST0)(void)
{
    ST0 = NAME_(f15rk)[6];
}

void NAME_(op_fldl2e_ST0)(void)
{
    ST0 = NAME_(f15rk)[5];
}

void NAME_(op_fldpi_ST0)(void)
{
    ST0 = NAME_(f15rk)[2];
}

void NAME_(op_fldlg2_ST0)(void)
{
    ST0 = NAME_(f15rk)[3];
}

void NAME_(op_fldln2_ST0)(void)
{
    ST0 = NAME_(f15rk)[4];
}

void NAME_(op_fldz_ST0)(void)
{
    ST0 = NAME_(f15rk)[0];
}

/* Load 0.0 into the FT0 temporary (used for compares against zero). */
void NAME_(op_fldz_FT0)(void)
{
    FT0 = NAME_(f15rk)[0];
}

/* associated helpers to reduce generated code length and to simplify
   relocation (FP constants are usually stored in .rodata section) */

/* Each op below is a thin thunk to an out-of-line helper implementing
   the corresponding x87 transcendental / partial-remainder
   instruction; the op body itself stays free of FP constants. */

void NAME_(op_f2xm1)(void)
{
    NAME_(helper_f2xm1)();
}

void NAME_(op_fyl2x)(void)
{
    NAME_(helper_fyl2x)();
}

void NAME_(op_fptan)(void)
{
    NAME_(helper_fptan)();
}

void NAME_(op_fpatan)(void)
{
    NAME_(helper_fpatan)();
}

void NAME_(op_fxtract)(void)
{
    NAME_(helper_fxtract)();
}

void NAME_(op_fprem1)(void)
{
    NAME_(helper_fprem1)();
}


void NAME_(op_fprem)(void)
{
    NAME_(helper_fprem)();
}

void NAME_(op_fyl2xp1)(void)
{
    NAME_(helper_fyl2xp1)();
}

void NAME_(op_fsqrt)(void)
{
    NAME_(helper_fsqrt)();
}

/* FSINCOS/FSIN/FCOS exist only from the 80387 onwards. */
#if CONFIG_CPU >= 80386
void NAME_(op_fsincos)(void)
{
    NAME_(helper_fsincos)();
}
#endif
void NAME_(op_frndint)(void)
{
    NAME_(helper_frndint)();
}

void NAME_(op_fscale)(void)
{
    NAME_(helper_fscale)();
}
#if CONFIG_CPU >= 80386
void NAME_(op_fsin)(void)
{
    NAME_(helper_fsin)();
}

void NAME_(op_fcos)(void)
{
    NAME_(helper_fcos)();
}
#endif
/* FNSTSW m16: store the status word with the live TOP value (fpstt)
   merged into bits 11-13. */
void NAME_(op_fnstsw_A0)(void)
{
    int fpus;
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    stw(A0, fpus);
    FORCE_RET();
}

/* FNSTSW AX: same value into the low 16 bits of EAX. */
void NAME_(op_fnstsw_EAX)(void)
{
    int fpus;
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    EAX = (EAX & ~0xffff) | fpus;
}

/* FNSTCW m16. */
void NAME_(op_fnstcw_A0)(void)
{
    stw(A0, env->fpuc);
    FORCE_RET();
}

/* FLDCW m16: reload the control word, then re-derive the host
   rounding/precision state from it. */
void NAME_(op_fldcw_A0)(void)
{
    env->fpuc = lduw(A0);
    NAME_(update_fp_status)();
}

/* FNCLEX: keep TOP and C0-C3 (bits 8-14), clear the busy bit and all
   exception flags (bits 15 and 0-7). */
void NAME_(op_fnclex)(void)
{
    env->fpus &= 0x7f00;
}

/* Check for a pending FERR-style FPU error, out of line. */
void NAME_(op_fpu_check_ferr)(void)
{
      NAME_(fpu_check_ferr)();
}

/* FWAIT: raise #MF if the exception-summary bit is pending. */
void NAME_(op_fwait_exception)(void)
{
      if(env->fpus & FPUS_SE)
            NAME_(raise_exception)(CPU_FAULT_MF);
      FORCE_RET();
}

/* FNINIT: reset to the documented power-on FPU state - status 0,
   TOP 0, control word 0x37f, all registers tagged empty (1). */
void NAME_(op_fninit)(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* FNSTENV/FLDENV/FNSAVE/FRSTOR: environment and full-state image
   transfer; PARAM1 presumably selects the 16/32-bit format - the
   helpers live elsewhere, confirm there. */
void NAME_(op_fnstenv_A0)(void)
{
    NAME_(helper_fstenv)(A0, PARAM1);
}

void NAME_(op_fldenv_A0)(void)
{
    NAME_(helper_fldenv)(A0, PARAM1);
}

void NAME_(op_fnsave_A0)(void)
{
    NAME_(helper_fsave)(A0, PARAM1);
}

void NAME_(op_frstor_A0)(void)
{
    NAME_(helper_frstor)(A0, PARAM1);
}

/* threading support */

/* Emitted around LOCK-prefixed instructions: take/release the global
   emulator lock via the out-of-line helpers. */
void NAME_(op_lock)(void)
{
    NAME_(lock)();
}

void NAME_(op_unlock)(void)
{
    NAME_(unlock)();
}

#if 80386 <= CONFIG_CPU
/* system management mode */
/* RSM: resume from SMM, handled entirely in the helper. */
void NAME_(op_rsm)(void)
{
    NAME_(helper_rsm)();
}
#endif /* 80386 <= CONFIG_CPU */

#if CONFIG_CPU >= 80586 && CONFIG_CPU_MMX_SUPPORT

/* FIXME sand: ugly hack to avoid build problems
 * on DARWIN/MACOSX */
/* Declare the dyngen relocation symbol for a third op parameter
   (PARAM3), mirroring how __op_param1/2 are set up in exec.h: the
   symbol's address is a placeholder that dyngen patches at code
   generation time.  The alpha variant forces the address through a
   register so no unpatched immediate is emitted. */
#if defined(__alpha__)
extern int __op_param3 __hidden;
#define PARAM3 ({ int _r; asm("" : "=r"(_r) : "0" (&__op_param3)); _r; })
#else
#if defined(__APPLE__)
static int __op_param3;
#else
extern int __op_param3;
#endif
#define PARAM3 ((long)(&__op_param3))
#endif /* !defined(__alpha__) */

#if CONFIG_CPU_SSE_SUPPORT
/* SSE support */
/*
 * Copy exactly 16 bytes (one XMM register's worth) from s to d as
 * four 32-bit word stores.  The regions must not overlap and are
 * assumed to be suitably aligned for 32-bit access (XMMReg fields in
 * the CPU state satisfy this).
 *
 * The source is never written through, so it is const-qualified;
 * callers passing plain void* are unaffected.
 */
static inline void memcpy16(void *d, const void *s)
{
    ((uint32_t *)d)[0] = ((const uint32_t *)s)[0];
    ((uint32_t *)d)[1] = ((const uint32_t *)s)[1];
    ((uint32_t *)d)[2] = ((const uint32_t *)s)[2];
    ((uint32_t *)d)[3] = ((const uint32_t *)s)[3];
}

/* Move a full 128-bit XMM value between two fields of the CPU state;
   PARAM1/PARAM2 are the destination/source byte offsets into env. */
void NAME_(op_movo)(void)
{
    /* XXX: badly generated code */
    XMMReg *d, *s;
    d = (XMMReg *)((char *)env + PARAM1);
    s = (XMMReg *)((char *)env + PARAM2);
    memcpy16(d, s);
}
#endif
/* 64-bit move between two env fields (MMX register moves). */
void NAME_(op_movq)(void)
{
    uint64_t *d, *s;
    d = (uint64_t *)((char *)env + PARAM1);
    s = (uint64_t *)((char *)env + PARAM2);
    *d = *s;
}

/* 32-bit move between two env fields. */
void NAME_(op_movl)(void)
{
    uint32_t *d, *s;
    d = (uint32_t *)((char *)env + PARAM1);
    s = (uint32_t *)((char *)env + PARAM2);
    *d = *s;
}

/* Zero a 64-bit env field. */
void NAME_(op_movq_env_0)(void)
{
    uint64_t *d;
    d = (uint64_t *)((char *)env + PARAM1);
    *d = 0;
}

/* FXSAVE/FXRSTOR: extended FPU/SSE state image, out of line. */
void NAME_(op_fxsave_A0)(void)
{
    NAME_(helper_fxsave)(A0, PARAM1);
}

void NAME_(op_fxrstor_A0)(void)
{
    NAME_(helper_fxrstor)(A0, PARAM1);
}

/* XXX: optimize by storing fptt and fptags in the static cpu state */
/* Entering MMX mode: TOP = 0 and all eight tags cleared (valid).
   The tags are written as two 32-bit stores - assumes fptags is an
   array of 8 bytes; confirm its declaration in the state header. */
void NAME_(op_enter_mmx)(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

/* EMMS: tag every FP register empty (each tag byte set to 1). */
void NAME_(op_emms)(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

#define SHIFT 0
#include "cpu_jit_ops_sse.h"

#if CONFIG_CPU_SSE_SUPPORT
#define SHIFT 1
#include "cpu_jit_ops_sse.h"
#endif

#endif /* CONFIG_CPU >= 80586 && CONFIG_CPU_MMX_SUPPORT */

Generated by  Doxygen 1.6.0   Back to index