arch_gen_cpu_x86_sim_fast.c

/*
 * $Id: arch_gen_cpu_x86_sim_fast.c,v 1.35 2009-02-16 08:18:23 vrsieh Exp $
 *
 *  i386 helpers
 * 
 *  Copyright (c) 2005-2009 FAUmachine Team.
 *  Copyright (c) 2003 Fabrice Bellard
 *  Modified for FAUmachine by Volkmar Sieh
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
 * USA
 */

#define FAST

#if CONFIG_CPU <= 80386
#undef CPU_ID_MASK
#define CPU_ID_MASK     (0 << CPU_ID_SHIFT)
#undef CPU_AC_MASK
#define CPU_AC_MASK     (0 << CPU_AC_SHIFT)
#endif

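/* Parity-flag lookup table: parity_table[b] is CC_P iff the byte b has
   an even number of set bits, matching the x86 PF, which is defined
   over the low 8 bits of a result only. */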
const uint8_t NAME_(parity_table)[256] = {
      CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
      0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
      0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
      CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
      0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
      CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
      CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
      0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
      0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
      CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
      CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
      0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
      CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
      0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
      0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
      CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
      0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
      CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
      CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
      0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
      CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
      0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
      0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
      CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
      CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
      0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
      0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
      CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
      0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
      CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
      CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
      0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};


void NAME_(lock)(void)
{
}

void NAME_(unlock)(void)
{
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(seg, selector, 
                           (selector << 4), 0xffff, 0);
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            NAME_(raise_exception_err)(CPU_FAULT_TS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_TS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                NAME_(raise_exception_err)(CPU_FAULT_TS, selector & 0xfffc);
            if (dpl != rpl)
                NAME_(raise_exception_err)(CPU_FAULT_TS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                NAME_(raise_exception_err)(CPU_FAULT_TS, selector & 0xfffc);
                
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                NAME_(raise_exception_err)(CPU_FAULT_TS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                NAME_(raise_exception_err)(CPU_FAULT_TS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                NAME_(raise_exception_err)(CPU_FAULT_TS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    NAME_(raise_exception_err)(CPU_FAULT_TS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_NP, selector & 0xfffc);
        cpu_x86_load_seg_cache(seg_reg, selector, 
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) 
            NAME_(raise_exception_err)(CPU_FAULT_TS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
void
NAME_(switch_tss)(
      int tss_selector, 
      uint32_t e1,
      uint32_t e2,
      int source,
      uint32_t next_eip
)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index_;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_NP, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            NAME_(raise_exception_err)(CPU_FAULT_TS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            NAME_(raise_exception_err)(CPU_FAULT_GP, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            NAME_(raise_exception_err)(CPU_FAULT_GP, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            NAME_(raise_exception_err)(CPU_FAULT_GP, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        NAME_(raise_exception_err)(CPU_FAULT_NP, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 || 
        tss_limit < tss_limit_max)
        NAME_(raise_exception_err)(CPU_FAULT_TS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

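    /* 32 bit TSS layout used below: CR3 at 0x1c, EIP at 0x20, EFLAGS at
       0x24, the eight GPRs at 0x28..0x44, the six segment selectors at
       0x48..0x5c (4 byte stride), the LDT selector at 0x60 and the
       T bit/I/O map base at 0x64. The 16 bit (286) TSS packs the same
       state into 2 byte fields, starting with IP at 0x0e. */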
    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    
    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after the accesses are done */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);
    
    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr_;
        uint32_t e2_;
        ptr_ = env->gdt.base + (env->tr.selector & ~7);
        e2_ = ldl_kernel(ptr_ + 4);
        e2_ &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr_ + 4, e2_);
    }

    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~CPU_NT_MASK;
    
    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 2), env->segs[i].selector);
    }
    
    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= CPU_NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr_;
        uint32_t e2_;
        ptr_ = env->gdt.base + (tss_selector & ~7);
        e2_ = ldl_kernel(ptr_ + 4);
        e2_ |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr_ + 4, e2_);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CPU_CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
    
    if ((type & 8) && (env->cr[0] & CPU_CR0_PG_MASK)) {
        NAME_(update_cr3)(new_cr3);
    }
    
    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = CPU_TF_MASK | CPU_AC_MASK | CPU_ID_MASK | 
        CPU_IF_MASK | CPU_IOPL_MASK | CPU_VM_MASK | CPU_RF_MASK | CPU_NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in the 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & CPU_VM_MASK) {
        for(i = 0; i < 6; i++) 
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(i, new_segs[i], 0, 0, 0);
    }
    
    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        NAME_(raise_exception_err)(CPU_FAULT_TS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index_ = new_ldt & ~7;
        if ((index_ + 7) > dt->limit)
            NAME_(raise_exception_err)(CPU_FAULT_TS, new_ldt & 0xfffc);
        ptr = dt->base + index_;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            NAME_(raise_exception_err)(CPU_FAULT_TS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_TS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
    
    /* load the segments */
    if (!(new_eflags & CPU_VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }
    
    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
    }
}

/* check if Port I/O is allowed in TSS */
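/* The word at TSS offset 0x66 gives the offset of the I/O permission
   bitmap within the TSS; one bit per port. An access of `size' bytes
   at `addr' is allowed only if all `size' bits starting at bit
   (addr & 7) of the word fetched below are clear. */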
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;
 
    /* "open" our backdoor for BIOS debugging output */
    if (addr == 0xffff && size == 1) {
          return;
    }

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
    }
}

void NAME_(check_iob_T0)(void)
{
    check_io(T0, 1);
}

void NAME_(check_iow_T0)(void)
{
    check_io(T0, 2);
}

void NAME_(check_iol_T0)(void)
{
    check_io(T0, 4);
}

void NAME_(check_iob_DX)(void)
{
    check_io(EDX & 0xffff, 1);
}

void NAME_(check_iow_DX)(void)
{
    check_io(EDX & 0xffff, 2);
}

void NAME_(check_iol_DX)(void)
{
    check_io(EDX & 0xffff, 4);
}

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT

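/* 128 bit arithmetic on pairs of uint64_t. In add128 an unsigned carry
   out of the low half is detected portably: after *plow += a, a carry
   occurred iff the new *plow is smaller than the addend a. */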
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

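/* Schoolbook 64x64->128 multiply: with a = a1 * 2^32 + a0 and
   b = b1 * 2^32 + b0, the product is a0*b0 + (a0*b1 + a1*b0) * 2^32
   + a1*b1 * 2^64; the middle partial products are folded in via
   add128, the last one goes directly into the high half. */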
static void mul64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    uint32_t a0, a1, b0, b1;
    uint64_t v;

    a0 = a;
    a1 = a >> 32;

    b0 = b;
    b1 = b >> 32;
    
    v = (uint64_t)a0 * (uint64_t)b0;
    *plow = v;
    *phigh = 0;

    v = (uint64_t)a0 * (uint64_t)b1;
    add128(plow, phigh, v << 32, v >> 32);
    
    v = (uint64_t)a1 * (uint64_t)b0;
    add128(plow, phigh, v << 32, v >> 32);
    
    v = (uint64_t)a1 * (uint64_t)b1;
    *phigh += v;
}

static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
{
    int sa, sb;
    sa = (a < 0);
    if (sa)
        a = -a;
    sb = (b < 0);
    if (sb)
        b = -b;
    mul64(plow, phigh, a, b);
    if (sa ^ sb) {
        neg128(plow, phigh);
    }
}

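/* 128/64 unsigned divide. The slow path is plain restoring division:
   the 128 bit value a1:a0 is shifted left one bit per step, the
   divisor is subtracted from a1 whenever it fits (tracking the bit
   shifted out of a1), and the quotient bits are shifted into a0.
   After 64 steps a0 holds the quotient and a1 the remainder. */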
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

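/* Signed divide, reduced to div64 on the magnitudes with a sign fixup
   afterwards: the quotient is negated if the operand signs differ, the
   remainder takes the sign of the dividend. The asymmetric check below
   accepts the quotient -2^63 but rejects +2^63. */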
/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void NAME_(helper_mulq_EAX_T0)(void)
{
    uint64_t r0, r1;

    mul64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void NAME_(helper_imulq_EAX_T0)(void)
{
    uint64_t r0, r1;

    imul64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void NAME_(helper_imulq_T0_T1)(void)
{
    uint64_t r0, r1;

    imul64(&r0, &r1, T0, T1);
    T0 = r0;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void NAME_(helper_divq_EAX_T0)(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        NAME_(raise_exception)(CPU_FAULT_DE);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, T0))
        NAME_(raise_exception)(CPU_FAULT_DE);
    EAX = r0;
    EDX = r1;
}

void NAME_(helper_idivq_EAX_T0)(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        NAME_(raise_exception)(CPU_FAULT_DE);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, T0))
        NAME_(raise_exception)(CPU_FAULT_DE);
    EAX = r0;
    EDX = r1;
}

void NAME_(helper_bswapq_T0)(void)
{
    T0 = bswap64(T0);
}

#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */

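/* CMPXCHG8B: compare the 64 bit value at A0 with EDX:EAX; on a match,
   store ECX:EBX there and set ZF, otherwise load the memory value into
   EDX:EAX and clear ZF. The other flags are left unchanged. */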
void NAME_(helper_cmpxchg8b)(void)
{
    uint64_t d;
    int eflags;

    eflags = compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

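/* Helper for the nested part of ENTER: after reserving one stack slot,
   the loop copies level-1 display entries from the old frame (walking
   EBP downwards), then the value passed in T1 (the new frame pointer
   in the ENTER sequence) is pushed last. All stack accesses are masked
   with the SS stack size. */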
void NAME_(helper_enter_level)(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
void NAME_(helper_enter64_level)(int level, int data64)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, T1);
    }
}
#endif

void NAME_(helper_lldt_T0)(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index_, entry_limit;
    target_ulong ptr;
    
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
        dt = &env->gdt;
        index_ = selector & ~7;
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */
            entry_limit = 7;
        if ((index_ + entry_limit) > dt->limit)
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
        ptr = dt->base + index_;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_NP, selector & 0xfffc);
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void NAME_(helper_ltr_T0)(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index_, type, entry_limit;
    target_ulong ptr;
    
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
        dt = &env->gdt;
        index_ = selector & ~7;
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */
            entry_limit = 7;
        if ((index_ + entry_limit) > dt->limit)
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
        ptr = dt->base + index_;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) || 
            (type != 1 && type != 9))
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_NP, selector & 0xfffc);
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else 
#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works in protected mode, not in VM86. seg_reg must be != R_CS */
void NAME_(load_seg)(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index_;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
        cpu_x86_load_seg_cache(seg_reg, selector, 0, 0, 0);
    } else {
        
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index_ = selector & ~7;
        if ((index_ + 7) > dt->limit)
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
        ptr = dt->base + index_;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
            
            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                NAME_(raise_exception_err)(CPU_FAULT_SS, selector & 0xfffc);
            else
                NAME_(raise_exception_err)(CPU_FAULT_NP, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(seg_reg, selector, 
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void NAME_(helper_ljmp_protected_T0_T1)(int next_eip_addend)
{
    int new_cs, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong new_eip, next_eip;
    
    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
            if (dpl != cpl)
                NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_NP, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK)))
            NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            NAME_(switch_tss)(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                NAME_(raise_exception_err)(CPU_FAULT_NP, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                NAME_(raise_exception_err)(CPU_FAULT_GP, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != 
                 (DESC_S_MASK | DESC_CS_MASK)))
                NAME_(raise_exception_err)(CPU_FAULT_GP, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                NAME_(raise_exception_err)(CPU_FAULT_GP, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                NAME_(raise_exception_err)(CPU_FAULT_GP, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
            cpu_x86_load_seg_cache(R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void NAME_(helper_lcall_real_T0_T1)(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void NAME_(helper_lcall_protected_T0_T1)(int shift, int next_eip_addend)
{
    int new_cs, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip, new_eip;
    
    new_cs = T0;
    new_eip = T1;
    next_eip = env->eip + next_eip_addend;
    if ((new_cs & 0xfffc) == 0)
        NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
            if (dpl != cpl)
                NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_NP, new_cs & 0xfffc);

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), 
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else 
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }
            
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
            /* from this point, not restartable */
            ESP = (ESP & ~sp_mask) | (sp & sp_mask);
            cpu_x86_load_seg_cache(R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
            NAME_(switch_tss)(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_NP,  new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            NAME_(raise_exception_err)(CPU_FAULT_GP, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_NP, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege level */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            if ((ss & 0xfffc) == 0)
                NAME_(raise_exception_err)(CPU_FAULT_TS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                NAME_(raise_exception_err)(CPU_FAULT_TS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                NAME_(raise_exception_err)(CPU_FAULT_TS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                NAME_(raise_exception_err)(CPU_FAULT_TS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                NAME_(raise_exception_err)(CPU_FAULT_TS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                NAME_(raise_exception_err)(CPU_FAULT_TS, ss & 0xfffc);
            
            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;
            
            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege level */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
            ss_e1 = 0; /* avoid warning */
            ss_e2 = 0; /* avoid warning */
            ss = 0; /* avoid warning */
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(R_SS, ss, 
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(R_CS, selector, 
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(dpl);
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        EIP = offset;
    }
}

/* real and vm86 mode iret */
void NAME_(helper_iret_real)(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXX: use SS segment size? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & CPU_VM_MASK)
        eflags_mask = CPU_TF_MASK | CPU_AC_MASK | CPU_ID_MASK | CPU_IF_MASK | CPU_RF_MASK | CPU_NT_MASK;
    else
        eflags_mask = CPU_TF_MASK | CPU_AC_MASK | CPU_ID_MASK | CPU_IF_MASK | CPU_IOPL_MASK | CPU_RF_MASK | CPU_NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

static inline void
NAME_(validate_seg)(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;
    
    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static void
NAME_(helper_ret_protected)(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2 = 0;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;
    
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & CPU_VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    /* QEMU debug stuff removed */
    if ((new_cs & 0xfffc) == 0)
        NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3; 
    if (rpl < cpl)
        NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            NAME_(raise_exception_err)(CPU_FAULT_GP, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        NAME_(raise_exception_err)(CPU_FAULT_NP, new_cs & 0xfffc);
    
    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || 
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(R_CS, new_cs, 
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        
        /* QEMU debug stuff removed */
        if ((new_ss & 0xfffc) == 0) {
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
            /* NULL SS is allowed in long mode if cpl != 3 */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(R_SS, new_ss, 
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK;
            } else 
#endif
            {
                NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                NAME_(raise_exception_err)(CPU_FAULT_GP, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                NAME_(raise_exception_err)(CPU_FAULT_GP, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                NAME_(raise_exception_err)(CPU_FAULT_GP, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                NAME_(raise_exception_err)(CPU_FAULT_GP, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                NAME_(raise_exception_err)(CPU_FAULT_NP, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(R_SS, new_ss, 
                           get_seg_base(ss_e1, ss_e2),
                           get_seg_limit(ss_e1, ss_e2),
                           ss_e2);
        }

        cpu_x86_load_seg_cache(R_CS, new_cs, 
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(rpl);
        sp = new_esp;
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        NAME_(validate_seg)(R_ES, rpl);
        NAME_(validate_seg)(R_DS, rpl);
        NAME_(validate_seg)(R_FS, rpl);
        NAME_(validate_seg)(R_GS, rpl);

        sp += addend;
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = CPU_TF_MASK | CPU_AC_MASK | CPU_ID_MASK | CPU_RF_MASK | CPU_NT_MASK;
        if (cpl == 0)
            eflags_mask |= CPU_IOPL_MASK;
        iopl = (env->eflags >> CPU_IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= CPU_IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);
    
    /* modify processor state */
    load_eflags(new_eflags, CPU_TF_MASK | CPU_AC_MASK | CPU_ID_MASK | 
                CPU_IF_MASK | CPU_IOPL_MASK | CPU_VM_MASK | CPU_NT_MASK | CPU_VIF_MASK | CPU_VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void NAME_(helper_iret_protected)(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;
    
    /* specific case for TSS */
    if (env->eflags & CPU_NT_MASK) {
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
        if (env->hflags & HF_LMA_MASK)
            NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            NAME_(raise_exception_err)(CPU_FAULT_TS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            NAME_(raise_exception_err)(CPU_FAULT_TS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            NAME_(raise_exception_err)(CPU_FAULT_TS, tss_selector & 0xfffc);
        NAME_(switch_tss)(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        NAME_(helper_ret_protected)(shift, 1, 0);
    }
}

void NAME_(helper_lret_protected)(int shift, int addend)
{
    NAME_(helper_ret_protected)(shift, 0, addend);
}

#if 80486 <= CONFIG_CPU && CONFIG_CPU_SEP_SUPPORT
void NAME_(helper_sysenter)(void)
{
    if (env->sysenter_cs == 0) {
        NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
    }
    env->eflags &= ~(CPU_VM_MASK | CPU_IF_MASK | CPU_RF_MASK);
    cpu_x86_set_cpl(0);
    cpu_x86_load_seg_cache(R_CS, env->sysenter_cs & 0xfffc, 
                           0, 0xffffffff, 
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(R_SS, (env->sysenter_cs + 8) & 0xfffc, 
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void NAME_(helper_sysexit)(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
    }
    cpu_x86_set_cpl(3);
    cpu_x86_load_seg_cache(R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3, 
                           0, 0xffffffff, 
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3, 
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
}
#endif /* 80486 <= CONFIG_CPU && CONFIG_CPU_SEP_SUPPORT */

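/* SYSCALL/SYSRET selectors come from the STAR MSR: bits 47:32 hold the
   kernel CS selector base used here (SS is loaded with that value + 8),
   bits 63:48 the user selector base used by helper_sysret. */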
void NAME_(helper_syscall)(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        NAME_(raise_exception_err)(CPU_FAULT_UD, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;

        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(0);
        cpu_x86_load_seg_cache(R_CS, selector & 0xfffc, 
                           0, 0xffffffff, 
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(R_SS, (selector + 8) & 0xfffc, 
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else 
#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);
        
        cpu_x86_set_cpl(0);
        cpu_x86_load_seg_cache(R_CS, selector & 0xfffc, 
                           0, 0xffffffff, 
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(R_SS, (selector + 8) & 0xfffc, 
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(CPU_IF_MASK | CPU_RF_MASK | CPU_VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}

void NAME_(helper_sysret)(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        NAME_(raise_exception_err)(CPU_FAULT_UD, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CPU_CR0_PE_MASK) || cpl != 0) {
        NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(R_CS, (selector + 16) | 3, 
                                   0, 0xffffffff, 
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(R_CS, selector | 3, 
                                   0, 0xffffffff, 
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(R_SS, selector + 8, 
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), CPU_TF_MASK | CPU_AC_MASK | CPU_ID_MASK | 
                    CPU_IF_MASK | CPU_IOPL_MASK | CPU_VM_MASK | CPU_RF_MASK | CPU_NT_MASK);
        cpu_x86_set_cpl(3);
    } else 
#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */
    {
        cpu_x86_load_seg_cache(R_CS, selector | 3, 
                               0, 0xffffffff, 
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(R_SS, selector + 8, 
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= CPU_IF_MASK;
        cpu_x86_set_cpl(3);
    }
}

void NAME_(helper_invlpg)(target_ulong addr)
{
      NAME_(mmu_invlpg)(addr);
}

#if 80586 <= CONFIG_CPU
void
NAME_(helper_rdtsc)(void)
{
      uint64_t val;

      if ((env->cr[4] & CPU_CR4_TSD_MASK)
       && ((env->hflags & HF_CPL_MASK) != 0)) {
            NAME_(raise_exception)(CPU_FAULT_GP);
      }
#if 0
      val = env->process.inst_offset + env->process.inst_cnt;
#else
      /* Beware of overflows! */
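      /* Computes val = t * CONFIG_CPU_FREQ / TIME_HZ without
         overflowing 64 bits by splitting t into whole TIME_HZ
         periods (x) and the remainder (y). */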
      {     uint64_t t;
            uint64_t x;
            uint64_t y;

            t = time_virt();
            x = t / TIME_HZ;
            y = t % TIME_HZ;
            val = (x * CONFIG_CPU_FREQ) + (y * CONFIG_CPU_FREQ) / TIME_HZ;
      }
#endif
      EAX = (uint32_t) (val >>  0);
      EDX = (uint32_t) (val >> 32);
}
#endif /* 80586 <= CONFIG_CPU */

#if 80486 <= CONFIG_CPU && CONFIG_CPU_MSR_SUPPORT
void
NAME_(helper_wrmsr)(void)
{
      uint64_t val;
      
      val = ((uint32_t) EAX) | ((uint64_t)((uint32_t) EDX) << 32);

      switch ((uint32_t) ECX) {
#if 80686 <= CONFIG_CPU
      case MSR_IA32_BIOS_SIGN_ID: /* 0x8b */
            env->update_signature = EDX;
            break;
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_TSC_SUPPORT
      case MSR_IA32_TSC:
            /* Setting TSC value. */
            env->process.inst_offset = val - env->process.inst_cnt;
            break;
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_APIC_SUPPORT
      case MSR_IA32_APICBASE: /* 0x1b */
            /* IA32 APIC BASE MSR. */
            NAME_(apic_base_msr_set)(val);
            break;
#endif

      case MSR_EBL_CR_POWERON: /* 0x2a */
            /* Processor Hard Poweron Configuration */
            faum_log(FAUM_LOG_WARNING, "CPU", "",
                  "Writing %08x:%08x to EBL_CR_POWERON MSR 0x%x.\n",
                  (uint32_t)EDX, (uint32_t)EAX, (uint32_t)ECX);

            env->msr[ECX].low = (uint32_t) EAX;
            env->msr[ECX].high = (uint32_t) EDX;
            break;

      case MSR_BBL_CR_CTL3:
            /* L2 Cache Control Register */
            faum_log(FAUM_LOG_WARNING, "CPU", "",
                  "Writing %08x:%08x to BBL_CR_CTL3 MSR 0x%x.\n",
                  (uint32_t)EDX, (uint32_t)EAX, (uint32_t)ECX);

            env->msr[ECX].low = (uint32_t) EAX;
            env->msr[ECX].high = (uint32_t) EDX;
            break;

#if 80486 <= CONFIG_CPU && CONFIG_CPU_SEP_SUPPORT
      case MSR_IA32_SYSENTER_CS: /* 0x174 */
            env->sysenter_cs = val & 0xffff;
            break;

      case MSR_IA32_SYSENTER_ESP: /* 0x175 */
            env->sysenter_esp = val;
            break;

      case MSR_IA32_SYSENTER_EIP: /* 0x176 */
            env->sysenter_eip = val;
            break;
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_MTRR_SUPPORT
      /*
       * Memory Cache Control
       * Memory Type Range Registers
       */
      case MSR_IA32_MTRRCAP: /* 0xfe */
            /* Read-only Register */
            NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
            break;

      case MSR_IA32_MTRR_DEF_TYPE: /* 0x2ff */
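            /*
             * DEF_TYPE layout: bits 7:0 = default memory type, bit 10 =
             * fixed-range MTRR enable, bit 11 = MTRR enable; everything
             * else is reserved.
             */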
            if ((val & 0xfffffffffffff300ULL) != 0) {
                  /* Reserved bits set. */
                  NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
            }
            switch ((int) (val >> 0) & 0xff) {
            case 0:
            case 1:
            case 4:
            case 5:
            case 6:
                  break;
            default:
                  /* Bad default memory type. */
                  NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
                  break;
            }
            env->msr[ECX].low = (uint32_t) EAX;
            env->msr[ECX].high = (uint32_t) EDX;
            break;

      case MSR_IA32_MTRR_PHYSBASE0: /* 0x200 */
      case MSR_IA32_MTRR_PHYSBASE1: /* 0x202 */
      case MSR_IA32_MTRR_PHYSBASE2: /* 0x204 */
      case MSR_IA32_MTRR_PHYSBASE3: /* 0x206 */
      case MSR_IA32_MTRR_PHYSBASE4: /* 0x208 */
      case MSR_IA32_MTRR_PHYSBASE5: /* 0x20a */
      case MSR_IA32_MTRR_PHYSBASE6: /* 0x20c */
      case MSR_IA32_MTRR_PHYSBASE7: /* 0x20e */
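            /*
             * PHYSBASEn layout: bits 7:0 = memory type, bits 11:8
             * reserved, bits (CONFIG_CPU_PHYS_BITS-1):12 = base address.
             */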
            if ((val >> CONFIG_CPU_PHYS_BITS)
             || (val & 0xf00)) {
                  /* Reserved bits set. */
                  NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
            }
            switch ((int) (val >> 0) & 0xff) {
            case 0:
            case 1:
            case 4:
            case 5:
            case 6:
                  break;
            default:
                  /* Bad memory type. */
                  NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
                  break;
            }
            env->msr[ECX].low = (uint32_t) EAX;
            env->msr[ECX].high = (uint32_t) EDX;
            break;

      case MSR_IA32_MTRR_PHYSMASK0: /* 0x201 */
      case MSR_IA32_MTRR_PHYSMASK1: /* 0x203 */
      case MSR_IA32_MTRR_PHYSMASK2: /* 0x205 */
      case MSR_IA32_MTRR_PHYSMASK3: /* 0x207 */
      case MSR_IA32_MTRR_PHYSMASK4: /* 0x209 */
      case MSR_IA32_MTRR_PHYSMASK5: /* 0x20b */
      case MSR_IA32_MTRR_PHYSMASK6: /* 0x20d */
      case MSR_IA32_MTRR_PHYSMASK7: /* 0x20f */
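            /*
             * PHYSMASKn layout: bits 10:0 reserved, bit 11 = valid,
             * bits (CONFIG_CPU_PHYS_BITS-1):12 = address mask.
             */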
            if ((val >> CONFIG_CPU_PHYS_BITS)
             || (val & 0x7ff)) {
                  /* Reserved bits set. */
                  NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
            }
            env->msr[ECX].low = (uint32_t) EAX;
            env->msr[ECX].high = (uint32_t) EDX;
            break;

      case MSR_IA32_MTRR_FIX64K_00000: /* 0x250 */
      case MSR_IA32_MTRR_FIX16K_80000: /* 0x258 */
      case MSR_IA32_MTRR_FIX16K_A0000: /* 0x259 */
      case MSR_IA32_MTRR_FIX4K_C0000: /* 0x268 */
      case MSR_IA32_MTRR_FIX4K_C8000: /* 0x269 */
      case MSR_IA32_MTRR_FIX4K_D0000: /* 0x26a */
      case MSR_IA32_MTRR_FIX4K_D8000: /* 0x26b */
      case MSR_IA32_MTRR_FIX4K_E0000: /* 0x26c */
      case MSR_IA32_MTRR_FIX4K_E8000: /* 0x26d */
      case MSR_IA32_MTRR_FIX4K_F0000: /* 0x26e */
      case MSR_IA32_MTRR_FIX4K_F8000: /* 0x26f */
            /* Check for bad memory types - FIXME VOSSI */
            env->msr[ECX].low = (uint32_t) EAX;
            env->msr[ECX].high = (uint32_t) EDX;
            break;
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_MCA_SUPPORT
      /*
       * Machine-Check Architecture
       */
      case MSR_IA32_MCG_CAP: /* 0x179 */
            /* Read-only. */
            assert(0); /* FIXME */
            break;

      case MSR_IA32_MCG_STATUS: /* 0x17a */
            /* Read-only. */
            assert(EAX == 0
                && EDX == 0); /* FIXME */
            break;

      case MSR_IA32_MCG_CTL: /* 0x17b */
            /* Just remember setting. */
            env->msr[ECX].low = (uint32_t) EAX;
            env->msr[ECX].high = (uint32_t) EDX;
            break;
            
      case MSR_IA32_MC0_CTL: /* 0x400 */
      case MSR_IA32_MC1_CTL: /* 0x404 */
      case MSR_IA32_MC2_CTL: /* 0x408 */
      case MSR_IA32_MC3_CTL: /* 0x40c */
      case MSR_IA32_MC4_CTL: /* 0x410 */
            /* Just remember setting. */
            env->msr[ECX].low = (uint32_t) EAX;
            env->msr[ECX].high = (uint32_t) EDX;
            break;

      case MSR_IA32_MC0_STATUS: /* 0x401 */
      case MSR_IA32_MC1_STATUS: /* 0x405 */
      case MSR_IA32_MC2_STATUS: /* 0x409 */
      case MSR_IA32_MC3_STATUS: /* 0x40d */
      case MSR_IA32_MC4_STATUS: /* 0x411 */
            /* Software might write '0' to register to clear. */
            /* Writing 1s will cause a general protection fault. */
            if (EAX != 0
             || EDX != 0) {
                  NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
            }
            /* FIXME */
            break;

      case MSR_IA32_MC0_ADDR: /* 0x402 */
      case MSR_IA32_MC1_ADDR: /* 0x406 */
      case MSR_IA32_MC2_ADDR: /* 0x40a */
      case MSR_IA32_MC3_ADDR: /* 0x40e */
      case MSR_IA32_MC4_ADDR: /* 0x412 */
            /* Read-only. */
            assert(0); /* FIXME */
            break;

      case MSR_IA32_MC0_MISC: /* 0x403 */
      case MSR_IA32_MC1_MISC: /* 0x407 */
      case MSR_IA32_MC2_MISC: /* 0x40b */
      case MSR_IA32_MC3_MISC: /* 0x40f */
      case MSR_IA32_MC4_MISC: /* 0x413 */
            /* Not implemented. */
            assert(0); /* FIXME */
            break;
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_PAT_SUPPORT
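      /*
       * IA32_PAT holds eight one-byte page-attribute entries:
       * PAT0-PAT3 in EAX, PAT4-PAT7 in EDX.  Types 0x02, 0x03 and
       * anything above 0x07 are reserved.
       */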
      case MSR_IA32_PAT: { /* 0x277 */
            uint32_t entry;
            unsigned int i;

            /* 3-628. */
            for (i = 0; i < 8; i++) {
                  if (i < 4) {
                        entry = EAX;
                  } else {
                        entry = EDX;
                  }
                  /*
                   * Shift by (i & 3) * 8; shifting the 32-bit value
                   * by i * 8 would be undefined for i >= 4.
                   */
                  entry >>= (i & 3) * 8;
                  entry &= 0xff;

                  if (entry == 0x02
                   || entry == 0x03
                   || 0x08 <= entry) {
                        NAME_(raise_exception_err)(CPU_FAULT_GP, 0);
                  }

                  env->pat[i] = entry;
            }
            break;
          }
#endif
#if CONFIG_CPU >= 80486 && (CONFIG_CPU_NX_SUPPORT || CONFIG_CPU_LM_SUPPORT)
      case MSR_EFER: /* 0xc0000080 */
            {
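                  /*
                   * Only EFER bits backed by a configured CPU feature
                   * are writable; all other bits are preserved.
                   */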
                  uint64_t update_mask;
                  update_mask = 0;
                  if (CONFIG_CPU_SYSCALL_SUPPORT) {
                        update_mask |= MSR_EFER_SCE;
                  }
                  if (CONFIG_CPU_FFXSR_SUPPORT) {
                        update_mask |= MSR_EFER_FFXSR;
                  }
                  if (CONFIG_CPU_NX_SUPPORT) {
                        update_mask |= MSR_EFER_NXE;
                  }
                  if (CONFIG_CPU_LM_SUPPORT) {
                        update_mask |= MSR_EFER_LME;
                  }
                  env->efer = (env->efer & ~update_mask)
                          | (val & update_mask);
            }
            break;
#endif
      case MSR_STAR: /* 0xc0000081 */
            env->star = val;
            break;

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
      case MSR_LSTAR: /* 0xc0000082 */
            env->lstar = val;
            break;
      case MSR_CSTAR: /* 0xc0000083 */
            env->cstar = val;
            break;
      case MSR_FMASK: /* 0xc0000084 */
            env->fmask = val;
            break;
      case MSR_FSBASE: /* 0xc0000100 */
            env->segs[R_FS].base = val;
            break;
      case MSR_GSBASE: /* 0xc0000101 */
            env->segs[R_GS].base = val;
            break;
      case MSR_KERNELGSBASE: /* 0xc0000102 */
            env->kernelgsbase = val;
            break;
#endif
      default:
            faum_log(FAUM_LOG_WARNING, "CPU", "",
                  "Writing %08x:%08x to   MSR 0x%x.\n",
                  (uint32_t) EDX, (uint32_t) EAX, (uint32_t) ECX);
            /* Should raise #GP according to the EM64T manual -
             * but QEMU doesn't do it either... - FIXME? */
            break; 
      }
}

void
NAME_(helper_rdmsr)(void)
{
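      /* RDMSR: return the MSR selected by ECX in EDX:EAX. */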
      switch ((uint32_t) ECX) {
#if 80686 <= CONFIG_CPU
      case MSR_IA32_BIOS_SIGN_ID: /* 0x8b */
            EAX = 0;
            EDX = env->update_signature;
            break;
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_TSC_SUPPORT
      case MSR_IA32_TSC:
            /* Read TSC (that's the same as "rdtsc"!). */
            NAME_(helper_rdtsc)();
            break;
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_APIC_SUPPORT
      case MSR_IA32_APICBASE: {
            /* IA32 APIC BASE MSR. */
            uint64_t val;

            val = NAME_(apic_base_msr_get)();
            EDX = (uint32_t) (val >> 32);
            EAX = (uint32_t) (val >>  0);
            break;
          }
#endif

      case MSR_EBL_CR_POWERON: /* 0x2a */
            /* Processor Hard Poweron Configuration */
            EAX = 0 /* FIXME */
                  | env->apic_cluster_id << 16
                  | env->apic_arbitration_id << 20
                  /* FIXME */
                  ;
            EDX = 0;

            faum_log(FAUM_LOG_WARNING, "CPU", "",
                  "Reading %08x:%08x from EBL_CR_POWERON.\n",
                        (uint32_t) EDX, (uint32_t) EAX);
            break;

      case MSR_BBL_CR_CTL3:
            /* L2 Cache Control Register */
            EAX = env->msr[ECX].low;
            EDX = env->msr[ECX].high;

            faum_log(FAUM_LOG_WARNING, "CPU", "",
                  "Reading %08x:%08x from BBL_CR_CTL3 MSR 0x%x.\n",
                        (uint32_t)EDX, (uint32_t)EAX, (uint32_t)ECX);
            break;

#if 80486 <= CONFIG_CPU && CONFIG_CPU_SEP_SUPPORT
      case MSR_IA32_SYSENTER_CS:
            EAX = env->sysenter_cs;
            EDX = 0;
            break;

      case MSR_IA32_SYSENTER_ESP:
            EAX = env->sysenter_esp;
            EDX = 0;
            break;

      case MSR_IA32_SYSENTER_EIP:
            EAX = env->sysenter_eip;
            EDX = 0;
            break;
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_MTRR_SUPPORT
      /*
       * Memory Cache Control
       * Memory Type Range Registers
       */
      case MSR_IA32_MTRRCAP: /* 0xfe */
            EAX = (1 << 10)   /* Write combining supported. */
                | (1 << 8)    /* Fixed range regs supported. */
                | (8 << 0);   /* 8 Variable range regs supported. */
            EDX = 0;
            break;

      case MSR_IA32_MTRR_DEF_TYPE: /* 0x2ff */
      case MSR_IA32_MTRR_PHYSBASE0: /* 0x200 */
      case MSR_IA32_MTRR_PHYSMASK0: /* 0x201 */
      case MSR_IA32_MTRR_PHYSBASE1: /* 0x202 */
      case MSR_IA32_MTRR_PHYSMASK1: /* 0x203 */
      case MSR_IA32_MTRR_PHYSBASE2: /* 0x204 */
      case MSR_IA32_MTRR_PHYSMASK2: /* 0x205 */
      case MSR_IA32_MTRR_PHYSBASE3: /* 0x206 */
      case MSR_IA32_MTRR_PHYSMASK3: /* 0x207 */
      case MSR_IA32_MTRR_PHYSBASE4: /* 0x208 */
      case MSR_IA32_MTRR_PHYSMASK4: /* 0x209 */
      case MSR_IA32_MTRR_PHYSBASE5: /* 0x20a */
      case MSR_IA32_MTRR_PHYSMASK5: /* 0x20b */
      case MSR_IA32_MTRR_PHYSBASE6: /* 0x20c */
      case MSR_IA32_MTRR_PHYSMASK6: /* 0x20d */
      case MSR_IA32_MTRR_PHYSBASE7: /* 0x20e */
      case MSR_IA32_MTRR_PHYSMASK7: /* 0x20f */
      case MSR_IA32_MTRR_FIX64K_00000: /* 0x250 */
      case MSR_IA32_MTRR_FIX16K_80000: /* 0x258 */
      case MSR_IA32_MTRR_FIX16K_A0000: /* 0x259 */
      case MSR_IA32_MTRR_FIX4K_C0000: /* 0x268 */
      case MSR_IA32_MTRR_FIX4K_C8000: /* 0x269 */
      case MSR_IA32_MTRR_FIX4K_D0000: /* 0x26a */
      case MSR_IA32_MTRR_FIX4K_D8000: /* 0x26b */
      case MSR_IA32_MTRR_FIX4K_E0000: /* 0x26c */
      case MSR_IA32_MTRR_FIX4K_E8000: /* 0x26d */
      case MSR_IA32_MTRR_FIX4K_F0000: /* 0x26e */
      case MSR_IA32_MTRR_FIX4K_F8000: /* 0x26f */
            EAX = env->msr[ECX].low;
            EDX = env->msr[ECX].high;
            break;
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_MCA_SUPPORT
      /*
       * Machine-Check Architecture
       */
      case MSR_IA32_MCG_CAP: /* 0x179 */
            EAX = (1 << 8)    /* MCG_CTL available */
                | (5 << 0);   /* 5 error-reporting banks */
            EDX = 0;
            break;

      case MSR_IA32_MCG_STATUS: /* 0x17a */
            EAX = (0 << 2)    /* No machine check in progress */
                | (0 << 1)    /* No error IP valid */
                | (0 << 0);   /* No restart IP valid */
            EDX = 0;
            break;

      case MSR_IA32_MCG_CTL: /* 0x17b */
            /* Just report setting. */
            EAX = env->msr[ECX].low;
            EDX = env->msr[ECX].high;
            break;

      case MSR_IA32_MC0_CTL: /* 0x400 */
      case MSR_IA32_MC1_CTL: /* 0x404 */
      case MSR_IA32_MC2_CTL: /* 0x408 */
      case MSR_IA32_MC3_CTL: /* 0x40c */
      case MSR_IA32_MC4_CTL: /* 0x410 */
            /* Just report setting. */
            EAX = env->msr[ECX].low;
            EDX = env->msr[ECX].high;
            break;

      case MSR_IA32_MC0_STATUS: /* 0x401 */
      case MSR_IA32_MC1_STATUS: /* 0x405 */
      case MSR_IA32_MC2_STATUS: /* 0x409 */
      case MSR_IA32_MC3_STATUS: /* 0x40d */
      case MSR_IA32_MC4_STATUS: /* 0x411 */
            EAX = 0;    /* No error pending. */
            EDX = 0;
            break;

      case MSR_IA32_MC0_ADDR: /* 0x402 */
      case MSR_IA32_MC1_ADDR: /* 0x406 */
      case MSR_IA32_MC2_ADDR: /* 0x40a */
      case MSR_IA32_MC3_ADDR: /* 0x40e */
      case MSR_IA32_MC4_ADDR: /* 0x412 */
            EAX = 0;    /* No error pending. */
            EDX = 0;
            break;

      case MSR_IA32_MC0_MISC: /* 0x403 */
      case MSR_IA32_MC1_MISC: /* 0x407 */
      case MSR_IA32_MC2_MISC: /* 0x40b */
      case MSR_IA32_MC3_MISC: /* 0x40f */
      case MSR_IA32_MC4_MISC: /* 0x413 */
            EAX = 0;    /* Register not implemented. */
            EDX = 0;
            break;
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_PAT_SUPPORT
      case MSR_IA32_PAT: /* 0x277 */
            /* 3-628. */
            EAX = (env->pat[0] <<  0)
                | (env->pat[1] <<  8)
                | (env->pat[2] << 16)
                | (env->pat[3] << 24);
            EDX = (env->pat[4] <<  0)
                | (env->pat[5] <<  8)
                | (env->pat[6] << 16)
                | (env->pat[7] << 24);
            break;
#endif

      case MSR_EFER: /* 0xc0000080 */
            EAX = (uint32_t)(env->efer);
            EDX = (uint32_t)(env->efer >> 32);
            break;
      case MSR_STAR: /* 0xc0000081 */
            EAX = (uint32_t)(env->star);
            EDX = (uint32_t)(env->star >> 32);
            break;

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
      case MSR_LSTAR: /* 0xc0000082 */
            EAX = (uint32_t)(env->lstar);
            EDX = (uint32_t)(env->lstar >> 32);
            break;
      case MSR_CSTAR: /* 0xc0000083 */
            EAX = (uint32_t)(env->cstar);
            EDX = (uint32_t)(env->cstar >> 32);
            break;
      case MSR_FMASK: /* 0xc0000084 */
            EAX = (uint32_t)(env->fmask);
            EDX = (uint32_t)(env->fmask >> 32);
            break;
      case MSR_FSBASE: /* 0xc0000100 */
            EAX = (uint32_t)(env->segs[R_FS].base);
            EDX = (uint32_t)(env->segs[R_FS].base >> 32);
            break;
      case MSR_GSBASE: /* 0xc0000101 */
            EAX = (uint32_t)(env->segs[R_GS].base);
            EDX = (uint32_t)(env->segs[R_GS].base >> 32);
            break;
      case MSR_KERNELGSBASE: /* 0xc0000102 */
            EAX = (uint32_t)(env->kernelgsbase);
            EDX = (uint32_t)(env->kernelgsbase >> 32);
            break;
#endif

      default:
            /* Should raise #GP according to the EM64T manual -
             * but QEMU doesn't do it either... - FIXME? */
            EAX = 0;
            EDX = 0;
            faum_log(FAUM_LOG_WARNING, "CPU", "",
                  "Reading %08x:%08x from MSR 0x%x.\n",
                  (uint32_t) EDX, (uint32_t) EAX, (uint32_t) ECX);
            break; 
      }
}
#endif /* 80486 <= CONFIG_CPU && CONFIG_CPU_MSR_SUPPORT */

void NAME_(helper_lsl)(void)
{
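    /* LSL: on success, return the segment limit in T1 and set ZF. */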
    unsigned int selector, limit;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    /* A null selector always fails, whatever descriptor 0 contains. */
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC |= CC_Z;
}

void NAME_(helper_lar)(void)
{
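    /*
     * LAR: on success, return the access-rights bytes of the
     * descriptor (e2 & 0x00f0ff00) in T1 and set ZF.
     */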
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC |= CC_Z;
}

void NAME_(helper_verr)(void)
{
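    /* VERR: set ZF iff the segment is readable at the current CPL/RPL. */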
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            return;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
    }
    CC_SRC |= CC_Z;
}

void NAME_(helper_verw)(void)
{
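    /*
     * VERW: set ZF iff the segment is a data segment writable at the
     * current CPL/RPL.
     */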
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        return;
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
        if (!(e2 & DESC_W_MASK))
            return;
    }
    CC_SRC |= CC_Z;
}

#if 80386 <= CONFIG_CPU
/*
 * Functions for physical RAM access, needed to read CPU state from SMRAM.
 * {ld,st}l_... would better be named {ld,st}d_... (d for "doubleword", as
 * "long" is ambiguous) - but we keep to the qemu naming scheme.
 */
/* FIXME */
extern uint32_t
NAME_(mr_data_l)(Paddr pa);

static inline uint32_t
ldl_phys(Paddr addr)
{
      return NAME_(mr_data_l)(addr);
}

static inline uint64_t
ldq_phys(Paddr addr)
{
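      /* Compose the 64-bit value from two little-endian 32-bit loads. */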
      uint32_t v1;
      uint32_t v2;

      v1 = NAME_(mr_data_l)(addr + 0);
      v2 = NAME_(mr_data_l)(addr + 4);

      return v1 | ((uint64_t) v2 << 32);
}

void
NAME_(helper_rsm)(void)
{
      uint32_t base;
      uint32_t lval;
      int i;

      /*
       * read back CPU status
       */
      base = env->smbase + 0x8000;
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
      /* Offset   Register */
      /* 7DCFH - 7C00H Reserved  - we'll store other internals here: */
      /*
       * restore hidden info from reserved space
       * FIXME: perhaps there is even more than this :)
       */
      env->tr.flags = ldq_phys(base+0x7d20);
      env->tr.limit = ldq_phys(base+0x7d28);
      env->tr.base = ldq_phys(base+0x7d30);
      env->segs[R_SS].flags = ldq_phys(base+0x7d38);
      env->segs[R_GS].flags = ldq_phys(base+0x7d40);
      env->segs[R_FS].flags = ldq_phys(base+0x7d48);
      env->segs[R_ES].flags = ldq_phys(base+0x7d50);
      env->segs[R_DS].flags = ldq_phys(base+0x7d58);
      env->segs[R_CS].flags = ldq_phys(base+0x7d60);
      env->segs[R_SS].limit = ldq_phys(base+0x7d68);
      env->segs[R_GS].limit = ldq_phys(base+0x7d70);
      env->segs[R_FS].limit = ldq_phys(base+0x7d78);
      env->segs[R_ES].limit = ldq_phys(base+0x7d80);
      env->segs[R_DS].limit = ldq_phys(base+0x7d88);
      env->segs[R_CS].limit = ldq_phys(base+0x7d90);
      env->segs[R_SS].base = ldq_phys(base+0x7d98);
      env->segs[R_GS].base = ldq_phys(base+0x7da0);
      env->segs[R_FS].base = ldq_phys(base+0x7da8);
      env->segs[R_ES].base = ldq_phys(base+0x7db0);
      env->segs[R_DS].base = ldq_phys(base+0x7db8);
      env->segs[R_CS].base = ldq_phys(base+0x7dc0);
      NAME_(update_cr4)(ldq_phys(base+0x7dc8));
      /* 7EF8H    SMBASE Field (Doubleword) */
      env->smbase = ldl_phys(base+0x7ef8);
      /* 7EFCH    SMM Revision Identifier Field (Doubleword) */

      lval = ldl_phys(base+0x7f00);
      /* 7F00H    I/O Instruction Restart Field (Word) */
            /* FIXME */
      /* 7F02H    Auto HALT Restart Field (Word) */
      env->hflags |= ((lval >> 16) & 1) << HF_HALTED_SHIFT;

      /* 7F04H    IEDBASE */
      /* 7F08H - 7F1BH  Reserved */
      /* 7F1CH    R15 */
      env->regs[15] = ldq_phys(base+0x7f1c);
      /* 7F24H    R14 */
      env->regs[14] = ldq_phys(base+0x7f24);
      /* 7F2CH    R13 */
      env->regs[13] = ldq_phys(base+0x7f2c);
      /* 7F34H    R12 */
      env->regs[12] = ldq_phys(base+0x7f34);
      /* 7F3CH    R11 */
      env->regs[11] = ldq_phys(base+0x7f3c);
      /* 7F44H    R10 */
      env->regs[10] = ldq_phys(base+0x7f44);
      /* 7F4CH    R9 */
      env->regs[9] = ldq_phys(base+0x7f4c);
      /* 7F54H    R8 */
      env->regs[8] = ldq_phys(base+0x7f54);
      /* 7F5CH    RAX */
      EAX = ldq_phys(base+0x7f5c);
      /* 7F64H    RCX */
      ECX = ldq_phys(base+0x7f64);
      /* 7F6CH    RDX */
      EDX = ldq_phys(base+0x7f6c);
      /* 7F74H    RBX */
      EBX = ldq_phys(base+0x7f74);
      /* 7F7CH    RSP */
      ESP = ldq_phys(base+0x7f7c);
      /* 7F84H    RBP */
      EBP = ldq_phys(base+0x7f84);
      /* 7F8CH    RSI */
      ESI = ldq_phys(base+0x7f8c);
      /* 7F94H    RDI */
      EDI = ldq_phys(base+0x7f94);
      /* 7F9CH    IO_MEM_ADDR */
      /* 7FA4H    IO_MISC */
      /* 7FA8H    ES SEL */
      env->segs[R_ES].selector = ldl_phys(base+0x7fa8);
      /* 7FACH    CS SEL */
      env->segs[R_CS].selector = ldl_phys(base+0x7fac);
      /* 7FB0H    SS SEL */
      env->segs[R_SS].selector = ldl_phys(base+0x7fb0);
      /* 7FB4H    DS SEL */
      env->segs[R_DS].selector = ldl_phys(base+0x7fb4);
      /* 7FB8H    FS SEL */
      env->segs[R_FS].selector = ldl_phys(base+0x7fb8);
      /* 7FBCH    GS SEL */
      env->segs[R_GS].selector = ldl_phys(base+0x7fbc);
      /* 7FC0H    LDTR SEL */
      /* 7FC4H    TR SEL */
      env->tr.selector = ldl_phys(base+0x7fc4);
      /* 7FF8H    CR0 */
      NAME_(update_cr0)(ldq_phys(base+0x7ff8));
      /* 7FF0H    CR3 */
      NAME_(update_cr3)(ldq_phys(base+0x7ff0));
      /* 7FE8H    RFLAGS */
      env->eflags = ldq_phys(base+0x7fe8);
      /* 7FE0H    IA32_EFER */
      env->efer = ldq_phys(base+0x7fe0);
      /* 7FD8H    RIP */
      env->eip = ldq_phys(base+0x7fd8);
      /* 7FD0H    DR6 */
      env->dr[6] = ldq_phys(base+0x7fd0);
      /* 7FC8H    DR7 */
      env->dr[7] = ldq_phys(base+0x7fc8);
#else /* not (CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT) */
      /* Offset   Register */
      /* 7EF7H-7E00H Reserved */
      /*
       * restore hidden info from reserved space
       * FIXME: perhaps there is even more than this :)
       */
      env->tr.flags = ldl_phys(base+0x7ea0);
      env->tr.limit = ldl_phys(base+0x7ea4);
      env->tr.base = ldl_phys(base+0x7ea8);
      env->segs[R_SS].flags = ldl_phys(base+0x7eac);
      env->segs[R_GS].flags = ldl_phys(base+0x7eb0);
      env->segs[R_FS].flags = ldl_phys(base+0x7eb4);
      env->segs[R_ES].flags = ldl_phys(base+0x7eb8);
      env->segs[R_DS].flags = ldl_phys(base+0x7ebc);
      env->segs[R_CS].flags = ldl_phys(base+0x7ec0);
      env->segs[R_SS].limit = ldl_phys(base+0x7ec4);
      env->segs[R_GS].limit = ldl_phys(base+0x7ec8);
      env->segs[R_FS].limit = ldl_phys(base+0x7ecc);
      env->segs[R_ES].limit = ldl_phys(base+0x7ed0);
      env->segs[R_DS].limit = ldl_phys(base+0x7ed4);
      env->segs[R_CS].limit = ldl_phys(base+0x7ed8);
      env->segs[R_SS].base = ldl_phys(base+0x7edc);
      env->segs[R_GS].base = ldl_phys(base+0x7ee0);
      env->segs[R_FS].base = ldl_phys(base+0x7ee4);
      env->segs[R_ES].base = ldl_phys(base+0x7ee8);
      env->segs[R_DS].base = ldl_phys(base+0x7eec);
      env->segs[R_CS].base = ldl_phys(base+0x7ef0);
      NAME_(update_cr4)(ldl_phys(base+0x7ef4));
      /* 7EF8H    SMBASE Field (Doubleword) */
      env->smbase = ldl_phys(base+0x7ef8);

      lval = ldl_phys(base+0x7f00);
      /* 7F00H    I/O Instruction Restart Field (Word) */
            /* FIXME */
      /* 7F02H    Auto HALT Restart Field (Word) */
      env->hflags |= ((lval >> 16) & 1) << HF_HALTED_SHIFT;

      /* 7FA0H    I/O Memory Address Field, see Section 13.7 */
      /* 7FA4H    I/O State Field, see Section 13.7 */
      /* I/O Support, I/O restart and auto HALT unsupported */
      /* 7FA8H    ES* */
      env->segs[R_ES].selector = ldl_phys(base+0x7fa8);
      /* 7FACH    CS* */
      env->segs[R_CS].selector = ldl_phys(base+0x7fac);
      /* 7FB0H    SS* */
      env->segs[R_SS].selector = ldl_phys(base+0x7fb0);
      /* 7FB4H    DS* */
      env->segs[R_DS].selector = ldl_phys(base+0x7fb4);
      /* 7FB8H    FS* */
      env->segs[R_FS].selector = ldl_phys(base+0x7fb8);
      /* 7FBCH    GS* */
      env->segs[R_GS].selector = ldl_phys(base+0x7fbc);
      /* 7FC0H    Reserved */
      /* 7FC4H    TR* */
      env->tr.selector = ldl_phys(base+0x7fc4);
      /* 7FC8H    DR7 */
      env->dr[7] = ldl_phys(base+0x7fc8);
      /* 7FCCH    DR6 */
      env->dr[6] = ldl_phys(base+0x7fcc);
      /* 7FD0H    EAX */
      EAX = ldl_phys(base+0x7fd0);
      /* 7FD4H    ECX */
      ECX = ldl_phys(base+0x7fd4);
      /* 7FD8H    EDX */
      EDX = ldl_phys(base+0x7fd8);
      /* 7FDCH    EBX */
      EBX = ldl_phys(base+0x7fdc);
      /* 7FE0H    ESP */
      ESP = ldl_phys(base+0x7fe0);
      /* 7FE4H    EBP */
      EBP = ldl_phys(base+0x7fe4);
      /* 7FE8H    ESI */
      ESI = ldl_phys(base+0x7fe8);
      /* 7FECH    EDI */
      EDI = ldl_phys(base+0x7fec);
      /* 7FF0H    EIP */
      env->eip = ldl_phys(base+0x7ff0);
      /* 7FF4H    EFLAGS */
      env->eflags = ldl_phys(base+0x7ff4);
      /* 7FF8H    CR3 */
      /* 7FFCH    CR0 */
      NAME_(update_cr0)(ldl_phys(base+0x7ffc));
      /* cr3 update must be *after* cr0 update */
      NAME_(update_cr3)(ldl_phys(base+0x7ff8));
#endif /* not (CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT) */

#if 0 /* no debugging messages in "production" version :) */
      faum_log(FAUM_LOG_DEBUG, "CPU", "",
                  "RSM called... SMBASE now %08x\n", env->smbase);
#endif
      /*
       * restore qemu special flags
       */
      /*
       * FIXME: maybe there's a better way than this
       *        or even more to do here
       */
      for (i = 0; i < CPU_NB_SEGS; i++) {
            cpu_x86_load_seg_cache(i, env->segs[i].selector,
                        env->segs[i].base, env->segs[i].limit,
                        env->segs[i].flags);
      }
      /*
       * ...and now we're back from SMM
       */
      NAME_(leave_smm)();
}
#endif /* 80386 <= CONFIG_CPU */
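
/*
 * Instantiate the size-dependent helper templates; SHIFT selects the
 * operand width (0 = byte, 1 = word, 2 = doubleword, 3 = quadword).
 */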

#define SHIFT 0
#include "arch_gen_cpu_x86_sim_fast_template.h"
#undef SHIFT

#define SHIFT 1
#include "arch_gen_cpu_x86_sim_fast_template.h"
#undef SHIFT

#if CONFIG_CPU >= 80386
#define SHIFT 2
#include "arch_gen_cpu_x86_sim_fast_template.h"
#undef SHIFT
#endif

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT

#define SHIFT 3
#include "arch_gen_cpu_x86_sim_fast_template.h"
#undef SHIFT

#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */

static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
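
/*
 * Lazy condition-code dispatch table: for each CC_OP_* value it pairs
 * the helper computing all flags with the one computing just CF.
 */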

const CCTable NAME_(cc_table)[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = { /* should never happen */ },

    [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },

    [CC_OP_MULB] = { compute_all_mulb, compute_c_mulb },
    [CC_OP_MULW] = { compute_all_mulw, compute_c_mulw },
#if CONFIG_CPU >= 80386 
    [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
#endif 
    [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
    [CC_OP_ADDW] = { compute_all_addw, compute_c_addw  },
#if CONFIG_CPU >= 80386
    [CC_OP_ADDL] = { compute_all_addl, compute_c_addl  },
#endif

    [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
    [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw  },
#if CONFIG_CPU >= 80386
    [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl  },
#endif
    [CC_OP_SUBB] = { compute_all_subb, compute_c_subb  },
    [CC_OP_SUBW] = { compute_all_subw, compute_c_subw  },
#if CONFIG_CPU >= 80386
    [CC_OP_SUBL] = { compute_all_subl, compute_c_subl  },
#endif
    [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb  },
    [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw  },
#if CONFIG_CPU >= 80386
    [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl  },
#endif
    [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
    [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
#if CONFIG_CPU >= 80386
    [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
#endif
    [CC_OP_INCB] = { compute_all_incb, compute_c_incb },
    [CC_OP_INCW] = { compute_all_incw, compute_c_incw },
#if CONFIG_CPU >= 80386
    [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
#endif
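    /*
     * INC/DEC leave CF unchanged, so the DEC entries deliberately
     * reuse the INC carry helpers.
     */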
    [CC_OP_DECB] = { compute_all_decb, compute_c_incb },
    [CC_OP_DECW] = { compute_all_decw, compute_c_incw },
#if CONFIG_CPU >= 80386
    [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
#endif
    [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
    [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
#if CONFIG_CPU >= 80386
    [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
#endif
    [CC_OP_SARB] = { compute_all_sarb, compute_c_sarb },
    [CC_OP_SARW] = { compute_all_sarw, compute_c_sarw },
#if CONFIG_CPU >= 80386
    [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
#endif

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
    [CC_OP_MULQ] = { compute_all_mulq, compute_c_mulq },

    [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq  },

    [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq  },

    [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq  },
    
    [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq  },
    
    [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
    
    [CC_OP_INCQ] = { compute_all_incq, compute_c_incq },

    [CC_OP_DECQ] = { compute_all_decq, compute_c_incq },

    [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },

    [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarq },

    [CC_OP_DIVQ] = { compute_all_divq, compute_c_divq  },
#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */

    [CC_OP_DIVB] = { compute_all_divb, compute_c_divb  },
    [CC_OP_DIVW] = { compute_all_divw, compute_c_divw  },
#if CONFIG_CPU >= 80386
    [CC_OP_DIVL] = { compute_all_divl, compute_c_divl  },
#endif
};

