arch_gen_cpu_x86_core_fast.c

/*
 * $Id: arch_gen_cpu_x86_core_fast.c,v 1.35 2009-01-22 17:18:49 potyra Exp $
 *
 * Parts derived from QEMU sources.
 * Modified for FAUmachine by Volkmar Sieh.
 * 
 *  Copyright (c) 2002-2009 FAUmachine Team.
 *  Copyright (c) 2003-2005 Fabrice Bellard.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#define FAST

/* Set this to '1' to trace execution. */
#define DEBUG_CONTROL_FLOW          0
#define DEBUG_CONTROL_FLOW_FLAGS    0
#define DEBUG_CONTROL_FLOW_REGS     0
#define DEBUG_CONTROL_FLOW_SREGS    0

/* Set to '1' to debug SMM entry. */
#define DEBUG_SMM_ENTRY             0


#define SWITCH_TSS_CALL 2     /* FIXME VOSSI */

/* protected mode interrupt */
static void
NAME_(do_interrupt_protected)(
      int intno,
      int is_int,
      int error_code,
      unsigned int next_eip,
      int is_hw
)
{
      SegmentCache *dt;
      target_ulong ptr, ssp;
      int type, dpl, selector, ss_dpl, cpl, sp_mask;
      int has_error_code, new_stack, shift;
      uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
      uint32_t old_eip;

      has_error_code = 0;
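      /*
       * Exceptions 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS), 13 (#GP),
       * 14 (#PF) and 17 (#AC) push an error code.
       */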
      if (! is_int && ! is_hw) {
            switch(intno) {
            case 8:
            case 10:
            case 11:
            case 12:
            case 13:
            case 14:
            case 17:
                  has_error_code = 1;
                  break;
            }
      }
      if (is_int)
            old_eip = next_eip;
      else
            old_eip = env->eip;

      dt = &env->idt;
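      /* each protected-mode IDT entry (gate descriptor) is 8 bytes */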
      if (intno * 8 + 7 > dt->limit)
            NAME_(raise_exception_err)(CPU_FAULT_GP, intno * 8 + 2);
      ptr = dt->base + intno * 8;
      e1 = ldl_kernel(ptr);
      e2 = ldl_kernel(ptr + 4);
      /* check gate type */
      type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
      switch(type) {
      case 5: /* task gate */
            /* must do that check here to return the correct error code */
            if (!(e2 & DESC_P_MASK))
                  NAME_(raise_exception_err)(CPU_FAULT_NP, intno * 8 + 2);
            NAME_(switch_tss)(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
            if (has_error_code) {
                  int mask, type_;
                  /* push the error code */
                  type_ = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
                  shift = type_ >> 3;
                  if (env->segs[R_SS].flags & DESC_B_MASK)
                        mask = 0xffffffff;
                  else
                        mask = 0xffff;
                  esp = (ESP - (2 << shift)) & mask;
                  ssp = env->segs[R_SS].base + esp;
                  if (shift)
                        stl_kernel(ssp, error_code);
                  else
                        stw_kernel(ssp, error_code);
                  ESP = (esp & mask) | (ESP & ~mask);
            }
            return;
      case 6: /* 286 interrupt gate */
      case 7: /* 286 trap gate */
      case 14: /* 386 interrupt gate */
      case 15: /* 386 trap gate */
            break;
      default:
            NAME_(raise_exception_err)(CPU_FAULT_GP, intno * 8 + 2);
            break;
      }
      dpl = (e2 >> DESC_DPL_SHIFT) & 3;
      cpl = env->hflags & HF_CPL_MASK;
      /* check privilege if software int */
      if (is_int && dpl < cpl)
            NAME_(raise_exception_err)(CPU_FAULT_GP, intno * 8 + 2);
      /* check valid bit */
      if (!(e2 & DESC_P_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_NP, intno * 8 + 2);
      selector = e1 >> 16;
      offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
      if ((selector & 0xfffc) == 0)
            NAME_(raise_exception_err)(CPU_FAULT_GP, 0);

      if (load_segment(&e1, &e2, selector) != 0)
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
      if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
      dpl = (e2 >> DESC_DPL_SHIFT) & 3;
      if (dpl > cpl)
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
      if (!(e2 & DESC_P_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_NP, selector & 0xfffc);
      if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &esp, dpl);
            if ((ss & 0xfffc) == 0)
                  NAME_(raise_exception_err)(CPU_FAULT_TS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                  NAME_(raise_exception_err)(CPU_FAULT_TS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                  NAME_(raise_exception_err)(CPU_FAULT_TS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                  NAME_(raise_exception_err)(CPU_FAULT_TS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK)
             || (ss_e2 & DESC_CS_MASK)
             || !(ss_e2 & DESC_W_MASK))
                  NAME_(raise_exception_err)(CPU_FAULT_TS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                  NAME_(raise_exception_err)(CPU_FAULT_TS, ss & 0xfffc);
            new_stack = 1;
            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
      } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
            /* to same privilege */
            if (env->eflags & CPU_VM_MASK)
                  NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
            new_stack = 0;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            esp = ESP;
            dpl = cpl;
            ss_e1 = 0; /* avoid warning */
            ss_e2 = 0; /* avoid warning */
            ss = 0; /* avoid warning */
      } else {
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
      }

      shift = type >> 3;

#if 0
      /* XXX: check that enough room is available */
      push_size = 6 + (new_stack << 2) + (has_error_code << 1);
      if (env->eflags & CPU_VM_MASK)
            push_size += 8;
      push_size <<= shift;
#endif

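      /*
       * shift selects the push width: 386 gates (type 14/15) push 32-bit
       * values, 286 gates (type 6/7) push 16-bit values. On a stack switch
       * the old SS:ESP (and, coming from VM86 mode, the data segment
       * selectors) are pushed first, then EFLAGS, CS, EIP and, if
       * applicable, the error code.
       */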
      if (shift == 1) {
            if (new_stack) {
                  if (env->eflags & CPU_VM_MASK) {
                        PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                        PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                        PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                        PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
                  }
                  PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
                  PUSHL(ssp, esp, sp_mask, ESP);
            }
            PUSHL(ssp, esp, sp_mask, compute_eflags());
            PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, esp, sp_mask, old_eip);
            if (has_error_code) {
                  PUSHL(ssp, esp, sp_mask, error_code);
            }
      } else {
            if (new_stack) {
                  if (env->eflags & CPU_VM_MASK) {
                        PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                        PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                        PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                        PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
                  }
                  PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
                  PUSHW(ssp, esp, sp_mask, ESP);
            }
            PUSHW(ssp, esp, sp_mask, compute_eflags());
            PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, esp, sp_mask, old_eip);
            if (has_error_code) {
                  PUSHW(ssp, esp, sp_mask, error_code);
            }
      }

      if (new_stack) {
            if (env->eflags & CPU_VM_MASK) {
                  cpu_x86_load_seg_cache(R_ES, 0, 0, 0, 0);
                  cpu_x86_load_seg_cache(R_DS, 0, 0, 0, 0);
                  cpu_x86_load_seg_cache(R_FS, 0, 0, 0, 0);
                  cpu_x86_load_seg_cache(R_GS, 0, 0, 0, 0);
            }
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(R_SS, ss,
                        ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
      }
      ESP = (ESP & ~sp_mask) | (esp & sp_mask);

      selector = (selector & ~3) | dpl;
      cpu_x86_load_seg_cache(R_CS, selector,
                  get_seg_base(e1, e2), get_seg_limit(e1, e2), e2);
      cpu_x86_set_cpl(dpl);
      env->eip = offset;

      /* interrupt gates clear the IF flag, trap gates leave it set */
      if ((type & 1) == 0) {
            env->eflags &= ~CPU_IF_MASK;
      }
      env->eflags &= ~(CPU_TF_MASK | CPU_VM_MASK | CPU_RF_MASK | CPU_NT_MASK);
}


/* real mode interrupt */
static void
NAME_(do_interrupt_real)(int intno, int is_int, int error_code, unsigned int next_eip)
{
      SegmentCache *dt;
      target_ulong ptr, ssp;
      int selector;
      uint32_t offset, esp;
      uint32_t old_cs, old_eip;

      /* real mode (simpler !) */
      dt = &env->idt;
      if (intno * 4 + 3 > dt->limit) {
            NAME_(raise_exception_err)(CPU_FAULT_GP, intno * 8 + 2);
      }
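      /* each real-mode IVT entry is 4 bytes: 16-bit offset, 16-bit segment */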
      ptr = dt->base + intno * 4;
      offset = lduw_kernel(ptr);
      selector = lduw_kernel(ptr + 2);
      esp = ESP;
      ssp = env->segs[R_SS].base;
      if (is_int) {
            old_eip = next_eip;
      } else {
            old_eip = env->eip;
      }
      old_cs = env->segs[R_CS].selector;

      /* XXX: use SS segment size ? */
      PUSHW(ssp, esp, 0xffff, compute_eflags());
      PUSHW(ssp, esp, 0xffff, old_cs);
      PUSHW(ssp, esp, 0xffff, old_eip);

      /* update processor state */
      ESP = (ESP & ~0xffff) | (esp & 0xffff);
      env->eip = offset;
      env->segs[R_CS].selector = selector;
      env->segs[R_CS].base = (selector << 4);
      env->eflags &= ~(CPU_IF_MASK | CPU_TF_MASK | CPU_AC_MASK | CPU_RF_MASK);
}

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
static target_ulong
get_rsp_from_tss(int level)
{
    int index_;
    
#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n", 
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
      assert(0);
    }
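    /*
     * 64-bit TSS layout: RSP0-RSP2 start at offset 4, IST1-IST7 at
     * offset 36; each entry is 8 bytes, hence 8 * level + 4.
     */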
    index_ = 8 * level + 4;
    if ((index_ + 7) > env->tr.limit)
        NAME_(raise_exception_err)(CPU_FAULT_TS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index_);
}

/* 64 bit interrupt */
static void
NAME_(do_interrupt64)(int intno, int is_int, int error_code,
            target_ulong next_eip, int is_hw)
{
      SegmentCache *dt;
      target_ulong ptr;
      int type, dpl, selector, cpl, ist;
      int has_error_code, new_stack;
      uint32_t e1, e2, e3, ss;
      target_ulong old_eip, esp, offset;

      has_error_code = 0;
      if (!is_int && !is_hw) {
            switch(intno) {
                  case 8:
                  case 10:
                  case 11:
                  case 12:
                  case 13:
                  case 14:
                  case 17:
                        has_error_code = 1;
                        break;
            }
      }
      if (is_int)
            old_eip = next_eip;
      else
            old_eip = env->eip;

      dt = &env->idt;
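      /* in long mode each IDT entry is 16 bytes */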
      if (intno * 16 + 15 > dt->limit)
            NAME_(raise_exception_err)(CPU_FAULT_GP, intno * 16 + 2);
      ptr = dt->base + intno * 16;
      e1 = ldl_kernel(ptr);
      e2 = ldl_kernel(ptr + 4);
      e3 = ldl_kernel(ptr + 8);
      /* check gate type */
      type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
      switch(type) {
            case 14: /* 386 interrupt gate */
            case 15: /* 386 trap gate */
                  break;
            default:
                  NAME_(raise_exception_err)(CPU_FAULT_GP, intno * 16 + 2);
                  break;
      }
      dpl = (e2 >> DESC_DPL_SHIFT) & 3;
      cpl = env->hflags & HF_CPL_MASK;
      /* check privilege if software int */
      if (is_int && dpl < cpl)
            NAME_(raise_exception_err)(CPU_FAULT_GP, intno * 16 + 2);
      /* check valid bit */
      if (!(e2 & DESC_P_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_NP, intno * 16 + 2);
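      /*
       * The 64-bit entry offset is split across e1, e2 and e3; bits 0-2
       * of e2 hold the Interrupt Stack Table index.
       */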
      selector = e1 >> 16;
      offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);

      ist = e2 & 7;
      if ((selector & 0xfffc) == 0)
            NAME_(raise_exception_err)(CPU_FAULT_GP, 0);

      if (load_segment(&e1, &e2, selector) != 0)
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
      if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
      dpl = (e2 >> DESC_DPL_SHIFT) & 3;
      if (dpl > cpl)
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
      if (!(e2 & DESC_P_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_NP, selector & 0xfffc);
      if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
      if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
            /* to inner privilege */
            if (ist != 0) {
                  esp = get_rsp_from_tss(ist + 3);
            } else {
                  esp = get_rsp_from_tss(dpl);
            }
            esp &= ~0xfLL; /* align stack */
            ss = 0;
            new_stack = 1;
      } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
            /* to same privilege */
            if (env->eflags & CPU_VM_MASK)
                  NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
            new_stack = 0;
            if (ist != 0) {
                  esp = get_rsp_from_tss(ist + 3);
            } else {
                  esp = ESP;
            }
            esp &= ~0xfLL; /* align stack */
            dpl = cpl;
      } else {
            NAME_(raise_exception_err)(CPU_FAULT_GP, selector & 0xfffc);
            new_stack = 0; /* avoid warning */
            esp = 0; /* avoid warning */
      }

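      /*
       * In 64-bit mode SS:RSP is always pushed, followed by RFLAGS, CS,
       * RIP and, if applicable, the error code.
       */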
      PUSHQ(esp, env->segs[R_SS].selector);
      PUSHQ(esp, ESP);
      PUSHQ(esp, compute_eflags());
      PUSHQ(esp, env->segs[R_CS].selector);
      PUSHQ(esp, old_eip);
      if (has_error_code) {
            PUSHQ(esp, error_code);
      }

      if (new_stack) {
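            /* a stack switch in 64-bit mode loads a NULL SS with RPL = new CPL */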
            ss = 0 | dpl;
            cpu_x86_load_seg_cache(R_SS, ss, 0, 0, 0);
      }
      ESP = esp;

      selector = (selector & ~3) | dpl;
      cpu_x86_load_seg_cache(R_CS, selector, 
                  get_seg_base(e1, e2),
                  get_seg_limit(e1, e2),
                  e2);
      cpu_x86_set_cpl(dpl);
      env->eip = offset;

      /* interrupt gates clear the IF flag, trap gates leave it set */
      if ((type & 1) == 0) {
            env->eflags &= ~CPU_IF_MASK;
      }
      env->eflags &= ~(CPU_TF_MASK | CPU_VM_MASK | CPU_RF_MASK | CPU_NT_MASK);
}
#endif /* CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT */

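/*
 * Deliver the pending hardware interrupt in env->exception_index,
 * dispatching to the long-mode, protected-mode or real-mode handler.
 */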
static void
do_interrupt(void)
{
      if (env->cr[0] & CPU_CR0_PE_MASK) {
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
            if (env->hflags & HF_LMA_MASK) {
                  NAME_(do_interrupt64)(env->exception_index, 0, 0, 0, 1);
            } else
#endif
            {
                  NAME_(do_interrupt_protected)(env->exception_index, 0, 0, 0, 1);
            }
      } else {
            NAME_(do_interrupt_real)(env->exception_index, 0, 0, 0);
      }
      env->exception_index = -1;
}

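/*
 * Deliver the pending exception recorded in env->exception_index,
 * env->error_code and env->exception_next_eip.
 */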
static void
do_exception(void)
{
      if (env->cr[0] & CPU_CR0_PE_MASK) {
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
            if (env->hflags & HF_LMA_MASK) {
                  NAME_(do_interrupt64)(env->exception_index,
                               env->exception_is_int,
                               env->error_code,
                               env->exception_next_eip,
                               0);
            } else
#endif
            {
                  NAME_(do_interrupt_protected)(env->exception_index,
                              env->exception_is_int,
                              env->error_code,
                              env->exception_next_eip,
                              0);
            }
      } else {
            NAME_(do_interrupt_real)(env->exception_index,
                        env->exception_is_int,
                        env->error_code,
                        env->exception_next_eip);
      }
      env->exception_index = -1;
}

#if 80386 <= CONFIG_CPU
/*
 * functions for physical RAM access, needed to write CPU state to SMRAM.
 * {ld,st}l_... would better be called {ld,st}d_... (d for "doubleword", as
 * "long" is ambiguous) - but we keep to the qemu naming scheme.
 */
/* FIXME */
extern void
NAME_(mw_data_w)(Paddr pa, uint32_t val);
extern void
NAME_(mw_data_l)(Paddr pa, uint32_t val);

static void
stw_phys(Paddr addr, uint16_t val)
{
      NAME_(mw_data_w)(addr, val);
}

static void
stl_phys(Paddr addr, uint32_t val)
{
      NAME_(mw_data_l)(addr, val);
}

#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
static void
stq_phys(Paddr addr, uint64_t val)
{
      uint32_t v1;
      uint32_t v2;

      v1 = (uint32_t)val;
      v2 = val >> 32;
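      /* low dword at addr, high dword at addr + 4 (little-endian) */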
      NAME_(mw_data_l)(addr + 0, v1);
      NAME_(mw_data_l)(addr + 4, v2);
}
#endif

/*
 * handle SMI by writing CPU state to SMRAM and setting up SMI environment
 */
static void
do_smm(void)
{
      uint32_t base; /* This is correct; it is 32 bits even on AMD64! */
#if DEBUG_SMM_ENTRY
      int i;
      uint32_t l;
      faum_log(FAUM_LOG_DEBUG, "CPU", "",
                  "Going to System Management Mode... SMBASE 0x%08x\n",
                  env->smbase);
#endif
      /*
       * write current cpu status to SMBASE+0x8000+Offset:
       */
      base = env->smbase + 0x8000;
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
      /* Source: EM64T Software Developers Guide Vol. 2, Appendix A and E
       * (they really put the exact same table into two different
       * appendixes :)  ) */
      /* Offset   Register */
      /* 7FF8H    CR0 */
      stq_phys(base+0x7ff8, env->cr[0]);
      /* 7FF0H    CR3 */
      stq_phys(base+0x7ff0, env->cr[3]);
      /* 7FE8H    RFLAGS */
      stq_phys(base+0x7fe8, env->eflags);
      /* 7FE0H    IA32_EFER */
      stq_phys(base+0x7fe0, env->efer);
      /* 7FD8H    RIP */
      stq_phys(base+0x7fd8, env->eip);
      /* 7FD0H    DR6 */
      stq_phys(base+0x7fd0, env->dr[6]);
      /* 7FC8H    DR7 */
      stq_phys(base+0x7fc8, env->dr[7]);
      /* 7FC4H    TR SEL */
      stl_phys(base+0x7fc4, env->tr.selector);
      /* 7FC0H    LDTR SEL */
      /* 7FBCH    GS SEL */
      stl_phys(base+0x7fbc, env->segs[R_GS].selector);
      /* 7FB8H    FS SEL */
      stl_phys(base+0x7fb8, env->segs[R_FS].selector);
      /* 7FB4H    DS SEL */
      stl_phys(base+0x7fb4, env->segs[R_DS].selector);
      /* 7FB0H    SS SEL */
      stl_phys(base+0x7fb0, env->segs[R_SS].selector);
      /* 7FACH    CS SEL */
      stl_phys(base+0x7fac, env->segs[R_CS].selector);
      /* 7FA8H    ES SEL */
      stl_phys(base+0x7fa8, env->segs[R_ES].selector);
      /* 7FA4H    IO_MISC */
      /* 7F9CH    IO_MEM_ADDR */
      /* 7F94H    RDI */
      stq_phys(base+0x7f94, EDI);
      /* 7F8CH    RSI */
      stq_phys(base+0x7f8c, ESI);
      /* 7F84H    RBP */
      stq_phys(base+0x7f84, EBP);
      /* 7F7CH    RSP */
      stq_phys(base+0x7f7c, ESP);
      /* 7F74H    RBX */
      stq_phys(base+0x7f74, EBX);
      /* 7F6CH    RDX */
      stq_phys(base+0x7f6c, EDX);
      /* 7F64H    RCX */
      stq_phys(base+0x7f64, ECX);
      /* 7F5CH    RAX */
      stq_phys(base+0x7f5c, EAX);
      /* 7F54H    R8 */
      stq_phys(base+0x7f54, env->regs[8]);
      /* 7F4CH    R9 */
      stq_phys(base+0x7f4c, env->regs[9]);
      /* 7F44H    R10 */
      stq_phys(base+0x7f44, env->regs[10]);
      /* 7F3CH    R11 */
      stq_phys(base+0x7f3c, env->regs[11]);
      /* 7F34H    R12 */
      stq_phys(base+0x7f34, env->regs[12]);
      /* 7F2CH    R13 */
      stq_phys(base+0x7f2c, env->regs[13]);
      /* 7F24H    R14 */
      stq_phys(base+0x7f24, env->regs[14]);
      /* 7F1CH    R15 */
      stq_phys(base+0x7f1c, env->regs[15]);
      /* 7F08H - 7F1BH  Reserved */
      /* 7F04H    IEDBASE */
      /* 7F02H    Auto HALT Restart Field (Word) */
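      /* bit 0 is set if the processor was halted when the SMI was taken */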
      stw_phys(base+0x7f02, ((env->hflags >> HF_HALTED_SHIFT) & 1) << 0);
      /* 7F00H    I/O Instruction Restart Field (Word) */
      stw_phys(base+0x7f00, 0x0000);
      /* 7EFCH    SMM Revision Identifier Field (Doubleword) */
      /* see i386 implementation below for details */
      stl_phys(base+0x7efc, 0x00020001);
      /* 7EF8H    SMBASE Field (Doubleword) */
      stl_phys(base+0x7ef8, env->smbase);
      /* 7EF7H - 7EA8H Reserved */
      /* 7EA4H    LDT Info */
      /* 7EA0H    LDT Limit */
      /* 7E9CH    LDT Base (Lower 32 Bits) */
      /* 7E98H    IDT Limit */
      /* 7E94H    IDT Base (Lower 32 Bits) */
      /* 7E90H    GDT Limit */
      /* 7E8CH    GDT Base (Lower 32 Bits) */
      /* 7E8BH - 7E44H Reserved */
      /* 7E40H    CR4 */
      /* 7E3FH - 7DF0H Reserved */
      /* 7DE8H    IO_EIP */
      /* 7DE7H - 7DDCH Reserved */
      /* 7DD8H    IDT Base (Upper 32 Bits) */
      /* 7DD4H    LDT Base (Upper 32 Bits) */
      /* 7DD0H    GDT Base (Upper 32 Bits) */
      /* 7DCFH - 7C00H Reserved  - we'll store other internals here: */
      /* The following registers are saved (but not readable) and restored upon exiting SMM:
       * -   Control register CR4. (This register is cleared to all 0s while in SMM).
       * -   The hidden segment descriptor information stored in segment registers CS, DS, ES, FS,
       *     GS, and SS.
       */
      stq_phys(base+0x7dc8, env->cr[4]);
      stq_phys(base+0x7dc0, env->segs[R_CS].base);
      stq_phys(base+0x7db8, env->segs[R_DS].base);
      stq_phys(base+0x7db0, env->segs[R_ES].base);
      stq_phys(base+0x7da8, env->segs[R_FS].base);
      stq_phys(base+0x7da0, env->segs[R_GS].base);
      stq_phys(base+0x7d98, env->segs[R_SS].base);
      stq_phys(base+0x7d90, env->segs[R_CS].limit);
      stq_phys(base+0x7d88, env->segs[R_DS].limit);
      stq_phys(base+0x7d80, env->segs[R_ES].limit);
      stq_phys(base+0x7d78, env->segs[R_FS].limit);
      stq_phys(base+0x7d70, env->segs[R_GS].limit);
      stq_phys(base+0x7d68, env->segs[R_SS].limit);
      stq_phys(base+0x7d60, env->segs[R_CS].flags);
      stq_phys(base+0x7d58, env->segs[R_DS].flags);
      stq_phys(base+0x7d50, env->segs[R_ES].flags);
      stq_phys(base+0x7d48, env->segs[R_FS].flags);
      stq_phys(base+0x7d40, env->segs[R_GS].flags);
      stq_phys(base+0x7d38, env->segs[R_SS].flags);
      stq_phys(base+0x7d30, env->tr.base);
      stq_phys(base+0x7d28, env->tr.limit);
      stq_phys(base+0x7d20, env->tr.flags);
      /* maybe we've got even more to store here... FIXME? */
#else /* not (CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT) */
      /* Offset   Register */
      /* 7FFCH    CR0 */
      stl_phys(base+0x7ffc, env->cr[0]);
      /* 7FF8H    CR3 */
      stl_phys(base+0x7ff8, env->cr[3]);
      /* 7FF4H    EFLAGS */
      stl_phys(base+0x7ff4, env->eflags);
      /* 7FF0H    EIP */
      stl_phys(base+0x7ff0, EIP);
      /* 7FECH    EDI */
      stl_phys(base+0x7fec, EDI);
      /* 7FE8H    ESI */
      stl_phys(base+0x7fe8, ESI);
      /* 7FE4H    EBP */
      stl_phys(base+0x7fe4, EBP);
      /* 7FE0H    ESP */
      stl_phys(base+0x7fe0, ESP);
      /* 7FDCH    EBX */
      stl_phys(base+0x7fdc, EBX);
      /* 7FD8H    EDX */
      stl_phys(base+0x7fd8, EDX);
      /* 7FD4H    ECX */
      stl_phys(base+0x7fd4, ECX);
      /* 7FD0H    EAX */
      stl_phys(base+0x7fd0, EAX);
      /* 7FCCH    DR6 */
      stl_phys(base+0x7fcc, env->dr[6]);
      /* 7FC8H    DR7 */
      stl_phys(base+0x7fc8, env->dr[7]);
      /* 7FC4H    TR* */
      stl_phys(base+0x7fc4, env->tr.selector);
      /* 7FC0H    Reserved */
      /* 7FBCH    GS* */
      stl_phys(base+0x7fbc, env->segs[R_GS].selector);
      /* 7FB8H    FS* */
      stl_phys(base+0x7fb8, env->segs[R_FS].selector);
      /* 7FB4H    DS* */
      stl_phys(base+0x7fb4, env->segs[R_DS].selector);
      /* 7FB0H    SS* */
      stl_phys(base+0x7fb0, env->segs[R_SS].selector);
      /* 7FACH    CS* */
      stl_phys(base+0x7fac, env->segs[R_CS].selector);
      /* 7FA8H    ES* */
      stl_phys(base+0x7fa8, env->segs[R_ES].selector);
      /* 7FA4H    I/O State Field, see Section 13.7 */
      stl_phys(base+0x7fa4, 0x00000000); /* see SMM revision ID below */
      /* 7FA0H    I/O Memory Address Field, see Section 13.7 */
      stl_phys(base+0x7fa0, 0x00000000); /* see SMM revision ID below */
      /* 7F9FH-7F03H Reserved */
      /* 7F02H    Auto HALT Restart Field (Word) */
      /* difficult to implement in the current CPU simulation... */
      stw_phys(base+0x7f02, 0x0000);
      /* 7F00H    I/O Instruction Restart Field (Word) */
      stw_phys(base+0x7f00, 0x0000);
      /* 7EFCH    SMM Revision Identifier Field (Doubleword) */
      /* Processors that have an SMM revision ID of 30004H or higher have
       * I/O Instruction Restart. */
      /* The upper word of the SMM revision identifier refers to the
       * extensions available. If the I/O instruction restart flag (bit 16)
       * is set, the processor supports I/O instruction restart. If the
       * SMBASE relocation flag (bit 17) is set, SMRAM base address
       * relocation is supported. */
      /* we use "no i/o restart, but smbase relocation, revision 1" */
      stl_phys(base+0x7efc, 0x00020001);
      /* 7EF8H    SMBASE Field (Doubleword) */
      stl_phys(base+0x7ef8, env->smbase);
      /* 7EF7H-7E00H Reserved - we'll store other internals here: */
      /* The following registers are saved (but not readable) and restored upon exiting SMM:
       * -   Control register CR4. (This register is cleared to all 0s while in SMM).
       * -   The hidden segment descriptor information stored in segment registers CS, DS, ES, FS,
       *     GS, and SS.
       */
      stl_phys(base+0x7ef4, env->cr[4]);
      stl_phys(base+0x7ef0, env->segs[R_CS].base);
      stl_phys(base+0x7eec, env->segs[R_DS].base);
      stl_phys(base+0x7ee8, env->segs[R_ES].base);
      stl_phys(base+0x7ee4, env->segs[R_FS].base);
      stl_phys(base+0x7ee0, env->segs[R_GS].base);
      stl_phys(base+0x7edc, env->segs[R_SS].base);
      stl_phys(base+0x7ed8, env->segs[R_CS].limit);
      stl_phys(base+0x7ed4, env->segs[R_DS].limit);
      stl_phys(base+0x7ed0, env->segs[R_ES].limit);
      stl_phys(base+0x7ecc, env->segs[R_FS].limit);
      stl_phys(base+0x7ec8, env->segs[R_GS].limit);
      stl_phys(base+0x7ec4, env->segs[R_SS].limit);
      stl_phys(base+0x7ec0, env->segs[R_CS].flags);
      stl_phys(base+0x7ebc, env->segs[R_DS].flags);
      stl_phys(base+0x7eb8, env->segs[R_ES].flags);
      stl_phys(base+0x7eb4, env->segs[R_FS].flags);
      stl_phys(base+0x7eb0, env->segs[R_GS].flags);
      stl_phys(base+0x7eac, env->segs[R_SS].flags);
      stl_phys(base+0x7ea8, env->tr.base);
      stl_phys(base+0x7ea4, env->tr.limit);
      stl_phys(base+0x7ea0, env->tr.flags);
      /* maybe we've got even more to store here... FIXME? */
#endif /* not CONFIG_CPU_LM_SUPPORT */
      /*
       * set up SMM environment:
       */
      /* General-purpose registers    Undefined */
      /* EFLAGS                       00000002H */
      env->eflags = 0x00000002;
      /* CS selector                  SMM Base shifted right 4 bits */
      /* CS base                      SMM Base */
      cpu_x86_load_seg_cache(R_CS, (env->smbase >> 4), env->smbase,
                  0xffffffff, 0);
      /* DS, ES, FS, GS, SS Selectors 0000H */
      /* DS, ES, FS, GS, SS Bases     000000000H */
      /* DS, ES, FS, GS, SS Limits    0FFFFFFFFH */
      cpu_x86_load_seg_cache(R_DS, 0x0000, 0x00000000, 0xffffffff, 0);
      cpu_x86_load_seg_cache(R_ES, 0x0000, 0x00000000, 0xffffffff, 0);
      cpu_x86_load_seg_cache(R_FS, 0x0000, 0x00000000, 0xffffffff, 0);
      cpu_x86_load_seg_cache(R_GS, 0x0000, 0x00000000, 0xffffffff, 0);
      cpu_x86_load_seg_cache(R_SS, 0x0000, 0x00000000, 0xffffffff, 0);

      /* CR0                          PE, EM, TS, and PG flags set to 0;
       *                              others unmodified */
      env->cr[0] &= ~CPU_CR0_PE_MASK;
      env->cr[0] &= ~CPU_CR0_EM_MASK;
      env->cr[0] &= ~CPU_CR0_TS_MASK;
      env->cr[0] &= ~CPU_CR0_PG_MASK;
      NAME_(update_cr0)(env->cr[0]);
      /* CR4                          Cleared to zero */
      NAME_(update_cr4)(0x00000000);
      /* DR6                          Undefined */
      /* DR7                          00000400 */
      env->dr[7] = 0x00000400;
      /* EIP                          00008000H */
      env->eip = 0x00008000;
#if DEBUG_SMM_ENTRY
      fprintf(stderr, "\tFirst few bytes of SMI handler:");
      for (i=0; i < 64 ; i+=4) {
            if (! (i % 8)) {
                  fprintf(stderr, "\n\t%08x", env->smbase + 0x8000 + i);
            }
            l = ldl_phys(env->smbase + 0x8000 + i);
            fprintf(stderr, "\t%02lx  %02lx  %02lx  %02lx",
                        (unsigned long) l & 0xff,
                        (unsigned long) (l >> 8) & 0xff,
                        (unsigned long) (l >> 16) & 0xff,
                        (unsigned long) (l >> 24) & 0xff);
      }
      fprintf(stderr, "\n");
#endif /* DEBUG_SMM_ENTRY */
}
#endif /* 80386 <= CONFIG_CPU */

/*
 * Signal an interrupt. It is handled in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void __attribute__((__noreturn__))
NAME_(raise_interrupt)(int intno, int is_int, int error_code, int next_eip_addend)
{
      env->exception_index = intno;
      env->error_code = error_code;
      env->exception_is_int = is_int;
      env->exception_next_eip = env->eip + next_eip_addend;
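      /* unwind to the setjmp() in NAME_(step)() */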
      longjmp(env->jmp_env, 1);
}

/* same as raise_exception_err, but do not restore global registers */
void __attribute__((__noreturn__))
NAME_(raise_exception_err_norestore)(int exception_index, int error_code)
{
      env->exception_index = exception_index;
      env->error_code = error_code;
      env->exception_is_int = 0;
      env->exception_next_eip = 0;
      longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void __attribute__((__noreturn__))
NAME_(raise_exception_err)(int exception_index, int error_code)
{
      NAME_(raise_interrupt)(exception_index, 0, error_code, 0);
}

void __attribute__((__noreturn__))
NAME_(raise_exception)(int exception_index)
{
      NAME_(raise_interrupt)(exception_index, 0, 0, 0);
}

/*
 * Exit the current TB from a signal handler. The host registers are
 * restored in a state compatible with the CPU emulator.
 */
void __attribute__((__noreturn__))
NAME_(resume_from_signal)(void *puc)
{
      longjmp(env->jmp_env, 1);
}

#if 80386 <= CONFIG_CPU
static void
cpu_enter_smm(void)
{
      env->smm = 1;

      /* The host bridge remaps memory when switching between SMM and non-SMM. */
      NAME_(mmu_flush_all)(1);
}

void
NAME_(leave_smm)(void)
{
      env->smm = 0;

      /* The host bridge remaps memory when switching between SMM and non-SMM. */
      NAME_(mmu_flush_all)(1);
}
#endif /* 80386 <= CONFIG_CPU */

#if DEBUG_CONTROL_FLOW
static void
NAME_(dump)(void)
{
#if DEBUG_CONTROL_FLOW_REGS
      fprintf(stderr, " %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx",
                  (unsigned long) EAX, (unsigned long) EBX,
                  (unsigned long) ECX, (unsigned long) EDX,
                  (unsigned long) EBP, (unsigned long) ESP,
                  (unsigned long) EDI, (unsigned long) ESI);
#endif
#if DEBUG_CONTROL_FLOW_SREGS
      fprintf(stderr, " %04x %04x %04x %04x %04x %04x",
                  (unsigned int) env->segs[R_CS].selector,
                  (unsigned int) env->segs[R_SS].selector,
                  (unsigned int) env->segs[R_DS].selector,
                  (unsigned int) env->segs[R_ES].selector,
                  (unsigned int) env->segs[R_FS].selector,
                  (unsigned int) env->segs[R_GS].selector);
#endif
#if DEBUG_CONTROL_FLOW_FLAGS
      fprintf(stderr, " %08lx", compute_eflags());
#endif
      fprintf(stderr, "\n");
}
#endif /* DEBUG_CONTROL_FLOW */

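/*
 * Main CPU loop: handle power-off, reset, init, startup-IPI wait,
 * pending exceptions, SMIs and hardware interrupts, and execute
 * translated blocks until the instruction budget of this time slice
 * is used up; then yield to the scheduler and start over.
 */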
void __attribute__((__noreturn__))
NAME_(step)(void *_css)
{
      env = (struct CPUState *) _css;

again:      ;
      /* prepare setjmp context for exception handling */
      setjmp(env->jmp_env);
      T0 = 0;

      while (env->process.inst_cnt < env->process.inst_limit) {
            env->current_tb = (TranslationBlock *) 0;

            if (unlikely(! env->state_power)) {
#if DEBUG_CONTROL_FLOW
                  if (2 <= DEBUG_CONTROL_FLOW
                   || loglevel) {
                        fprintf(stderr, "%d: Power-off\n", env->apic.bsp);
                  }
#endif
                  if (env->process.inst_cnt < env->process.inst_limit) {
                        env->process.inst_cnt = env->process.inst_limit;
                  }
                  break;

            } else if (unlikely(env->state_n_reset != 3)) {
#if DEBUG_CONTROL_FLOW
                  if (2 <= DEBUG_CONTROL_FLOW
                   || loglevel) {
                        fprintf(stderr, "%d: Reset\n", env->apic.bsp);
                  }
#endif
                  env->state_n_reset |= 2; /* Reset handled. */
                  NAME_(core_reset)();
                  if (env->process.inst_cnt < env->process.inst_limit) {
                        env->process.inst_cnt = env->process.inst_limit;
                  }
                  break;

#if 80386 <= CONFIG_CPU
            } else if (unlikely(env->state_n_init != 3)) {
#if DEBUG_CONTROL_FLOW
                  if (2 <= DEBUG_CONTROL_FLOW
                   || loglevel) {
                        fprintf(stderr, "%d: Init\n", env->apic.bsp);
                  }
#endif
                  env->state_n_init |= 2; /* Init handled. */
                  NAME_(core_init)();
                  if (env->process.inst_cnt < env->process.inst_limit) {
                        env->process.inst_cnt = env->process.inst_limit;
                  }
                  break;

            } else if (unlikely(env->hflags & HF_WAITING_FOR_STARTUP_MASK)) {
                  /*
                   * Do nothing...
                   */
#if DEBUG_CONTROL_FLOW
                  if (2 <= DEBUG_CONTROL_FLOW
                   || loglevel) {
                        fprintf(stderr, "%d: Waiting for Startup-IPI at %08llx (%08llx:%08llx)",
                              env->apic.bsp,
                              (unsigned long long) env->eip
                              + (unsigned long long) env->segs[R_CS].base,
                              (unsigned long long) env->segs[R_CS].base,
                              (unsigned long long) env->eip);
                        NAME_(dump)();
                  }
#endif

                  if (env->process.inst_cnt < env->process.inst_limit) {
                        env->process.inst_cnt = env->process.inst_limit;
                  }
                  break;
#endif /* 80386 <= CONFIG_CPU */

            } else if (unlikely(0 <= env->exception_index)) {
                  /*
                   * If an exception is pending, we deliver it here,
                   * simulating a real CPU exception. On i386 this can
                   * trigger further exceptions, but double and triple
                   * faults are not handled yet.
                   */
#if DEBUG_CONTROL_FLOW
                  if (2 <= DEBUG_CONTROL_FLOW
                   || loglevel) {
                        fprintf(stderr, "%d: Exception 0x%02x at %08llx (%08llx:%08llx)",
                              env->apic.bsp,
                              env->exception_index,
                              (unsigned long long) EIP
                              + (unsigned long long) env->segs[R_CS].base,
                              (unsigned long long) env->segs[R_CS].base,
                              (unsigned long long) EIP);
                        NAME_(dump)();
                  }
#endif
                  env->process.inst_cnt += 100;

                  do_exception();
                  T0 = 0;

#if 80386 <= CONFIG_CPU
            } else if (unlikely((env->interrupt_request & CPU_INTERRUPT_SMI)
                  && ! env->smm
                  && ! (env->hflags & HF_INHIBIT_IRQ_MASK))) {
                  env->hflags &= ~HF_HALTED_MASK;

                  /* signal "acknowledge" */
                  NAME_(apic_smi_ack)();

#if DEBUG_CONTROL_FLOW
                  if (2 <= DEBUG_CONTROL_FLOW
                   || loglevel) {
                        fprintf(stderr, "%d: SMM-Interrupt at %08llx (%08llx:%08llx)",
                              env->apic.bsp,
                              (unsigned long long) env->eip
                              + (unsigned long long) env->segs[R_CS].base,
                              (unsigned long long) env->segs[R_CS].base,
                              (unsigned long long) env->eip);
                        NAME_(dump)();
                  }
#endif

                  env->process.inst_cnt += 100;

                  cpu_enter_smm();
                  /* execute SMI handler: start system management mode */
                  do_smm();
                  /* FIXME knilch: improve performance */
                  T0 = 0;
#endif /* 80386 <= CONFIG_CPU */

            } else if (unlikely((env->interrupt_request & CPU_INTERRUPT_IRQ)
                  && (env->eflags & CPU_IF_MASK)
                  && ! (env->hflags & HF_INHIBIT_IRQ_MASK))) {
                  /*
                   * If a hardware interrupt is pending, deliver it.
                   */
                  env->hflags &= ~HF_HALTED_MASK;

                  env->exception_index = NAME_(apic_irq_ack)();
                  env->exception_is_int = 0;
                  env->error_code = 0;
                  env->exception_next_eip = 0;

#if DEBUG_CONTROL_FLOW
                  if (2 <= DEBUG_CONTROL_FLOW
                   || loglevel) {
                        fprintf(stderr, "%d: Interrupt 0x%02x at %08llx (%08llx:%08llx)",
                              env->apic.bsp,
                              env->exception_index,
                              (unsigned long long) env->eip
                              + (unsigned long long) env->segs[R_CS].base,
                              (unsigned long long) env->segs[R_CS].base,
                              (unsigned long long) env->eip);
                        NAME_(dump)();
                  }
#endif

                  env->process.inst_cnt += 100;

                  do_interrupt();
                  T0 = 0;

            } else if (unlikely(env->hflags & HF_HALTED_MASK)) {
                  /*
                   * Do nothing...
                   */
#if DEBUG_CONTROL_FLOW
                  if (2 <= DEBUG_CONTROL_FLOW
                   || loglevel) {
                        fprintf(stderr, "%d: Halting at %08llx (%08llx:%08llx)",
                              env->apic.bsp,
                              (unsigned long long) env->eip
                              + (unsigned long long) env->segs[R_CS].base,
                              (unsigned long long) env->segs[R_CS].base,
                              (unsigned long long) env->eip);
                        NAME_(dump)();
                  }
#endif

                  if (env->process.inst_cnt < env->process.inst_limit) {
                        env->process.inst_cnt = env->process.inst_limit;
                  }
                  break;

            } else
#if USE_KFAUM && 0
            if (kfaum_is_ok(env)) {
                  /*
                   * Execute user mode code.
                   */
                  int ret;

                  env->eflags = env->eflags
                        | compute_all()
                        | (DF & CPU_DF_MASK);

                  ret = NAME_(kfaum_exec)(env);
                  assert(ret == 0 || ret == 1);

                  /* Put eflags in CPU temporary format. */
                  CC_SRC = env->eflags
                        & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                  DF = 1 - (2 * ((env->eflags >> 10) & 1));
                  CC_OP = CC_OP_EFLAGS;
                  env->eflags &= ~(CPU_DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);

                  T0 = 0;

                  if (ret == 1) {
                        env->exception_index = -1;
                        goto must_simulate;
                  }

            } else
#endif
            {
                  /*
                   * Execute the generated code.
                   */
                  TranslationBlock *tb;
                  target_ulong cs_base;
                  target_ulong pc;
                  unsigned int flags;

#if USE_KFAUM && 0
            must_simulate:;
#endif
#if DEBUG_CONTROL_FLOW
                  if (2 <= DEBUG_CONTROL_FLOW
                   || loglevel) {
                        fprintf(stderr, "%d: Executing at %08llx (%08llx:%08llx)",
                              env->apic.bsp,
                              (unsigned long long) env->eip
                              + (unsigned long long) env->segs[R_CS].base,
                              (unsigned long long) env->segs[R_CS].base,
                              (unsigned long long) env->eip);
                        NAME_(dump)();
                  }
#endif

                  /*
                   * We record a subset of the CPU state. It will
                   * always be the same before a given translated block
                   * is executed.
                   */
                  flags = env->hflags;
                  flags |= (env->eflags & (CPU_IOPL_MASK | CPU_TF_MASK | CPU_VM_MASK));
                  cs_base = env->segs[R_CS].base;
                  pc = cs_base + env->eip;

                  tb = NAME_(tb_get)(pc, cs_base, flags, T0);

                  env->current_tb = tb;
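                  /*
                   * Call the translated code. On i386 hosts with long mode
                   * support ebx/esi/edi are saved and restored by hand;
                   * the extra 4 bytes presumably keep the stack 16-byte
                   * aligned across the call.
                   */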
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT && defined(__i386__)
                  asm volatile ("subl $0x04,%%esp\n"
                              "push %%ebx\n"
                              "push %%esi\n"
                              "push %%edi\n"
                              "call *%0\n"
                              "pop %%edi\n"
                              "pop %%esi\n"
                              "pop %%ebx\n"
                              "addl $0x04,%%esp\n"
                              : : "r" ((void (*)(void)) tb->tc_ptr) : "ebx", "esi", "edi");
#else
                  ((void (*)(void)) tb->tc_ptr)();
#endif
            }
      }

      sched_to_scheduler();

      goto again;
}
