/* arch_gen_cpu_x86_state.h */

/*
 * $Id: arch_gen_cpu_x86_state.h,v 1.36 2009-02-18 16:15:26 vrsieh Exp $
 *
 * Parts derived from QEMU sources.
 * Modified for FAUmachine by Volkmar Sieh.
 *  
 *  Copyright (c) 2005-2009 FAUmachine Team.
 *  Copyright (c) 2003 Fabrice Bellard.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
 * USA
 */

#ifndef __CPU_STATE_H_INCLUDED
#define __CPU_STATE_H_INCLUDED

#include "arch_gen_cpu_x86.h"

#include <inttypes.h>
#include <setjmp.h>

#include "qemu/libcpu.h"

#include "glue-main.h"

#include "sig_boolean.h"
#include "sig_icc_bus.h"
#if CONFIG_CPU < 80386
#include "sig_isa_bus.h"
#else
#include "sig_host_bus.h"
#endif

#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5

/*
 * Interrupts/exceptions/syscalls
 */
#define CPU_FAULT_DE          (0)   /* Divide Error */
#define CPU_FAULT_DB          (1)   /* Debug */
#define CPU_FAULT_NMI         (2)   /* NMI */
#define CPU_FAULT_BP          (3)   /* Breakpoint */
#define CPU_FAULT_OF          (4)   /* Overflow */
#define CPU_FAULT_BR          (5)   /* BOUND Range Exceeded */
#define CPU_FAULT_UD          (6)   /* Invalid Opcode */
#define CPU_FAULT_NM          (7)   /* Device Not Available */
#define CPU_FAULT_DF          (8)   /* Double fault */
/* #define CPU_FAULT_??       (9) */      /* Coprocessor Segment Overrun */
#define CPU_FAULT_TS          (10)  /* Invalid TSS */
#define CPU_FAULT_NP          (11)  /* Segment Not Present */
#define CPU_FAULT_SS          (12)  /* Stack-Segment Fault */
#define CPU_FAULT_GP          (13)  /* General Protection */
#define CPU_FAULT_PF          (14)  /* Page Fault */
/* #define CPU_FAULT_??       (15) */     /* Intel reserved */
#define CPU_FAULT_MF          (16)  /* Floating-Point Error (Math Fault) */
#define CPU_FAULT_AC          (17)  /* Alignment Check */
#define CPU_FAULT_MC          (18)  /* Machine Check */
#define CPU_FAULT_XF          (19)  /* Streaming SIMD Extensions */

/*
 * CPUID flag bits
 */
/* cpuid_features bits (CPUID leaf 1, EDX) */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
#define CPUID_PAT  (1 << 16)
#define CPUID_CLFLUSH (1 << 19)
/* ... */
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)

/* cpuid_ext_features bits (CPUID leaf 1, ECX) */
#define CPUID_EXT_SS3      (1 << 0)
#define CPUID_EXT_MONITOR  (1 << 3)
#define CPUID_EXT_CX16     (1 << 13)

/* cpuid_ext2_features bits (CPUID leaf 0x80000001, EDX) */
#define CPUID_EXT2_SYSCALL (1 << 11)
#define CPUID_EXT2_NX      (1 << 20)
#define CPUID_EXT2_FFXSR   (1 << 25)
#define CPUID_EXT2_LM      (1 << 29)
 
/*
 * %eflags Bits
 */
/* Don't change these definitions! Hardcoded in real CPU! */
#define CPU_ID_SHIFT    (21)
#define CPU_VIP_SHIFT   (20)
#define CPU_VIF_SHIFT   (19)
#define CPU_AC_SHIFT    (18)
#define CPU_VM_SHIFT    (17)
#define CPU_RF_SHIFT    (16)
                        /* Bit 15 not used (0). */
#define CPU_NT_SHIFT    (14)
#define CPU_IOPL_SHIFT  (12)  /* Two bits! */
#define CPU_OF_SHIFT    (11)
#define CPU_DF_SHIFT    (10)
#define CPU_IF_SHIFT    (9)
#define CPU_TF_SHIFT    (8)
#define CPU_SF_SHIFT    (7)
#define CPU_ZF_SHIFT    (6)
                        /* Bit 5 not used (0). */
#define CPU_AF_SHIFT    (4)
                        /* Bit 3 not used (0). */
#define CPU_PF_SHIFT    (2)
                        /* Bit 1 not used (1). */
#define CPU_CF_SHIFT    (0)

#define CPU_ID_MASK     (1 << CPU_ID_SHIFT)
#define CPU_VIP_MASK    (1 << CPU_VIP_SHIFT)
#define CPU_VIF_MASK    (1 << CPU_VIF_SHIFT)
#define CPU_AC_MASK     (1 << CPU_AC_SHIFT)
#define CPU_VM_MASK     (1 << CPU_VM_SHIFT)
#define CPU_RF_MASK     (1 << CPU_RF_SHIFT)
#define CPU_NT_MASK     (1 << CPU_NT_SHIFT)
#define CPU_IOPL_MASK   (3 << CPU_IOPL_SHIFT)
#define CPU_OF_MASK     (1 << CPU_OF_SHIFT)
#define CPU_DF_MASK     (1 << CPU_DF_SHIFT)
#define CPU_IF_MASK     (1 << CPU_IF_SHIFT)
#define CPU_TF_MASK     (1 << CPU_TF_SHIFT)
#define CPU_SF_MASK     (1 << CPU_SF_SHIFT)
#define CPU_ZF_MASK     (1 << CPU_ZF_SHIFT)
#define CPU_AF_MASK     (1 << CPU_AF_SHIFT)
#define CPU_PF_MASK     (1 << CPU_PF_SHIFT)
#define CPU_CF_MASK     (1 << CPU_CF_SHIFT)

/* Short aliases for the arithmetic condition-code flags. */
#define CC_C      CPU_CF_MASK
#define CC_P      CPU_PF_MASK
#define CC_A      CPU_AF_MASK
#define CC_Z      CPU_ZF_MASK
#define CC_S    CPU_SF_MASK
#define CC_O    CPU_OF_MASK

/* hidden flags - used internally by qemu to represent additional cpu
   states. Only the CPL and INHIBIT_IRQ are not redundant. We avoid
   using the IOPL_MASK, TF_MASK and VM_MASK bit position to ease OR-ing
   with eflags. */
/* current cpl */
#define HF_CPL_SHIFT         0
/* true if hardware interrupts must be disabled for next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16 or 32 segments */
#define HF_CS32_SHIFT        4
#define HF_SS32_SHIFT        5
/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */
#define HF_ADDSEG_SHIFT      6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT          7
#define HF_TF_SHIFT          8 /* must be same as eflags */
#define HF_MP_SHIFT          9 /* the order must be MP, EM, TS */
#define HF_EM_SHIFT         10
#define HF_TS_SHIFT         11
#define HF_IOPL_SHIFT       12 /* must be same as eflags */
#define HF_LMA_SHIFT        14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT       15 /* only used on x86_64: 64 bit code segment  */
#define HF_OSFXSR_SHIFT     16 /* CR4.OSFXSR */
#define HF_VM_SHIFT         17 /* must be same as eflags */
#define HF_HALTED_SHIFT     18 /* CPU halted */
#define HF_NE_SHIFT         19 /* copy of CR0.NE */
#define HF_WAITING_FOR_STARTUP_SHIFT 20 /* Appl. Proc. waiting for Startup */

#define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
#define HF_INHIBIT_IRQ_MASK  (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK         (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK         (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK       (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK           (1 << HF_PE_SHIFT)
#define HF_TF_MASK           (1 << HF_TF_SHIFT)
#define HF_MP_MASK           (1 << HF_MP_SHIFT)
#define HF_EM_MASK           (1 << HF_EM_SHIFT)
#define HF_TS_MASK           (1 << HF_TS_SHIFT)
#define HF_LMA_MASK          (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK         (1 << HF_CS64_SHIFT)
#define HF_OSFXSR_MASK       (1 << HF_OSFXSR_SHIFT)
#define HF_HALTED_MASK       (1 << HF_HALTED_SHIFT)
#define HF_NE_MASK           (1 << HF_NE_SHIFT)
#define HF_WAITING_FOR_STARTUP_MASK (1 << HF_WAITING_FOR_STARTUP_SHIFT)

/*
 * %cr0 Bits
 */
#define CPU_CR0_PE_SHIFT      (0)   /* Protection Enable */
#define CPU_CR0_MP_SHIFT      (1)   /* Monitor Coprocessor */
#define CPU_CR0_EM_SHIFT      (2)   /* Emulation */
#define CPU_CR0_TS_SHIFT      (3)   /* Task Switched */
#define CPU_CR0_ET_SHIFT      (4)   /* Extension Type */
#define CPU_CR0_NE_SHIFT      (5)   /* Numeric Error */
#define CPU_CR0_WP_SHIFT      (16)  /* Write Protect */
#define CPU_CR0_AM_SHIFT      (18)  /* Alignment Mask */
#define CPU_CR0_NW_SHIFT      (29)  /* Not Write-through */
#define CPU_CR0_CD_SHIFT      (30)  /* Cache Disable */
#define CPU_CR0_PG_SHIFT      (31)  /* Paging */

#define CPU_CR0_PE_MASK       (1 << CPU_CR0_PE_SHIFT)
#define CPU_CR0_MP_MASK       (1 << CPU_CR0_MP_SHIFT)
#define CPU_CR0_EM_MASK       (1 << CPU_CR0_EM_SHIFT)
#define CPU_CR0_TS_MASK       (1 << CPU_CR0_TS_SHIFT)
#define CPU_CR0_ET_MASK       (1 << CPU_CR0_ET_SHIFT)
#define CPU_CR0_NE_MASK       (1 << CPU_CR0_NE_SHIFT)
#define CPU_CR0_WP_MASK       (1 << CPU_CR0_WP_SHIFT)
#define CPU_CR0_AM_MASK       (1 << CPU_CR0_AM_SHIFT)
#define CPU_CR0_NW_MASK       (1 << CPU_CR0_NW_SHIFT)
#define CPU_CR0_CD_MASK       (1 << CPU_CR0_CD_SHIFT)
#define CPU_CR0_PG_MASK       (1 << CPU_CR0_PG_SHIFT)

/*
 * %cr3 Bits
 */
#define CPU_PWT_SHIFT   (3)   /* Page-level Writes Transparent */
#define CPU_PCD_SHIFT   (4)   /* Page-level Cache Disable */

#define CPU_PWT_MASK    (1 << CPU_PWT_SHIFT)
#define CPU_PCD_MASK    (1 << CPU_PCD_SHIFT)

/*
 * %cr4 Bits
 */
#define CPU_CR4_VME_SHIFT     (0)   /* Virtual-8086 Mode Extensions */
#define CPU_CR4_PVI_SHIFT     (1) /* Protected-Mode Virtual Interrupts */
#define CPU_CR4_TSD_SHIFT     (2) /* Time Stamp Disable */
#define CPU_CR4_DE_SHIFT      (3) /* Debugging Extensions */
#define CPU_CR4_PSE_SHIFT     (4) /* Page Size Extensions */
#define CPU_CR4_PAE_SHIFT     (5) /* Physical Address Extension */
#define CPU_CR4_MCE_SHIFT     (6) /* Machine Check Enable */
#define CPU_CR4_PGE_SHIFT     (7) /* Page Global Enable */
#define CPU_CR4_PCE_SHIFT     (8) /* Performance-Monitoring Counter Enable */
#define CPU_CR4_OSFXSR_SHIFT  (9) /* OS Support for FXSAVE and FXRSTOR */
#define CPU_CR4_OSXMMEXCPT_SHIFT (10) /* OS Support for Unmasked SIMD Exceptions */

#define CPU_CR4_VME_MASK      (1 << CPU_CR4_VME_SHIFT)
#define CPU_CR4_PVI_MASK      (1 << CPU_CR4_PVI_SHIFT)
#define CPU_CR4_TSD_MASK      (1 << CPU_CR4_TSD_SHIFT)
#define CPU_CR4_DE_MASK       (1 << CPU_CR4_DE_SHIFT)
#define CPU_CR4_PSE_MASK      (1 << CPU_CR4_PSE_SHIFT)
#define CPU_CR4_PAE_MASK      (1 << CPU_CR4_PAE_SHIFT)
#define CPU_CR4_MCE_MASK      (1 << CPU_CR4_MCE_SHIFT)
#define CPU_CR4_PGE_MASK      (1 << CPU_CR4_PGE_SHIFT)
#define CPU_CR4_PCE_MASK      (1 << CPU_CR4_PCE_SHIFT)
#define CPU_CR4_OSFXSR_MASK   (1 << CPU_CR4_OSFXSR_SHIFT)
#define CPU_CR4_OSXMMEXCPT_MASK     (1 << CPU_CR4_OSXMMEXCPT_SHIFT)

/*
 * Segment descriptor fields
 * (bit positions within the high 32-bit word of a descriptor)
 */
#define DESC_G_MASK     (1 << 23)
#define DESC_B_SHIFT    22
#define DESC_B_MASK     (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT    21 /* x86_64 only : 64 bit code segment */
#define DESC_L_MASK     (1 << DESC_L_SHIFT)
#define DESC_AVL_MASK   (1 << 20)
#define DESC_P_MASK     (1 << 15)
#define DESC_DPL_SHIFT  13
#define DESC_S_MASK     (1 << 12)
#define DESC_TYPE_SHIFT 8
#define DESC_A_MASK     (1 << 8)

#define DESC_CS_MASK    (1 << 11) /* 1=code segment 0=data segment */
#define DESC_C_MASK     (1 << 10) /* code: conforming */
#define DESC_R_MASK     (1 << 9)  /* code: readable */

#define DESC_E_MASK     (1 << 10) /* data: expansion direction */
#define DESC_W_MASK     (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)

/*
 * Page descriptor bits
 */
#define PG_PRESENT_BIT  0
#define PG_RW_BIT 1
#define PG_USER_BIT     2
#define PG_PWT_BIT      3
#define PG_PCD_BIT      4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT    6
#define PG_PSE_BIT      7
#define PG_GLOBAL_BIT   8
#define PG_NX_BIT 63

#define PG_PRESENT_MASK  (1 << PG_PRESENT_BIT)
#define PG_RW_MASK       (1 << PG_RW_BIT)
#define PG_USER_MASK     (1 << PG_USER_BIT)
#define PG_PWT_MASK      (1 << PG_PWT_BIT)
#define PG_PCD_MASK      (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK    (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK      (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK   (1 << PG_GLOBAL_BIT)
#define PG_NX_MASK       (1LL << PG_NX_BIT)

/* Page-fault error code bits (pushed for CPU_FAULT_PF). */
#define PG_ERROR_W_BIT     1

#define PG_ERROR_P_MASK    0x01
#define PG_ERROR_W_MASK    (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK    0x04
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK  0x10

/* Local APIC register offsets, relative to the APIC MMIO base. */
#define APIC_ID         0x020
#define APIC_LVR  0x030
#define APIC_TASKPRI    0x080
#define APIC_ARBPRI     0x090
#define APIC_PROCPRI    0x0a0
#define APIC_EOI  0x0b0
#define APIC_LDR  0x0d0
#define APIC_DFR  0x0e0
#define APIC_SPIV 0x0f0
#define APIC_ISR  0x100
#define APIC_TMR  0x180
#define APIC_IRR  0x200
#define APIC_ESR  0x280
#define APIC_ICR  0x300
#define APIC_ICR2 0x310
#define APIC_LVTT 0x320
#define APIC_LVTPC      0x340
#define APIC_LVT0 0x350
#define APIC_LVT1 0x360
#define APIC_LVTERR     0x370
#define APIC_TMICT      0x380
#define APIC_TMCCT      0x390
#define APIC_TDCR 0x3e0

/*
 * Per-CPU local APIC state.  The /NNN/ markers give the APIC register
 * offset (cf. the APIC_* defines above) that each field models.
 */
struct local_apic {
/*020*/ /* APIC ID Register */
#if 1 /* Pentium and P6 family */
      unsigned int phys_apic_id : 4;
#elif 0 /* Pentium 4 and Xeon */
      unsigned int phys_apic_id : 8;
#endif

/*080*/ /* Task Priority Register */
      unsigned int tpr : 8;

/*090*/ /* Arbitration Priority Register */
      unsigned int apr : 8;

/*0A0*/ /* Processor Priority Register */
      unsigned int ppr : 8;

/*0D0*/ /* Logical Destination Register */
      unsigned int ldr : 8;

/*0E0*/ /* Destination Format Register */
      unsigned int dfr_model : 4;

/*0F0*/ struct { /* Spurious Interrupt Vector Register */
            unsigned int spurious_vector : 8;
            unsigned int apic_enabled : 1;
            unsigned int focus_cpu : 1;
      } svr;

/*100*/ /* In Service Register (256-bit bitmap, one bit per vector) */
      uint32_t isr[8];

/*180*/ /* Trigger Mode Register */
      uint32_t tmr[8];

/*200*/ /* Interrupt Request Register */
      uint32_t irr[8];

/*280*/ /* Error Status Register */
      unsigned int send_cs_error : 1;
      unsigned int receive_cs_error : 1;
      unsigned int send_accept_error : 1;
      unsigned int receive_accept_error : 1;
      unsigned int send_illegal_vector : 1;
      unsigned int receive_illegal_vector : 1;
      unsigned int illegal_register_address : 1;

/*300*/ struct { /* Interrupt Command Register 1 */
            unsigned int vector : 8;
            unsigned int delivery_mode : 3;
            unsigned int destination_mode : 1;
            unsigned int delivery_status : 1;
            unsigned int level : 1;
            unsigned int trigger : 1;
            unsigned int shorthand : 2;
      } icr1;

/*310*/ struct { /* Interrupt Command Register 2 */
            unsigned int destination : 8;
      } icr2;

/*320*/ struct { /* LVT - Timer */
            unsigned int vector : 8;
            unsigned int delivery_status : 1;
            unsigned int mask : 1;
            unsigned int timer_mode : 1;
      } lvt_timer;

/*340*/ struct { /* LVT - Performance Counter */
            unsigned int vector : 8;
            unsigned int delivery_mode : 3;
            unsigned int delivery_status : 1;
            unsigned int mask : 1;
      } lvt_pc;

/*350*/
/*360*/ struct { /* LVT - LINT0/1 */
            unsigned int vector : 8;
            unsigned int delivery_mode : 3;
            unsigned int delivery_status : 1;
            unsigned int polarity : 1;
            unsigned int remote_irr : 1;
            unsigned int trigger : 1;
            unsigned int mask : 1;
      } lvt_lint[2];

/*370*/ struct { /* LVT - Error */
            unsigned int vector : 8;
            unsigned int delivery_status : 1;
            unsigned int mask : 1;
      } lvt_error;

/*380*/ /* Timer Initial Count Register */
      uint32_t timer_icr;

/*390*/ /* Timer Current Count Register */
      uint32_t timer_ccr;

/*3E0*/ /* Timer Divide Configuration Register */
      unsigned int timer_dcr : 3;

      /* Simulator-internal timer bookkeeping (not guest-visible).
       * timer_event presumably holds the simulation time of the next
       * timer expiry -- TODO confirm against the .c implementation. */
      unsigned long long timer_event;
      int timer_running;

      /* Interrupt sources latched by the emulator. */
      int extint_pending;
      uint8_t extint_pri;
      int nmi_pending;
      int smi_pending;

      uint64_t base;    /* APIC base address (cf. MSR_IA32_APICBASE_BASE) */
      int apic_enable;
      int bsp;          /* bootstrap-processor flag (cf. MSR_IA32_APICBASE_BSP) */

#if 0
      struct sig_boolean_or *lint0;
      struct sig_boolean_or *lint1;
      struct sig_boolean *smi;
      struct sig_icc_bus *icc_bus;
      struct sig_host_bus_main *host_bus_main;
#endif

      /* NOTE(review): presumably the TSC-ticks-per-bus-clock ratio used
       * for timer emulation -- verify against the implementation. */
      unsigned long tsc_to_bus;
};

/* 3-633 ff */
/*
 * Model-specific register numbers.  Availability of individual MSRs is
 * gated on the emulated CPU generation (CONFIG_CPU) and per-feature
 * CONFIG_CPU_*_SUPPORT switches.
 */
enum cpu_msr {
#if 80686 <= CONFIG_CPU
      MSR_IA32_BIOS_SIGN_ID         = 0x8b,
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_TSC_SUPPORT
      MSR_IA32_TSC                  = 0x10,
#endif

#if 80486 <= CONFIG_CPU && CONFIG_CPU_APIC_SUPPORT
      MSR_IA32_APICBASE       = 0x1b,
#define MSR_IA32_APICBASE_BSP           (1<<8)
#define MSR_IA32_APICBASE_ENABLE        (1<<11)
#define MSR_IA32_APICBASE_BASE          (0xfffff<<12)
#endif

      MSR_EBL_CR_POWERON            = 0x2a,

      /* L2 Cache Control Register */
      MSR_BBL_CR_CTL3               = 0x11e,

#if 80486 <= CONFIG_CPU && CONFIG_CPU_SEP_SUPPORT
      MSR_IA32_SYSENTER_CS          = 0x174,
      MSR_IA32_SYSENTER_ESP         = 0x175,
      MSR_IA32_SYSENTER_EIP         = 0x176,
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_MTRR_SUPPORT
      MSR_IA32_MTRRCAP        = 0xfe,

      MSR_IA32_MTRR_PHYSBASE0       = 0x200,
      MSR_IA32_MTRR_PHYSMASK0       = 0x201,
      MSR_IA32_MTRR_PHYSBASE1       = 0x202,
      MSR_IA32_MTRR_PHYSMASK1       = 0x203,
      MSR_IA32_MTRR_PHYSBASE2       = 0x204,
      MSR_IA32_MTRR_PHYSMASK2       = 0x205,
      MSR_IA32_MTRR_PHYSBASE3       = 0x206,
      MSR_IA32_MTRR_PHYSMASK3       = 0x207,
      MSR_IA32_MTRR_PHYSBASE4       = 0x208,
      MSR_IA32_MTRR_PHYSMASK4       = 0x209,
      MSR_IA32_MTRR_PHYSBASE5       = 0x20a,
      MSR_IA32_MTRR_PHYSMASK5       = 0x20b,
      MSR_IA32_MTRR_PHYSBASE6       = 0x20c,
      MSR_IA32_MTRR_PHYSMASK6       = 0x20d,
      MSR_IA32_MTRR_PHYSBASE7       = 0x20e,
      MSR_IA32_MTRR_PHYSMASK7       = 0x20f,

      MSR_IA32_MTRR_FIX64K_00000    = 0x250,
      MSR_IA32_MTRR_FIX16K_80000    = 0x258,
      MSR_IA32_MTRR_FIX16K_A0000    = 0x259,
      MSR_IA32_MTRR_FIX4K_C0000     = 0x268,
      MSR_IA32_MTRR_FIX4K_C8000     = 0x269,
      MSR_IA32_MTRR_FIX4K_D0000     = 0x26a,
      MSR_IA32_MTRR_FIX4K_D8000     = 0x26b,
      MSR_IA32_MTRR_FIX4K_E0000     = 0x26c,
      MSR_IA32_MTRR_FIX4K_E8000     = 0x26d,
      MSR_IA32_MTRR_FIX4K_F0000     = 0x26e,
      MSR_IA32_MTRR_FIX4K_F8000     = 0x26f,
      MSR_IA32_MTRR_DEF_TYPE        = 0x2ff,
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_MCA_SUPPORT
      MSR_IA32_MCG_CAP        = 0x179,
      MSR_IA32_MCG_STATUS           = 0x17a,
      MSR_IA32_MCG_CTL        = 0x17b,
      MSR_IA32_MC0_CTL        = 0x400,
      MSR_IA32_MC0_STATUS           = 0x401,
      MSR_IA32_MC0_ADDR       = 0x402,
      MSR_IA32_MC0_MISC       = 0x403,
      MSR_IA32_MC1_CTL        = 0x404,
      MSR_IA32_MC1_STATUS           = 0x405,
      MSR_IA32_MC1_ADDR       = 0x406,
      MSR_IA32_MC1_MISC       = 0x407,
      MSR_IA32_MC2_CTL        = 0x408,
      MSR_IA32_MC2_STATUS           = 0x409,
      MSR_IA32_MC2_ADDR       = 0x40a,
      MSR_IA32_MC2_MISC       = 0x40b,
      MSR_IA32_MC3_CTL        = 0x40c,
      MSR_IA32_MC3_STATUS           = 0x40d,
      MSR_IA32_MC3_ADDR       = 0x40e,
      MSR_IA32_MC3_MISC       = 0x40f,
      MSR_IA32_MC4_CTL        = 0x410,
      MSR_IA32_MC4_STATUS           = 0x411,
      MSR_IA32_MC4_ADDR       = 0x412,
      MSR_IA32_MC4_MISC       = 0x413,
/* Machine-check bank accessors: bank x occupies 4 consecutive MSRs
 * starting at MSR_IA32_MC0_CTL.
 * BUGFIX: these previously expanded to the undefined identifiers
 * MSR_IA32_MCG_MC0_* and could never have compiled when used. */
#define MSR_IA32_MCx_CTL(x)         (MSR_IA32_MC0_CTL + (x) * 4)
#define MSR_IA32_MCx_STATUS(x)      (MSR_IA32_MC0_STATUS + (x) * 4)
#define MSR_IA32_MCx_ADDR(x)  (MSR_IA32_MC0_ADDR + (x) * 4)
#define MSR_IA32_MCx_MISC(x)  (MSR_IA32_MC0_MISC + (x) * 4)
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_PAT_SUPPORT
      MSR_IA32_PAT                  = 0x277, /* Page Attribute Table */
#endif

      CPU_N_MSR = 0x1000      /* Caution: must be highest! FIXME */
};

/* We cannot integrate the following MSRs into our normal MSR array.
 * We will save them as individual variables instead.
 * (Their numbers, 0xc0000080 and up, are far above CPU_N_MSR.) */
#define MSR_EFER        0xc0000080
#define MSR_EFER_SCE   (1 << 0)
#define MSR_EFER_LME   (1 << 8)
#define MSR_EFER_LMA   (1 << 10)
#define MSR_EFER_NXE   (1 << 11)
#define MSR_EFER_FFXSR (1 << 14)

#define MSR_STAR        0xc0000081
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
/* Long-mode-only syscall/segment-base MSRs. */
#define MSR_LSTAR       0xc0000082
#define MSR_CSTAR       0xc0000083
#define MSR_FMASK       0xc0000084
#define MSR_FSBASE            0xc0000100
#define MSR_GSBASE            0xc0000101
#define MSR_KERNELGSBASE      0xc0000102
#endif

/*
 * Lazy condition-code evaluation operations.  Each arithmetic op comes
 * in four sized variants in fixed B, W, L, Q order; code indexing an op
 * by operand size relies on that ordering, so do not reorder entries.
 */
enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_DIVB, /* modify all ZF flag */
    CC_OP_DIVW,
    CC_OP_DIVL,
    CC_OP_DIVQ,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C  */
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_NB,  /* number of CC ops -- keep last */
};

/* Use the host's 80-bit long double for x87 emulation when the host is
 * itself x86; otherwise fall back to 64-bit double (with reduced
 * precision relative to real hardware). */
#if (defined(__i386__) || defined(__x86_64__))
#define USE_X86LDOUBLE
#endif

#ifdef USE_X86LDOUBLE
typedef long double CPU86_LDouble;
#else
typedef double CPU86_LDouble;
#endif
/* typedef float float32; */
/* typedef double float64; */
typedef CPU86_LDouble floatx80;

/* in QEMU as fpu/softfloat-native.h */
typedef struct float_status {
    signed char float_rounding_mode;
#ifdef USE_X86LDOUBLE
    signed char floatx80_rounding_precision;
#endif
} float_status;

/* Cached (hidden) part of a segment register, loaded from a descriptor. */
typedef struct SegmentCache {
    uint32_t selector;   /* visible selector value */
    target_ulong base;   /* segment base address */
    uint32_t limit;      /* segment limit */
    uint32_t flags;      /* attribute bits (cf. the DESC_* masks above) */
} SegmentCache;

/* 128-bit SSE register; all members alias the same 16 bytes.
 * float32/float64 are project typedefs (see commented-out typedefs above). */
typedef union {
    uint8_t _b[16];
    uint16_t _w[8];
    uint32_t _l[4];
    uint64_t _q[2];
    float32 _s[4];
    float64 _d[2];
} XMMReg;

/* 64-bit MMX register; all members alias the same 8 bytes.
 * BUGFIX: _w was [2] and _l was [1], too small for the MMX_W/MMX_L
 * accessor macros below (they index _w[0..3] and _l[0..1]); QEMU's
 * MMXReg likewise uses _w[4] and _l[2].  The union's size (8 bytes,
 * set by q) and all existing member accesses are unchanged. */
typedef union {
    uint8_t _b[8];
    uint16_t _w[4];
    uint32_t _l[2];
    uint64_t q;
} MMXReg;

/*
 * Element accessors for XMMReg/MMXReg.  On big-endian hosts the index
 * is mirrored so that element 0 is always the least significant lane
 * of the guest register.
 */
#ifdef WORDS_BIGENDIAN
#define XMM_B(n) _b[15 - (n)]
#define XMM_W(n) _w[7 - (n)]
#define XMM_L(n) _l[3 - (n)]
#define XMM_S(n) _s[3 - (n)]
#define XMM_Q(n) _q[1 - (n)]
#define XMM_D(n) _d[1 - (n)]

#define MMX_B(n) _b[7 - (n)]
#define MMX_W(n) _w[3 - (n)]
#define MMX_L(n) _l[1 - (n)]
#else
#define XMM_B(n) _b[n]
#define XMM_W(n) _w[n]
#define XMM_L(n) _l[n]
#define XMM_S(n) _s[n]
#define XMM_Q(n) _q[n]
#define XMM_D(n) _d[n]

#define MMX_B(n) _b[n]
#define MMX_W(n) _w[n]
#define MMX_L(n) _l[n]
#endif
/* The 64-bit view is a scalar, so no mirroring is needed. */
#define MMX_Q(n) q


/* Guest page size: 4 KiB pages. */
#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
/* NOTE(review): expansion is unparenthesized; parses as ~((...) - 1). */
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)


#if 80486 <= CONFIG_CPU && CONFIG_CPU_LM_SUPPORT
/* 64bit virtual address support */
typedef uint64_t Vaddr;
#else
typedef uint32_t Vaddr;
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_PAE_SUPPORT
/* 36bit physical address support */
typedef uint64_t Paddr;
#else
typedef uint32_t Paddr;
#endif
/* Host address: a plain pointer into the simulator's address space. */
typedef char *Haddr;


/* One entry of the software TLB used to speed up guest memory access. */
typedef struct CPUTLBEntry {
      /*
       * Bit max to 12 : virtual address
       * Bit 11 to  4  : if non zero, memory io zone number
       * Bit 3         : indicates that the entry is invalid
       * Bit 2..0      : zero
       */
      target_ulong address;

      /*
       * Addend to virtual address to get address to be accessed.
       * WARNING: In case of RAM and ROM the address is the address
       * where the image is mapped into host address space and not the
       * emulated physical address!
       */
      Haddr host_addend;

      /* Addend to virtual address to get physical address. */
      Paddr phys_addend;
} CPUTLBEntry;

/* Number of general-purpose registers: 16 with long mode, 8 otherwise. */
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
#define CPU_NB_REGS 16
#else
#define CPU_NB_REGS 8
#endif

/* ES, CS, SS, DS, FS, GS */
#define CPU_NB_SEGS 6

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

#define CPU_TLB_BITS 8
#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)

/*
 * Constants
 */
#define CODE_GEN_MAX_SIZE     65536
#define CODE_GEN_ALIGN        16 /* must be >= of the size of a icache line */

#define CODE_GEN_HASH_BITS    15
#define CODE_GEN_PHYS_HASH_BITS     15


/*
 * maximum total translated code allocated:
 * NOTE: the translated code area cannot be too big because on some
 * archs the range of "fast" function calls is limited. Here is a
 * summary of the ranges:
 *
 * i386  : signed 32 bits
 * arm   : signed 26 bits
 * ppc   : signed 24 bits
 * sparc : signed 32 bits
 * alpha : signed 23 bits
 */
#if defined(__alpha__)
#define CODE_GEN_BUFFER_SIZE    (2 * 1024 * 1024)
#elif defined(__ia64)
#define CODE_GEN_BUFFER_SIZE    (4 * 1024 * 1024)       /* range of addl */
#elif defined(__powerpc__)
#define CODE_GEN_BUFFER_SIZE    (6 * 1024 * 1024)
#else
#define CODE_GEN_BUFFER_SIZE    (8 * 1024 * 1024) /* FIXME? fox - current QEMU has 16 */
#endif

#define CODE_GEN_HASH_SIZE      (1 << CODE_GEN_HASH_BITS)
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)

/* Two-level split of a 32-bit address for the page-descriptor table. */
#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
 *    according to the host CPU */
#define CODE_GEN_AVG_BLOCK_SIZE 128

#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)

/* Hosts on which patching direct jumps between chained TBs is supported. */
#if defined(__powerpc__)
#define USE_DIRECT_JUMP
#endif
#if (defined(__i386__) || defined(__x86_64__)) && !defined(_WIN32)
#define USE_DIRECT_JUMP
#endif

/* One block of guest code translated into host code. */
typedef struct TranslationBlock {
      target_ulong pc;  /* simulated PC corresponding to this block
                           (EIP + CS base) */
      target_ulong cs_base;   /* CS base for this block */
      unsigned int flags;     /* flags defining in which context the code
                           was generated */
      uint16_t size;          /* size of target code for this block
                           (1 <= size <= TARGET_PAGE_SIZE) */
      uint16_t cflags;  /* compile flags */
#define CF_TB_FP_USED   0x0001      /* fp ops are used in TB */
#define CF_FP_USED      0x0002      /* fp ops are used in TB or in a chained TB */
#define CF_SINGLE_INSN  0x0004      /* compile only a single instruction */

      uint8_t *tc_ptr;  /* pointer to the translated code */
      struct TranslationBlock *hash_next; /* next matching tb for virtual
                                     address */
      /* next matching tb for physical address. */
      struct TranslationBlock *phys_hash_next;
      /* first and second physical page containing code. The lower bit
         of the pointer tells the index in page_next[] */
      struct TranslationBlock *page_next[2];
      target_ulong page_addr[2];

      /* the following data are used to directly call another TB from
         the code of this one. */
      uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
      uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
      uint8_t *tb_next[2]; /* address of jump generated code */
#endif
      /* list of TBs jumping to this one. This is a circular list using
         the two least significant bits of the pointers to tell what is
         the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 = jmp_first */
      struct TranslationBlock *jmp_next[2];
      struct TranslationBlock *jmp_first;
} TranslationBlock;

/* Per-guest-page bookkeeping for the translator. */
typedef struct PageDesc {
      /* list of TBs intersecting this ram page */
      TranslationBlock *first_tb;
      /*
       * in order to optimize self modifying code, we count the number
       * of lookups we do to a given page to use a bitmap
       */
      unsigned int code_write_count;
      uint8_t *code_bitmap;   /* allocated lazily; NULL until needed */
} PageDesc;

/*
 * Mapping of frequently used emulator values onto fixed host registers
 * (AREG0 = env pointer; AREG1.. = temporaries / guest registers).
 * Each value falls back to an env-> field when no host register is
 * reserved for it.
 */
#ifdef __i386__
#define AREG0 "ebp"
#endif
#ifdef __x86_64__
#define AREG0 "rbp"
#endif

#if TARGET_LONG_BITS <= HOST_LONG_BITS
/* Host registers can be used to store target registers. */

#ifdef __i386__
#define AREG1 "ebx"
// #define AREG2 "esi" /* Otherwise we'll get internal compiler errors. */
#define AREG3 "edi"
#endif
#ifdef __x86_64__
#define AREG1 "rbx"
#define AREG2 "r12"
#define AREG3 "r13"
#if 0 /* These two lines are commented out without comment in current QEMU */
#define AREG4 "r14"
#define AREG5 "r15"
#endif
#endif

#endif /* TARGET_LONG_BITS <= HOST_LONG_BITS */

#ifdef FAST

#ifdef AREG1
register target_ulong T0 asm(AREG1);
#else
#define T0 (env->t0)
#endif

#ifdef AREG2
register target_ulong T1 asm(AREG2);
#else
#define T1 (env->t1)
#endif

#ifdef AREG3
register target_ulong T2 asm(AREG3);
#else
#define T2 (env->t2)
#endif

#ifdef AREG4
register target_ulong EAX asm(AREG4);
#else
#define EAX env->regs[R_EAX]
#endif

#ifdef AREG5
register target_ulong ESP asm(AREG5);
#else
#define ESP env->regs[R_ESP]
#endif

#ifdef AREG6
register target_ulong EBP asm(AREG6);
#else
#define EBP env->regs[R_EBP]
#endif

#ifdef AREG7
register target_ulong ECX asm(AREG7);
#else
#define ECX env->regs[R_ECX]
#endif

#ifdef AREG8
register target_ulong EDX asm(AREG8);
#else
#define EDX env->regs[R_EDX]
#endif

#ifdef AREG9
register target_ulong EBX asm(AREG9);
#else
#define EBX env->regs[R_EBX]
#endif

#ifdef AREG10
register target_ulong ESI asm(AREG10);
#else
#define ESI env->regs[R_ESI]
#endif

#ifdef AREG11
register target_ulong EDI asm(AREG11);
#else
#define EDI env->regs[R_EDI]
#endif

/* A0 is the address temporary, aliased onto T2. */
#define A0 T2

#define EIP  (env->eip)
#define DF  (env->df)

/* Lazy condition-code state (cf. the CC_OP_* enum above). */
#define CC_SRC (env->cc_src)
#define CC_DST (env->cc_dst)
#define CC_OP  (env->cc_op)

/* float macros: FPU stack access relative to the top-of-stack index. */
#define FT0    (env->ft0)
#define ST0    (env->fpregs[env->fpstt].d)
#define ST(n)  (env->fpregs[(env->fpstt + (n)) & 7].d)
#define ST1    ST(1)

#ifdef USE_FP_CONVERT
#define FP_CONVERT  (env->fp_convert)
#endif

#endif /* FAST */

/*
 * Complete state of one simulated x86 CPU: architectural registers,
 * FPU/SSE state, MSRs, and the emulator-internal bookkeeping
 * (condition codes, soft-MMU TLBs, translated-code caches, signals).
 *
 * NOTE(review): field order likely matters — generated code appears
 * to access this struct through the env global register; do not
 * reorder fields without auditing offset users.
 */
struct cpu {
      /* temporaries if we cannot store them in host registers */
#ifndef AREG1
      target_ulong t0;
#endif
#ifndef AREG2
      target_ulong t1;
#endif
#ifndef AREG3
      target_ulong t2;
#endif

      /* standard registers */
      target_ulong regs[CPU_NB_REGS];
      target_ulong eip;
      target_ulong eflags; /* eflags register. During CPU emulation, CC
                        flags and DF are set to zero because they are
                        stored elsewhere */

      /* emulator internal eflags handling */
      target_ulong cc_src; /* operands of the last flag-setting op */
      target_ulong cc_dst;
      uint32_t cc_op;      /* which operation produced cc_src/cc_dst */
      int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
      uint32_t hflags; /* hidden flags, see HF_xxx constants */

      /* Hardware Configuration */
      unsigned int apic_cluster_id;
      unsigned int apic_arbitration_id;
      /* ... */

      /* APIC stuff. */
      struct local_apic apic;

      /* FPU state */
      unsigned int fpstt; /* top of stack index */
      unsigned int fpus;  /* FPU status word */
      unsigned int fpuc;  /* FPU control word */
      uint8_t fptags[8];   /* 0 = valid, 1 = empty */
      union {
#ifdef USE_X86LDOUBLE
            /* 16-byte alignment needed when long double is used */
            CPU86_LDouble d __attribute__((aligned(16)));
#else
            CPU86_LDouble d;
#endif
            MMXReg mmx; /* MMX registers alias the FP register file */
      } fpregs[8];

      /* emulator internal variables */
      float_status fp_status;
      CPU86_LDouble ft0; /* FP temporary, see FT0 macro */
      union {
            float f;
            double d;
            int i32;
            int64_t i64;
      } fp_convert; /* scratch for FP<->int conversion (FP_CONVERT macro) */

      float_status sse_status;

      uint32_t mxcsr; /* SSE control/status register */
      XMMReg xmm_regs[CPU_NB_REGS];
      XMMReg xmm_t0; /* SSE temporary */
      MMXReg mmx_t0; /* MMX temporary */

      /* segments */
      SegmentCache segs[CPU_NB_SEGS]; /* selector values */
      SegmentCache ldt;
      SegmentCache tr;
      SegmentCache gdt; /* only base and limit are used */
      SegmentCache idt; /* only base and limit are used */

      /* msr registers */
#if 80686 <= CONFIG_CPU
      uint32_t update_signature; /* microcode update signature MSR */
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_SEP_SUPPORT
      uint32_t sysenter_cs; /* sysenter registers */
      uint32_t sysenter_esp;
      uint32_t sysenter_eip;
#endif
      uint64_t efer; /* EFER MSR */
      uint64_t star; /* STAR MSR */
#if 80486 <= CONFIG_CPU && CONFIG_CPU_LM_SUPPORT
      /* long-mode syscall MSRs */
      target_ulong lstar;
      target_ulong cstar;
      target_ulong fmask;
      target_ulong kernelgsbase;
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_PAT_SUPPORT
      uint8_t pat[8]; /* page attribute table MSR, one byte per entry */
#endif
      /* All "normal" MSRs are kept in an array */
      struct {
            uint32_t low;
            uint32_t high;
      } msr[CPU_N_MSR];
      /* latched levels of the corresponding sig_* signals below
         — presumably; confirm against the signal handlers */
      unsigned int state_power;
      unsigned int state_n_reset;
#if 80386 <= CONFIG_CPU
      unsigned int state_n_init;
#endif

      /* exception/interrupt handling */
      jmp_buf jmp_env; /* longjmp target for leaving translated code */
      int exception_index;
      int error_code;
      int exception_is_int; /* nonzero if raised by an INT instruction */
      target_ulong exception_next_eip;
      struct TranslationBlock *current_tb; /* currently executing TB */
      target_ulong cr[5]; /* NOTE: cr1 is unused */
      target_ulong dr[8]; /* debug registers */
#define CPU_INTERRUPT_IRQ     0x1 /* normal interrupt pending */
#define CPU_INTERRUPT_NMI     0x2 /* non-maskable interrupt pending */
#define CPU_INTERRUPT_SMI     0x4 /* system management interrupt pending */
      int interrupt_request; /* bitmask of CPU_INTERRUPT_* above */
      /* System Management Mode Base Address (SMBASE) */
      uint32_t smbase;
      int smm; /* whether CPU is in system management mode (SMM) */

      Paddr a20_mask; /* mask applied to physical addresses (A20 gate) */

      /* soft mmu support */
      /* in order to avoid passing too many arguments to the memory
         write helpers, we store some rarely used information in the CPU
         context) */
      unsigned long mem_write_pc; /* host pc at which the memory was
                              written */
      target_ulong mem_write_vaddr; /* target virtual addr at which the
                              memory was written */
      /* 0 = kernel, 1 = user */
      CPUTLBEntry tlb_read[2][CPU_TLB_SIZE];
      CPUTLBEntry tlb_write[2][CPU_TLB_SIZE];
      CPUTLBEntry tlb_code[2][CPU_TLB_SIZE];

      /* Code Generation */
      struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];

      /* buffer receiving the generated host code */
      uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE]
                  __attribute__((aligned(32)));
      uint8_t *code_gen_ptr; /* next free byte in code_gen_buffer */

      PageDesc *l1_map[L1_SIZE]; /* first level of the page descriptor map */

      TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
      int nb_tbs; /* number of tbs[] entries in use */

      int tb_invalidated_flag;

      TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

      /* Signals */
      struct sig_boolean *sig_power;
      struct sig_boolean *sig_n_reset;
#if 80386 <= CONFIG_CPU
      struct sig_boolean *sig_n_init;
#endif
#if CONFIG_CPU < 80386
      struct sig_isa_bus_main *isa_bus_main;
#else
      struct sig_host_bus_main *host_bus_main;
#endif
      struct sig_boolean_or *sig_irq;
      struct sig_boolean_or *sig_nmi;
#if 80386 <= CONFIG_CPU
      struct sig_boolean *sig_smi;
#endif
      struct sig_boolean_or *sig_n_ferr;
      struct sig_boolean *sig_n_ignne;
      int state_n_ignne; /* latched level of sig_n_ignne */
#if 80386 <= CONFIG_CPU
      struct sig_boolean *sig_a20;
#endif
#if 80486 <= CONFIG_CPU && CONFIG_CPU_APIC_SUPPORT
      struct sig_icc_bus *icc_bus;
#endif

      /* CPU is simulated by process. */
      struct process process;
};
/* QEMU-compatibility alias: QEMU-derived code says "struct CPUState",
   which this header names "struct cpu". */
#define CPUState cpu

/* Pointer to the current CPU's state, pinned to host register AREG0
   so translated code can reach it without reloading. */
register struct CPUState *env asm(AREG0);

/* CPU simulation entry point; declared noreturn — it never returns
   to its caller. */
extern void __attribute__((__noreturn__))
NAME_(step)(void *css);

#endif /* __CPU_STATE_H_INCLUDED */

/* (Removed HTML-extraction artifact: "Generated by Doxygen 1.6.0 / Back
   to index" page footer — not part of the original header.) */