
/*
 * $Id: arch_gen_cpu_x86_mmu.c,v 1.57 2009-02-18 16:15:26 vrsieh Exp $
 *
 * Derived from QEMU sources.
 *  
 *  Copyright (c) 2005-2009 FAUmachine Team.
 *  Copyright (c) 2003 Fabrice Bellard.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
 * USA
 */

/*
 * Constants
 */
#define PAGE_READ       0x0001
#define PAGE_WRITE      0x0002

void
NAME_(mmu_unmap_range)(unsigned long pa, unsigned long len)
{
      /* Conservative: rather than invalidating just [pa, pa + len),
       * flush all TLB entries, including global ones. */
      NAME_(mmu_flush_all)(1);
}

static inline void
cpu_tlb_protect1(CPUTLBEntry *tlb_entry, unsigned long pa)
{
      if ((tlb_entry->address & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
                  + tlb_entry->phys_addend != pa) {
            /* Different address in cache. */
            return;
      }

      switch (tlb_entry->address & ~TARGET_PAGE_MASK) {
      case IO_MEM_CODE:
      case IO_MEM_IO_CODE:
            /* Already protected. */
            break;
      case 0: /* RAM */
            tlb_entry->address
                  = (tlb_entry->address & TARGET_PAGE_MASK)
                  | IO_MEM_CODE;
            break;
      case IO_MEM_IO:
            tlb_entry->address
                  = (tlb_entry->address & TARGET_PAGE_MASK)
                  | IO_MEM_IO_CODE;
            break;
      default:
            assert(0);
      }
}

/*
 * Update the TLBs so that writes to code in the physical page 'pa'
 * can be detected.
 */
void
NAME_(tlb_protect)(unsigned long pa)
{
      int i;

      pa &= TARGET_PAGE_MASK;
      for (i = 0; i < CPU_TLB_SIZE; i++) {
            cpu_tlb_protect1(&env->tlb_write[0][i], pa);
            cpu_tlb_protect1(&env->tlb_write[1][i], pa);
      }

#if USE_KFAUM
      if (NAME_(kfaum_enabled)) {
            NAME_(kfaum_protect_page)(pa);
      }
#endif
}
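
/*
 * Sketch of the intended self-modifying-code cycle (illustration only,
 * not compiled; the invalidation step is assumed to live behind the
 * IO_MEM_CODE write handlers listed in NAME_(io_mem_write) below):
 */
#if 0
first_tb = NAME_(tb_find_alloc)(paddr); /* page now holds translated code */
NAME_(tlb_protect)(paddr);              /* writes now trap via IO_MEM_(IO_)CODE */
/*
 * ... a guest store to the page takes the slow write path, the stale
 * translation blocks are invalidated there, and afterwards ...
 */
NAME_(tlb_unprotect)(paddr);            /* plain RAM writes are fast again */
#endif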

static inline void
cpu_tlb_unprotect1(CPUTLBEntry *tlb_entry, unsigned long pa)
{
      if ((tlb_entry->address & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
                  + tlb_entry->phys_addend != pa) {
            /* Different address in cache. */
            return;
      }

      switch (tlb_entry->address & ~TARGET_PAGE_MASK) {
      case IO_MEM_RAM:
      case IO_MEM_IO:
            /* Already unprotected. */
            break;
      case IO_MEM_CODE:
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK)
                  | IO_MEM_RAM;
            break;
      case IO_MEM_IO_CODE:
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK)
                  | IO_MEM_IO;
            break;
      default:
            assert(0);
      }
}

/*
 * Update the TLB so that writes to the physical page 'pa' are no
 * longer checked for self-modifying code.
 */
void
NAME_(tlb_unprotect)(unsigned long pa)
{
      int i;

      pa &= TARGET_PAGE_MASK;
      for (i = 0; i < CPU_TLB_SIZE; i++) {
            cpu_tlb_unprotect1(&env->tlb_write[0][i], pa);
            cpu_tlb_unprotect1(&env->tlb_write[1][i], pa);
      }
}

static inline __attribute__((always_inline)) uint32_t
ldl_phys(Paddr addr)
{
      uint32_t val;

      assert(! (addr & 3));
      val = NAME_(mr_data_l)(addr);

      return val;
}

static inline __attribute__((always_inline)) void
stl_phys(Paddr addr, uint32_t val)
{
      assert(! (addr & 3));
      NAME_(mw_data_l)(addr, val);
}

static inline __attribute__((always_inline)) uint64_t
ldq_phys(Paddr addr)
{
      uint32_t val1;
      uint32_t val2;

      assert(! (addr & 3));
      val1 = NAME_(mr_data_l)(addr + 0);
      val2 = NAME_(mr_data_l)(addr + 4);

      return (((uint64_t) val2) << 32) | val1;
}
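
/*
 * Worked example: a 64-bit PAE page-table entry at physical address
 * 0x1000 is assembled from two 32-bit bus reads, low half first:
 *
 *      pte = ldq_phys(0x1000);
 *        low  32 bits <- NAME_(mr_data_l)(0x1000)
 *        high 32 bits <- NAME_(mr_data_l)(0x1004)
 *
 * Note that the two halves are read with two separate bus accesses,
 * i.e. not atomically.
 */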

/*
 * Add a new TLB entry. At most one entry for a given virtual address
 * is permitted.
 */
static void
tlb_set_page(
      Vaddr vaddr,
      Paddr paddr,
      int wflag,
      int is_user
)
{
      Haddr haddr_mr;
      Haddr haddr_mw;
      Haddr haddr_mx;
      int ret;
      TranslationBlock *first_tb;
      unsigned int index_;
      Vaddr address;
      Haddr addend;
      Paddr phys_addend;

      assert(! (vaddr & 0xfff));
      assert(! (paddr & 0xfff));
      paddr &= env->a20_mask;

      ret = NAME_(map)(paddr, &haddr_mr, &haddr_mw, &haddr_mx);

      /* NOTE: we also allocate the page at this stage */
      first_tb = NAME_(tb_find_alloc)(paddr);

      index_ = (vaddr >> 12) & (CPU_TLB_SIZE - 1);

      phys_addend = paddr - vaddr;

      /*
       * Memory Data Read
       */
      if (ret == 1
       || ! haddr_mr) {
            /* I/O Case */
            address = vaddr | IO_MEM_IO;
            addend = NULL;
      } else {
            /* Standard Memory */
            address = vaddr | IO_MEM_RAM;
            addend = haddr_mr - vaddr;
      }
      env->tlb_read[is_user][index_].address = address;
      env->tlb_read[is_user][index_].host_addend = addend;
      env->tlb_read[is_user][index_].phys_addend = phys_addend;
      
      /*
       * Memory Write
       */
      if (wflag) {
            if (first_tb) {
                  if (ret == 1
                   || ! haddr_mw) {
                        address = vaddr | IO_MEM_IO_CODE;
                        addend = NULL;
                  } else {
                        address = vaddr | IO_MEM_CODE;
                        addend = haddr_mw - vaddr;
                  }
            } else {
                  if (ret == 1
                   || ! haddr_mw) {
                        address = vaddr | IO_MEM_IO;
                        addend = NULL;
                  } else {
                        address = vaddr | IO_MEM_RAM;
                        addend = haddr_mw - vaddr;
                  }
            }
      } else {
            address = -1;
            addend = NULL;
      }
      env->tlb_write[is_user][index_].address = address;
      env->tlb_write[is_user][index_].host_addend = addend;
      env->tlb_write[is_user][index_].phys_addend = phys_addend;

      /*
       * Memory Code Read
       */
      if (ret == 1
       || ! haddr_mr) {
            /* I/O Case */
            address = vaddr | IO_MEM_IO;
            addend = NULL;
      } else {
            /* Standard Memory */
            address = vaddr | IO_MEM_RAM;
            addend = haddr_mx - vaddr;
      }
      env->tlb_code[is_user][index_].address = address;
      env->tlb_code[is_user][index_].host_addend = addend;
      env->tlb_code[is_user][index_].phys_addend = phys_addend;
}
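
/*
 * How the entries filled in above are meant to be consumed on the fast
 * path (illustration only, not compiled; the real access code is
 * generated from arch_gen_cpu_x86_mmu_template.c). Because IO_MEM_RAM
 * is 0 (see "case 0: RAM" in cpu_tlb_protect1() above), a plain tag
 * compare only matches RAM entries:
 */
#if 0
index_ = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
if (env->tlb_read[is_user][index_].address == (vaddr & TARGET_PAGE_MASK)) {
      /* RAM hit: direct host access through the precomputed addend. */
      val = *(uint32_t *) (vaddr + env->tlb_read[is_user][index_].host_addend);
} else {
      /* Miss or I/O: slow path, which may end up back in tlb_set_page(). */
}
#endif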

#if 0
static void
NAME_(dump)(unsigned int level, uint32_t addr, uint32_t prefix)
{
      unsigned int nr;

      if (! loglevel) {
            return;
      }
      if (level == 0) {
            fprintf(stderr, "\nPage Table at %08lx:\n", (unsigned long) addr);
      }

      for (nr = 0; nr < 1024; nr++) {
            uint32_t pde;

            pde = ldl_phys(addr + nr * sizeof(pde));
            if (level == 0) {
                  fprintf(stderr, "\t");
            } else {
                  fprintf(stderr, "\t\t");
            }
            if (level == 0) {
                  fprintf(stderr, "%08lx: %08lx\n",
                              (unsigned long) nr << 22,
                              (unsigned long) pde);
            } else {
                  fprintf(stderr, "%08lx: %08lx\n",
                              prefix + ((unsigned long) nr << 12),
                              (unsigned long) pde);
            }

            if (level == 0
             && (pde & 1)) {
                  NAME_(dump)(1, pde & ~0xfff, (unsigned long) nr << 22);
            }
      }
}
#endif

#define PHYS_ADDR_MASK 0xfffff000
/*
 * Return value:
 * 0 = nothing more to do.
 * 1 = generate page-fault.
 */
int
NAME_(mmu_fault)(
      Vaddr addr,
      int want_wflag,
      int want_uflag
)
{
      uint32_t pde_addr;
      uint32_t pdpe_addr;
      uint32_t pte_addr;
      uint64_t pte;
      Vaddr virt_addr;
      uint64_t ptep;
      int error_code;
      int is_dirty;
      int uflag;
      int wflag;
      int dflag;
      int page_size;
      unsigned long paddr;
      Vaddr vaddr;
      unsigned long page_offset;

      if (! (env->cr[0] & CPU_CR0_PG_MASK)) {
            pte = addr;
            virt_addr = addr & TARGET_PAGE_MASK;
            uflag = 0;
            wflag = 1;
            dflag = 1;
            page_size = 4096;
            goto do_mapping;
      }

      if (env->cr[4] & CPU_CR4_PAE_MASK) { /* FIXME (fox): this whole PAE branch needs reworking */
            uint64_t pde, pdpe;

            /* XXX: we only use 32 bit physical addresses */
#if CONFIG_CPU >= 80486 && CONFIG_CPU_LM_SUPPORT
            if (env->hflags & HF_LMA_MASK) {
                  uint32_t pml4e_addr;
                  uint64_t pml4e;
                  int32_t sext;
                  
                  /* test virtual address sign extension */
                  sext = (int64_t) addr >> 47;
                  if (sext != 0 && sext != -1) {
                        env->error_code = 0;
                        env->exception_index = CPU_FAULT_GP;
                        return 1;
                  }
                  
                  pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask;
                  pml4e = ldq_phys(pml4e_addr);
                  if (!(pml4e & PG_PRESENT_MASK)) {
                        error_code = 0;
                        goto do_fault;
                  }
                  if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                        error_code = PG_ERROR_RSVD_MASK;
                        goto do_fault;
                  }
                  if (!(pml4e & PG_ACCESSED_MASK)) {
                        pml4e |= PG_ACCESSED_MASK;
                        stl_phys(pml4e_addr, pml4e);
                  }
                  ptep = pml4e ^ PG_NX_MASK;
                  pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
                  pdpe = ldq_phys(pdpe_addr);
                  if (!(pdpe & PG_PRESENT_MASK)) {
                        error_code = 0;
                        goto do_fault;
                  }
                  if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                        error_code = PG_ERROR_RSVD_MASK;
                        goto do_fault;
                  }
                  ptep &= pdpe ^ PG_NX_MASK;
                  if (!(pdpe & PG_ACCESSED_MASK)) {
                        pdpe |= PG_ACCESSED_MASK;
                        stl_phys(pdpe_addr, pdpe);
                  }
            } else
#endif /* CONFIG_CPU_LM_SUPPORT */
            {
                  /* XXX: load them when cr3 is loaded? */
                  pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) & env->a20_mask;
                  pdpe = ldq_phys(pdpe_addr);
                  if (!(pdpe & PG_PRESENT_MASK)) {
                        error_code = 0;
                        goto do_fault;
                  }
                  ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
            pde = ldq_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK)) {
                  error_code = 0;
                  goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
                  error_code = PG_ERROR_RSVD_MASK;
                  goto do_fault;
            }
            ptep &= pde ^ PG_NX_MASK;
            if (pde & PG_PSE_MASK) {
                  /* 2 MB page */
                  page_size = 2048 * 1024;
                  ptep ^= PG_NX_MASK;
#if 0 /* FIXME? (fox): in the original QEMU code, is_write1 == 2 denotes
       * an instruction fetch; this walker has no such access type. */
                  if ((ptep & PG_NX_MASK) && is_write1 == 2)
                        goto do_fault_protect;
#endif
                  if (want_uflag) {
                        if (! (ptep & PG_USER_MASK))
                              goto do_fault_protect;
                        if (want_wflag
                         && ! (ptep & PG_RW_MASK))
                              goto do_fault_protect;
                  } else {
                        if ((env->cr[0] & CPU_CR0_WP_MASK)
                         && want_wflag
                         && ! (ptep & PG_RW_MASK))
                              goto do_fault_protect;
                  }
                  is_dirty = want_wflag && ! (pde & PG_DIRTY_MASK);
                  if (! (pde & PG_ACCESSED_MASK)
                   || is_dirty) {
                        pde |= PG_ACCESSED_MASK;
                        if (is_dirty)
                              pde |= PG_DIRTY_MASK;
                        stl_phys(pde_addr, pde);
                  }
                  /* align to page_size */
                  pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff); 
                  virt_addr = addr & ~(page_size - 1);
            } else {
                  /* 4 KB page */
                  if (!(pde & PG_ACCESSED_MASK)) {
                        pde |= PG_ACCESSED_MASK;
                        stl_phys(pde_addr, pde);
                  }
                  pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
                  pte = ldq_phys(pte_addr);
                  if (!(pte & PG_PRESENT_MASK)) {
                        error_code = 0;
                        goto do_fault;
                  }
                  if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                        error_code = PG_ERROR_RSVD_MASK;
                        goto do_fault;
                  }
                  /* combine pde and pte nx, user and rw protections */
                  ptep &= pte ^ PG_NX_MASK;
                  ptep ^= PG_NX_MASK;
#if 0 /* FIXME? (fox): see the note on is_write1 == 2 above. */
                  if ((ptep & PG_NX_MASK) && is_write1 == 2)
                        goto do_fault_protect;
#endif
                  if (want_uflag) {
                        if (! (ptep & PG_USER_MASK))
                              goto do_fault_protect;
                        if (want_wflag
                         && ! (ptep & PG_RW_MASK))
                              goto do_fault_protect;
                  } else {
                        if ((env->cr[0] & CPU_CR0_WP_MASK)
                         && want_wflag
                         && !(ptep & PG_RW_MASK))
                              goto do_fault_protect;
                  }
                  is_dirty = want_wflag && ! (pte & PG_DIRTY_MASK);
                  if (! (pte & PG_ACCESSED_MASK)
                   || is_dirty) {
                        pte |= PG_ACCESSED_MASK;
                        if (is_dirty)
                              pte |= PG_DIRTY_MASK;
                        stl_phys(pte_addr, pte);
                  }
                  page_size = 4096;
                  virt_addr = addr & ~0xfff;
                  pte = pte & (PHYS_ADDR_MASK | 0xfff);
            }
      } else { /* end of the PAE branch flagged FIXME above */
            uint32_t pde;

            /* Page directory entry. */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (! (pde & PG_PRESENT_MASK)) {
                  error_code = 0;
                  goto do_fault;
            }
            /* If PSE bit is set, then we use a 4MB page. */
            if ((pde & PG_PSE_MASK)
             && (env->cr[4] & CPU_CR4_PSE_MASK)) {
                  if (want_uflag) {
                        if (! (pde & PG_USER_MASK))
                              goto do_fault_protect;
                        if (want_wflag
                         && ! (pde & PG_RW_MASK))
                              goto do_fault_protect;
                  } else {
                        if ((env->cr[0] & CPU_CR0_WP_MASK)
                         && want_wflag
                         && ! (pde & PG_RW_MASK))
                              goto do_fault_protect;
                  }
                  is_dirty = want_wflag && ! (pde & PG_DIRTY_MASK);
                  if (! (pde & PG_ACCESSED_MASK)
                   || is_dirty) {
                        pde |= PG_ACCESSED_MASK;
                        if (is_dirty)
                              pde |= PG_DIRTY_MASK;
                        stl_phys(pde_addr, pde);
                  }
      
                  pte = pde & ~0x003ff000; /* align to 4MB */
                  ptep = pte;
                  page_size = 4096 * 1024;
                  virt_addr = addr & ~0x003fffff;
      
            } else {
                  if (! (pde & PG_ACCESSED_MASK)) {
                        pde |= PG_ACCESSED_MASK;
                        stl_phys(pde_addr, pde);
                  }
      
                  /* Page table entry. */
                  pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc))
                          & env->a20_mask;
                  pte = ldl_phys(pte_addr);
                  if (! (pte & PG_PRESENT_MASK)) {
                        error_code = 0;
                        goto do_fault;
                  }
                  /* Combine pde and pte user and rw protections. */
                  ptep = pte & pde;
                  if (want_uflag) {
                        if (! (ptep & PG_USER_MASK))
                              goto do_fault_protect;
                        if (want_wflag
                         && ! (ptep & PG_RW_MASK))
                              goto do_fault_protect;
                  } else {
                        if ((env->cr[0] & CPU_CR0_WP_MASK)
                         && want_wflag
                         && !(ptep & PG_RW_MASK))
                              goto do_fault_protect;
                  }
                  is_dirty = want_wflag && ! (pte & PG_DIRTY_MASK);
                  if (! (pte & PG_ACCESSED_MASK)
                   || is_dirty) {
                        pte |= PG_ACCESSED_MASK;
                        if (is_dirty)
                              pte |= PG_DIRTY_MASK;
                        stl_phys(pte_addr, pte);
                  }
      
                  page_size = 4096;
                  virt_addr = addr & ~0xfff;
            }
      }

      /* The page can be put in the TLB. */
      uflag = (ptep >> PG_USER_BIT) & 1;
      if ((env->cr[0] & CPU_CR0_WP_MASK)
       || want_uflag) {
            wflag = (ptep >> PG_RW_BIT) & 1;
      } else {
            wflag = 1;
      }
      dflag = (pte >> PG_DIRTY_BIT) & 1;

do_mapping:
      /*
       * Even if 4MB pages, we map only one 4KB page in the cache to
       * avoid filling it too fast.
       */
      page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
      paddr = (pte & TARGET_PAGE_MASK) + page_offset;
      vaddr = virt_addr + page_offset;

      /* FIXME VOSSI */
      /*
       * Map pages whose dirty bit is still clear as read-only, so the
       * first write faults again and the walk above sets PG_DIRTY.
       */
      wflag &= dflag;

      tlb_set_page(vaddr, paddr, wflag, want_uflag);
      return 0;

do_fault_protect:
      error_code = PG_ERROR_P_MASK;
do_fault:
      env->cr[2] = addr;
      env->error_code = (want_wflag << PG_ERROR_W_BIT) | error_code;
      if (want_uflag)
            env->error_code |= PG_ERROR_U_MASK;
      return 1;
}
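
/*
 * Typical caller pattern (illustration only, not compiled;
 * raise_exception() is a placeholder for whatever fault dispatch the
 * CPU core uses, and CPU_FAULT_PF is assumed by analogy with
 * CPU_FAULT_GP above):
 */
#if 0
if (NAME_(mmu_fault)(addr, want_wflag, want_uflag)) {
      /* env->cr[2] and env->error_code were filled in by the walker. */
      raise_exception(CPU_FAULT_PF);
}
#endif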

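/*
 * Translate a virtual address through the code TLB. On a miss the
 * dummy ldub_code() below forces a TLB fill (which may itself fault)
 * before phys_addend is used.
 */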
Paddr
NAME_(tlb_virt_to_phys)(Vaddr va)
{
      unsigned int hash;
      unsigned int is_user;

      hash = (va >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
      is_user = ((env->hflags & HF_CPL_MASK) == 3);
      if (__builtin_expect(env->tlb_code[is_user][hash].address
                  != (va & TARGET_PAGE_MASK), 0)) {
            (void) ldub_code(va);
      }
      
      return va + env->tlb_code[is_user][hash].phys_addend;
}


static inline void
tlb_flush_entry(CPUTLBEntry *tlb_entry, Vaddr addr)
{
      if (addr == (tlb_entry->address & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
            tlb_entry->address = -1;
}

/* XXX: also flush 4MB pages */
void
NAME_(mmu_invlpg)(Vaddr addr)
{
      int i;
      TranslationBlock *tb;

      /*
       * Must reset current TB so that interrupts cannot modify the
       * links while we are modifying them.
       */
      env->current_tb = NULL;

      addr &= TARGET_PAGE_MASK;
      i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
      tlb_flush_entry(&env->tlb_read[0][i], addr);
      tlb_flush_entry(&env->tlb_write[0][i], addr);
      tlb_flush_entry(&env->tlb_code[0][i], addr);
      tlb_flush_entry(&env->tlb_read[1][i], addr);
      tlb_flush_entry(&env->tlb_write[1][i], addr);
      tlb_flush_entry(&env->tlb_code[1][i], addr);

      for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
            tb = env->tb_jmp_cache[i];
            if (tb
             && ((tb->pc & TARGET_PAGE_MASK) == addr
              || ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
                  env->tb_jmp_cache[i] = NULL;
            }
      }

#if USE_KFAUM
      if (NAME_(kfaum_enabled)) {
            NAME_(kfaum_flush_page)(addr);
      }
#endif /* USE_KFAUM */
}
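
/*
 * Note: this is the natural handler for the guest's INVLPG instruction,
 * i.e. the instruction emulation is expected to call
 * NAME_(mmu_invlpg)(linear_addr) (the actual call site is outside this
 * file).
 */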

/*
 * NOTE: if flush_global is true, also flush global entries (not
 * implemented yet)
 */
void
NAME_(mmu_flush_all)(int flush_global)
{
      int i;

      /*
       * Must reset current TB so that interrupts cannot modify the
       * links while we are modifying them.
       */
      env->current_tb = NULL;

      for (i = 0; i < CPU_TLB_SIZE; i++) {
            env->tlb_read[0][i].address = -1;
            env->tlb_write[0][i].address = -1;
            env->tlb_code[0][i].address = -1;
            env->tlb_read[1][i].address = -1;
            env->tlb_write[1][i].address = -1;
            env->tlb_code[1][i].address = -1;
      }

      memset(env->tb_jmp_cache, 0, sizeof(env->tb_jmp_cache));

#if USE_KFAUM
      if (NAME_(kfaum_enabled)) {
            NAME_(kfaum_flush_all)(flush_global);
      }
#endif /* USE_KFAUM */
}

#if 80386 <= CONFIG_CPU
static void
NAME_(mmu_a20m_set)(unsigned int a20_state)
{
      a20_state = (a20_state != 0);
      if (a20_state != ((env->a20_mask >> 20) & 1)) {
            /*
             * If the cpu is currently executing code, we must
             * unlink it and all the potentially executing TB.
             */
            NAME_(interrupt)();

            /*
             * When a20 is changed, all the MMU mappings are
             * invalid, so we must flush everything.
             */
            NAME_(mmu_flush_all)(1);
            env->a20_mask = (Paddr) (~(1ULL << 20) | (a20_state << 20));
      }
}
#endif /* 80386 <= CONFIG_CPU */
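
/*
 * Worked example for the mask computed above: with A20 masked off,
 * physical address bit 20 is forced to 0, so 0x00100000 aliases
 * 0x00000000 (the classic 8086 wrap-around):
 *
 *      a20_state == 0: a20_mask = ~(1 << 20)  (bit 20 cleared)
 *      a20_state == 1: a20_mask = ~0          (addresses pass through)
 */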

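/*
 * Instantiate the code-fetch accessors from the shared template, once
 * per access size; SHIFT is the log2 of the size (0 = byte, 1 = word,
 * 2 = long, 3 = quad). MMUSUFFIX and CODE_ACCESS select the code-TLB
 * variants (presumably where the ldub_code() used above is generated).
 */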
#define MMUSUFFIX _cmmu
#define GETPC() ((void *) 0)
#define CODE_ACCESS

#define SHIFT 0
#include "arch_gen_cpu_x86_mmu_template.c"

#define SHIFT 1
#include "arch_gen_cpu_x86_mmu_template.c"

#define SHIFT 2
#include "arch_gen_cpu_x86_mmu_template.c"

#define SHIFT 3
#include "arch_gen_cpu_x86_mmu_template.c"

#undef GETPC
#undef MMUSUFFIX

void
NAME_(mmu_reset)(void)
{
      NAME_(mmu_flush_all)(1);
}

CPUReadMemoryFunc *const NAME_(io_mem_read)[IO_MEM_NB_ENTRIES][4] = {
      [IO_MEM_RAM >> IO_MEM_SHIFT] = {
            NULL, NULL, NULL /* Not used. */
      },
      [IO_MEM_CODE >> IO_MEM_SHIFT] = {
            NULL, NULL, NULL /* Not used. */
      },
      [IO_MEM_IO >> IO_MEM_SHIFT] = {
            NAME_(mr_data_b), NAME_(mr_data_w), NAME_(mr_data_l)
      },
      [IO_MEM_IO_CODE >> IO_MEM_SHIFT] = {
            NAME_(mr_data_b), NAME_(mr_data_w), NAME_(mr_data_l)
      },
};
CPUWriteMemoryFunc *const NAME_(io_mem_write)[IO_MEM_NB_ENTRIES][4] = {
      [IO_MEM_RAM >> IO_MEM_SHIFT] = {
            NULL, NULL, NULL /* Not used. */
      },
      [IO_MEM_CODE >> IO_MEM_SHIFT] = {
            NAME_(mw_code_b), NAME_(mw_code_w), NAME_(mw_code_l)
      },
      [IO_MEM_IO >> IO_MEM_SHIFT] = {
            NAME_(mw_data_b), NAME_(mw_data_w), NAME_(mw_data_l)
      },
      [IO_MEM_IO_CODE >> IO_MEM_SHIFT] = {
            NAME_(mw_code_b), NAME_(mw_code_w), NAME_(mw_code_l)
      },
};
CPUReadMemoryFunc *const NAME_(io_mem_code)[IO_MEM_NB_ENTRIES][4] = {
      [IO_MEM_RAM >> IO_MEM_SHIFT] = {
            NULL, NULL, NULL /* Not used. */
      },
      [IO_MEM_CODE >> IO_MEM_SHIFT] = {
            NULL, NULL, NULL /* Not used. */
      },
      [IO_MEM_IO >> IO_MEM_SHIFT] = {
            NAME_(mx_code_b), NAME_(mx_code_w), NAME_(mx_code_l)
      },
      [IO_MEM_IO_CODE >> IO_MEM_SHIFT] = {
            NAME_(mx_code_b), NAME_(mx_code_w), NAME_(mx_code_l)
      },
};
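
/*
 * Dispatch sketch (illustration only, not compiled): the I/O bits of a
 * TLB entry's address select the row, and the log2 of the access size
 * selects the column (0 = byte, 1 = word, 2 = long):
 */
#if 0
io_index = (tlb_entry->address & ~TARGET_PAGE_MASK) >> IO_MEM_SHIFT;
val = NAME_(io_mem_read)[io_index][2](paddr);   /* 32-bit read */
#endif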

void
NAME_(mmu_init)(struct cpu *css)
{
      /* Nothing to do. */
}
