Code example #1
File: pmic.c  Project: tidatida/coreboot
int pmic_read_reg(unsigned bus, uint16_t reg, uint8_t *data)
{
    if (i2c_readb(bus, PAGE_ADDR(reg), PAGE_OFFSET(reg), data)) {
        printk(BIOS_ERR, "%s: page = 0x%02X, reg = 0x%02X failed!\n",
               __func__, PAGE_ADDR(reg), PAGE_OFFSET(reg));
        return -1;
    }
    return 0;
}
Code example #2
File: pmic.c  Project: tidatida/coreboot
void pmic_write_reg(unsigned bus, uint16_t reg, uint8_t val, int delay)
{
    if (i2c_writeb(bus, PAGE_ADDR(reg), PAGE_OFFSET(reg), val)) {
        printk(BIOS_ERR, "%s: page = 0x%02X, reg = 0x%02X, "
               "value = 0x%02X failed!\n",
               __func__, PAGE_ADDR(reg), PAGE_OFFSET(reg), val);
        /* Reset the SoC on any PMIC write error */
        cpu_reset();
    } else {
        if (delay)
            udelay(500);
    }
}
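Both snippets above split a 16-bit register token into an I2C page address and an offset within that page. The PAGE_ADDR/PAGE_OFFSET macros are not shown; a minimal sketch of one plausible encoding, assuming the upper byte selects the page and the lower byte the register (the real coreboot macros may also fold in the chip's base I2C address):

/* Hypothetical encoding -- not taken from pmic.c:
 * upper byte of reg selects the I2C page, lower byte the register. */
#define PAGE_ADDR(reg)   (((reg) >> 8) & 0xFF)
#define PAGE_OFFSET(reg) ((reg) & 0xFF)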
Code example #3
File: debugstuff.cpp  Project: hack477/bochs4wii
void BX_CPU_C::debug_disasm_instruction(bx_address offset)
{
#if BX_DEBUGGER
  bx_dbg_disassemble_current(BX_CPU_ID, 1); // only one cpu, print time stamp
#else
  bx_phy_address phy_addr;
  Bit8u  instr_buf[16];
  char   char_buf[512];
  size_t i=0;

  static char letters[] = "0123456789ABCDEF";
  static disassembler bx_disassemble;
  unsigned remainsInPage = 0x1000 - PAGE_OFFSET(offset);

  bx_bool valid = dbg_xlate_linear2phy(BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_CS, offset), &phy_addr);
  if (valid) {
    BX_MEM(0)->dbg_fetch_mem(BX_CPU_THIS, phy_addr, 16, instr_buf);
    unsigned isize = bx_disassemble.disasm(
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b,
        BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64,
        BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_CS), offset,
        instr_buf, char_buf+i);
    if (isize <= remainsInPage) {
      i=strlen(char_buf);
      char_buf[i++] = ' ';
      char_buf[i++] = ':';
      char_buf[i++] = ' ';
      for (unsigned j=0; j<isize; j++) {
        char_buf[i++] = letters[(instr_buf[j] >> 4) & 0xf];
        char_buf[i++] = letters[(instr_buf[j] >> 0) & 0xf];
      }
      char_buf[i] = 0;
      BX_INFO((">> %s", char_buf));
    }
    else {
      /* [snippet truncated in the original] the instruction crosses the
       * 4K page boundary, so this minimal completion just reports it as
       * unavailable instead of fetching from two pages */
      BX_INFO((">> (instruction unavailable) crosses page boundary"));
    }
  }
  else {
    BX_INFO((">> (instruction unavailable) page not present"));
  }
#endif  // BX_DEBUGGER
}
Code example #4
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfLoader::ReadProgramHeader(Error* error) {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, only accept program header tables smaller than 64 KB.
  if (phdr_num_ < 1 || phdr_num_ > 65536 / sizeof(ELF::Phdr)) {
    error->Format("Invalid program header count: %d", phdr_num_);
    return false;
  }

  ELF::Addr page_min = PAGE_START(header_.e_phoff);
  ELF::Addr page_max =
      PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(ELF::Phdr)));
  ELF::Addr page_offset = PAGE_OFFSET(header_.e_phoff);

  phdr_size_ = page_max - page_min;

  void* mmap_result = fd_.Map(
      NULL, phdr_size_, PROT_READ, MAP_PRIVATE, page_min + file_offset_);
  if (mmap_result == MAP_FAILED) {
    error->Format("Phdr mmap failed: %s", strerror(errno));
    return false;
  }

  phdr_mmap_ = mmap_result;
  phdr_table_ = reinterpret_cast<ELF::Phdr*>(
      reinterpret_cast<char*>(mmap_result) + page_offset);
  return true;
}
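Examples #4-#6 share the same page-arithmetic helpers for turning an arbitrary file offset into a page-aligned mmap range. A minimal sketch of those macros, assuming a power-of-two page size and the Linux PAGE_SIZE/PAGE_MASK constants (this matches the bionic-style definitions these loaders descend from, but is shown here as an assumption):

#include <sys/user.h>   /* PAGE_SIZE, PAGE_MASK == ~(PAGE_SIZE - 1) */

#define PAGE_START(x)  ((x) & PAGE_MASK)                /* round down to page start  */
#define PAGE_OFFSET(x) ((x) & ~PAGE_MASK)               /* byte offset within page   */
#define PAGE_END(x)    PAGE_START((x) + (PAGE_SIZE-1))  /* round up to page boundary */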
Code example #5
File: linker_phdr.cpp  Project: SuperNovaTeam/bionic
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeader() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(Elf32_Phdr)) {
    DL_ERR("\"%s\" has invalid e_phnum: %d", name_, phdr_num_);
    return false;
  }

  Elf32_Addr page_min = PAGE_START(header_.e_phoff);
  Elf32_Addr page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(Elf32_Phdr)));
  Elf32_Addr page_offset = PAGE_OFFSET(header_.e_phoff);

  phdr_size_ = page_max - page_min;

  void* mmap_result = mmap(NULL, phdr_size_, PROT_READ, MAP_PRIVATE, fd_, page_min);
  if (mmap_result == MAP_FAILED) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_, strerror(errno));
    return false;
  }

  phdr_mmap_ = mmap_result;
  phdr_table_ = reinterpret_cast<Elf32_Phdr*>(reinterpret_cast<char*>(mmap_result) + page_offset);
  return true;
}
Code example #6
File: linker_phdr.c  Project: tylerchen0619/olibc
// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader_ReadProgramHeader(ElfReader* er) {
  er->phdr_num_ = er->header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (er->phdr_num_ < 1 || er->phdr_num_ > 65536/sizeof(Elf32_Phdr)) {
    DL_ERR("\"%s\" has invalid e_phnum: %d", er->name_, er->phdr_num_);
    return false;
  }

  Elf32_Addr page_min = PAGE_START(er->header_.e_phoff);
  Elf32_Addr page_max = PAGE_END(er->header_.e_phoff + (er->phdr_num_ * sizeof(Elf32_Phdr)));
  Elf32_Addr page_offset = PAGE_OFFSET(er->header_.e_phoff);

  er->phdr_size_ = page_max - page_min;

  void* mmap_result = mmap(NULL, er->phdr_size_, PROT_READ, MAP_PRIVATE, er->fd_, page_min);
  if (mmap_result == MAP_FAILED) {
    DL_ERR("\"%s\" phdr mmap failed: %s", er->name_, strerror(errno));
    return false;
  }

  er->phdr_mmap_ = mmap_result;
  er->phdr_table_ = (Elf32_Phdr*)((char*)(mmap_result) + page_offset);
  return true;
}
Code example #7
File: dma.c  Project: gapry/papaya
struct dma_addr  sos_dma_malloc (void* cookie, uint32_t size, int cached) {
    static int alloc_cached = 0;
    struct dma_addr dma_mem;
    (void)cookie;

    assert(_dma_pstart);
    _dma_pnext = DMA_ALIGN(_dma_pnext);
    if (_dma_pnext < _dma_pend) {
        /* If caching policy has changed we round to page boundary */
        if (alloc_cached != cached && PAGE_OFFSET(_dma_pnext) != 0) {
            _dma_pnext = ROUND_UP (_dma_pnext, seL4_PageBits);
        }

        alloc_cached = cached;

        /* we no longer need dma_fill since rootsvr does this for us
         * if we fault */
        dma_mem.phys = (eth_paddr_t)_dma_pnext;
        dma_mem.virt = (eth_vaddr_t)VIRT(dma_mem.phys);
        _dma_pnext += size;
    } else {
        dma_mem.phys = 0;
        dma_mem.virt = 0;
    }

    return dma_mem;
}
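Note that ROUND_UP here takes a bit count rather than a byte alignment: rounding _dma_pnext to a page boundary means rounding up to a multiple of 1 << seL4_PageBits. A sketch of the assumed helper, in the style of seL4's utility headers (unverified for this project; DMA_ALIGN, not shown, presumably rounds to the device's burst alignment the same way):

/* Assumed seL4-style helper: round n up to a multiple of 2^b,
 * e.g. b == seL4_PageBits == 12 for 4 KiB pages. */
#define ROUND_UP(n, b) (((((n) - 1ul) >> (b)) + 1ul) << (b))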
Code example #8
File: access.cpp  Project: hack477/bochs4wii
BX_CPU_C::read_RMW_virtual_dword(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit32u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_RW);

  if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
    bx_address lpf = AlignedAccessLPFOf(laddr, 3);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us write access
      // from this CPL.
      if (tlbEntry->accessBits & (0x10 << CPL)) {
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_RW);
        Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
        ReadHostDWordFromLittleEndian(hostAddr, data);
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_RMW_virtual_dword(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
    if (BX_CPU_THIS_PTR alignment_check()) {
      if (laddr & 3) {
        BX_ERROR(("read_RMW_virtual_dword(): #AC misaligned access"));
        exception(BX_AC_EXCEPTION, 0, 0);
      }
    }
#endif
    access_read_linear(laddr, 4, CPL, BX_RW, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessWOK) {
    if (Is64BitMode() || (offset < (seg->cache.u.segment.limit_scaled-2)))
      goto accessOK;
  }
  write_virtual_checks(seg, offset, 4);
  goto accessOK;
}
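The accessBits test above is the key to the fast path: each TLB entry caches, per privilege level, whether a read or write may go straight to host memory. In this bochs version the low nibble appears to hold one read-OK bit per CPL and the high nibble one write-OK bit per CPL; a sketch of the checks under that assumption:

/* Assumed accessBits layout for this bochs version:
 *   bit (0 + CPL) set -> reads allowed at privilege level CPL
 *   bit (4 + CPL) set -> writes allowed at privilege level CPL */
static inline int tlb_read_ok(unsigned accessBits, unsigned cpl)
{
    return accessBits & (1u << cpl);     /* cf. "1<<CPL" in the read paths */
}

static inline int tlb_write_ok(unsigned accessBits, unsigned cpl)
{
    return accessBits & (0x10u << cpl);  /* cf. "0x10 << CPL" above        */
}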
Code example #9
File: acpi.c  Project: AnupreetKJohar/weenix
/* given the physical address of an ACPI table this function
 * allocates memory for that table and copies the table into
 * that memory, returning the new virtual address for that table */
static void *_acpi_load_table(uintptr_t paddr)
{
        struct acpi_header *tmp =
                (struct acpi_header *)(pt_phys_tmp_map((uintptr_t)PAGE_ALIGN_DOWN(paddr)) + (PAGE_OFFSET(paddr)));

        /* this function is not designed to handle tables which
         * cross page boundaries */
        KASSERT(PAGE_OFFSET(paddr) + tmp->ah_size < PAGE_SIZE);
        struct acpi_header *table = kmalloc(tmp->ah_size);
        memcpy(table, tmp, tmp->ah_size);
        return (void *)table;
}
Code example #10
File: access.cpp  Project: hack477/bochs4wii
BX_CPU_C::read_RMW_virtual_byte(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_RW);

  if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
    bx_address lpf = LPFOf(laddr);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us write access
      // from this CPL.
      if (tlbEntry->accessBits & (0x10 << CPL)) {
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_RW);
        Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
        data = *hostAddr;
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
    // Accelerated attempt falls through to long path.  Do it the
    // old fashioned way...
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_RMW_virtual_byte(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
    access_read_linear(laddr, 1, CPL, BX_RW, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessWOK) {
    if (Is64BitMode() || (offset <= seg->cache.u.segment.limit_scaled))
      goto accessOK;
  }
  write_virtual_checks(seg, offset, 1);
  goto accessOK;
}
Code example #11
File: access.cpp  Project: hack477/bochs4wii
// assuming the write happens in legacy mode
void BX_CPU_C::write_new_stack_dword_32(bx_segment_reg_t *seg, bx_address offset, unsigned curr_pl, Bit32u data)
{
  Bit32u laddr;

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
    laddr = (Bit32u)(seg->cache.u.segment.base + offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
    bx_address lpf = AlignedAccessLPFOf(laddr, 3);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us write access
      // from this CPL.
      if (tlbEntry->accessBits & (0x10 << CPL)) {
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 4, curr_pl, BX_WRITE, (Bit8u*) &data);
        Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
        WriteHostDWordToLittleEndian(hostAddr, data);
        return;
      }
    }
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
    if (BX_CPU_THIS_PTR alignment_check() && curr_pl == 3) {
      if (laddr & 3) {
        BX_ERROR(("write_new_stack_dword_32(): #AC misaligned access"));
        exception(BX_AC_EXCEPTION, 0, 0);
      }
    }
#endif
    access_write_linear(laddr, 4, curr_pl, (void *) &data);
    return;
  }

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < (seg->cache.u.segment.limit_scaled-2))
      goto accessOK;
  }
  write_virtual_checks(seg, offset, 4);
  goto accessOK;
}
Code example #12
File: access64.cpp  Project: hack477/bochs4wii
BX_CPU_C::read_RMW_virtual_qword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit64u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
  Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & (0x10 << CPL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_RW);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      ReadHostQWordFromLittleEndian(hostAddr, data);
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("read_RMW_virtual_qword_64(): canonical failure"));
    exception(int_number(seg), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 7) {
      BX_ERROR(("read_RMW_virtual_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 8, CPL, BX_RW, (void *) &data);
  return data;
}
Code example #13
File: storage_mgr.c  Project: iypearun001/DB
/* Read bytes from file-system. This is not exposed, called by API's */
static RC readBytes(int pageNum, SM_FileHandle *fHandle, SM_PageHandle memPage)
{
    int fd;
    // Do we have this page?
    if (pageNum >= fHandle->totalNumPages || pageNum < 0)
        return RC_READ_NON_EXISTING_PAGE;

    // Read the block
    fd= (int) ((SM_FileMgmtInfo*) fHandle->mgmtInfo)->fd;
    lseek(fd, PAGE_OFFSET(pageNum), SEEK_SET);
    if (read(fd, memPage, PAGE_SIZE) < PAGE_SIZE)
        return RC_READ_FAILED;

    fHandle->curPagePos= pageNum;
    return RC_OK;
}
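Unlike the memory-mapping examples, PAGE_OFFSET here converts a page *number* into a byte position in the file, so this project's macro is presumably multiplicative rather than a bit mask. A sketch under that assumption (hypothetical; the real storage manager header may also add a file-header size):

/* Assumed definition for this storage manager: byte offset in the file
 * at which page pageNum begins. */
#define PAGE_OFFSET(pageNum) ((off_t)(pageNum) * PAGE_SIZE)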
Code example #14
File: rplloader.cpp  Project: zhuowei/rplloader
bool RPLLibrary::readElfHeader() {
	void* ehdr_map = ::mmap(nullptr, PAGE_END(sizeof(Elf32_Phdr)),
		PROT_READ, MAP_PRIVATE, fd, 0);
	if (ehdr_map == MAP_FAILED) return false;
	ehdr = (Elf32_Ehdr*) ehdr_map;
	// map the section headers
	// from crazy linker
	Elf32_Addr page_min = PAGE_START(ehdr->e_shoff);
	Elf32_Addr page_max = PAGE_END(ehdr->e_shoff + 
		(ehdr->e_shnum * sizeof(Elf32_Shdr)));
	Elf32_Addr page_offset = PAGE_OFFSET(ehdr->e_shoff);
	void* shdr_map = ::mmap(nullptr, page_max - page_min,
		PROT_READ, MAP_PRIVATE, fd, page_min);
	if (shdr_map == MAP_FAILED) return false;
	shdr = (Elf32_Shdr*) (((uintptr_t) shdr_map) + page_offset);
	return true;
}
Code example #15
File: access.cpp  Project: hack477/bochs4wii
BX_CPU_C::read_virtual_byte(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_READ);

  if (seg->cache.valid & SegAccessROK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
    bx_address lpf = LPFOf(laddr);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us read access
      // from this CPL.
      if (tlbEntry->accessBits & (1<<CPL)) { // Read this pl OK.
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_READ);
        Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
        data = *hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_virtual_byte(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
    access_read_linear(laddr, 1, CPL, BX_READ, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessROK) {
    if (Is64BitMode() || (offset <= seg->cache.u.segment.limit_scaled))
      goto accessOK;
  }
  read_virtual_checks(seg, offset, 1);
  goto accessOK;
}
Code example #16
File: access.cpp  Project: hack477/bochs4wii
BX_CPU_C::v2h_read_byte(bx_address laddr, unsigned curr_pl)
{
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (tlbEntry->accessBits & (1<<curr_pl)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
      return hostAddr;
    }
  }

  return 0;
}
Code example #17
File: access64.cpp  Project: hack477/bochs4wii
void BX_CPU_C::write_new_stack_qword_64(Bit64u laddr, unsigned curr_pl, Bit64u data)
{
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
  Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & (0x10 << CPL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, curr_pl, BX_WRITE, (Bit8u*) &data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostQWordToLittleEndian(hostAddr, data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("write_new_stack_qword_64(): canonical failure"));
    exception(BX_SS_EXCEPTION, 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check() && curr_pl == 3) {
    if (laddr & 7) {
      BX_ERROR(("write_new_stack_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 8, curr_pl, (void *) &data);
}
Code example #18
File: z-doc.c  Project: Alkalinear/poschengband
doc_char_ptr doc_char(doc_ptr doc, doc_pos_t pos)
{
    int            cb = doc->width * PAGE_HEIGHT * sizeof(doc_char_t);
    int            page_num = PAGE_NUM(pos.y);
    int            offset = PAGE_OFFSET(pos.y);
    doc_char_ptr   page = NULL;

    while (page_num >= vec_length(doc->pages))
    {
        page = malloc(cb);
        memset(page, 0, cb);
        vec_add(doc->pages, page);
    }

    page = vec_get(doc->pages, page_num);

    assert(0 <= doc->cursor.x && doc->cursor.x < doc->width);
    assert(offset * doc->width + pos.x < cb);

    return page + offset * doc->width + pos.x;
}
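Here the page macros address a document split into fixed-height pages of text rather than memory pages: PAGE_NUM picks the page vector slot holding row y, and PAGE_OFFSET the row within that page. A sketch of the assumed definitions in terms of the PAGE_HEIGHT constant used above:

/* Assumed definitions: row y lives on page y / PAGE_HEIGHT,
 * at row y % PAGE_HEIGHT within that page. */
#define PAGE_NUM(y)    ((y) / PAGE_HEIGHT)
#define PAGE_OFFSET(y) ((y) % PAGE_HEIGHT)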
Code example #19
File: access.cpp  Project: hack477/bochs4wii
BX_CPU_C::v2h_write_byte(bx_address laddr, unsigned curr_pl)
{
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf)
  {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & (0x10 << curr_pl)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      return hostAddr;
    }
  }

  return 0;
}
Code example #20
File: access64.cpp  Project: hack477/bochs4wii
BX_CPU_C::write_virtual_byte_64(unsigned s, Bit64u offset, Bit8u data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & (0x10 << CPL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 1, CPL, BX_WRITE, (Bit8u*) &data);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      *hostAddr = data;
      return;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("write_virtual_byte_64(): canonical failure"));
    exception(int_number(seg), 0, 0);
  }

  access_write_linear(laddr, 1, CPL, (void *) &data);
}
Code example #21
/* Load the program header table from an ELF file into a read-only private
 * anonymous mmap-ed block.
 *
 * Input:
 *   fd           -> file descriptor
 *   phdr_offset  -> file offset of phdr table
 *   phdr_num     -> number of entries in the table.
 *
 * Output:
 *   phdr_mmap    -> address of mmap block in memory.
 *   phdr_memsize -> size of mmap block in memory.
 *   phdr_table   -> address of first entry in memory.
 *
 * Return:
 *   -1 on error, or 0 on success.
 */
int phdr_table_load(int                fd,
                    Elf32_Addr         phdr_offset,
                    Elf32_Half         phdr_num,
                    void**             phdr_mmap,
                    Elf32_Addr*        phdr_size,
                    const Elf32_Phdr** phdr_table)
{
    Elf32_Addr  page_min, page_max, page_offset;
    void*       mmap_result;

    /* Just like the kernel, we only accept program header tables that
     * are smaller than 64KB. */
    if (phdr_num < 1 || phdr_num > 65536/sizeof(Elf32_Phdr)) {
        errno = EINVAL;
        return -1;
    }

    page_min = PAGE_START(phdr_offset);
    page_max = PAGE_END(phdr_offset + phdr_num*sizeof(Elf32_Phdr));
    page_offset = PAGE_OFFSET(phdr_offset);

    mmap_result = mmap(NULL,
                       page_max - page_min,
                       PROT_READ,
                       MAP_PRIVATE,
                       fd,
                       page_min);

    if (mmap_result == MAP_FAILED) {
        return -1;
    }

    *phdr_mmap = mmap_result;
    *phdr_size = page_max - page_min;
    *phdr_table = (Elf32_Phdr*)((char*)mmap_result + page_offset);

    return 0;
}
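A minimal caller sketch for the function above (hypothetical wrapper; assumes the Elf32_Ehdr has already been read from fd and validated):

/* Returns the phdr table, or NULL with errno set (EINVAL for a bad
 * e_phnum, otherwise the mmap error). The caller eventually releases
 * the table with munmap(*mmap_out, *size_out). */
static const Elf32_Phdr* load_phdrs(int fd, const Elf32_Ehdr* ehdr,
                                    void** mmap_out, Elf32_Addr* size_out)
{
    const Elf32_Phdr* table = NULL;
    if (phdr_table_load(fd, ehdr->e_phoff, ehdr->e_phnum,
                        mmap_out, size_out, &table) < 0)
        return NULL;
    return table;
}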
Code example #22
File: elf32.c  Project: Aliced3645/os
/* Helper function for the ELF loader. Maps the specified segment
 * of the program header from the given file in to the given address
 * space with the given memory offset (in pages). On success returns 0, otherwise
 * returns a negative error code for the ELF loader to return.
 * Note that since any error returned by this function should
 * cause the ELF loader to give up, it is acceptable for the
 * address space to be modified after returning an error.
 * Note that memoff can be negative */
static int _elf32_map_segment(vmmap_t *map, vnode_t *file, int32_t memoff, const Elf32_Phdr *segment)
{
        uintptr_t addr;
        if (memoff < 0) {
                KASSERT(ADDR_TO_PN(segment->p_vaddr) > (uint32_t) -memoff);
                addr = (uintptr_t)segment->p_vaddr - (uintptr_t)PN_TO_ADDR(-memoff);
        } else {
                addr = (uintptr_t)segment->p_vaddr + (uintptr_t)PN_TO_ADDR(memoff);
        }
        uint32_t off = segment->p_offset;
        uint32_t memsz = segment->p_memsz;
        uint32_t filesz = segment->p_filesz;

        dbg(DBG_ELF, "Mapping program segment: type %#x, offset %#08x,"
            " vaddr %#08x, filesz %#x, memsz %#x, flags %#x, align %#x\n",
            segment->p_type, segment->p_offset, segment->p_vaddr,
            segment->p_filesz, segment->p_memsz, segment->p_flags,
            segment->p_align);

        /* check for bad data in the segment header */
        if (PAGE_SIZE != segment->p_align) {
                dbg(DBG_ELF, "ERROR: segment does not have correct alignment\n");
                return -ENOEXEC;
        } else if (filesz > memsz) {
                dbg(DBG_ELF, "ERROR: segment file size is greater than memory size\n");
                return -ENOEXEC;
        } else if (PAGE_OFFSET(addr) != PAGE_OFFSET(off)) {
                dbg(DBG_ELF, "ERROR: segment address and offset are not aligned correctly\n");
                return -ENOEXEC;
        }

        int perms = 0;
        if (PF_R & segment->p_flags) {
                perms |= PROT_READ;
        }
        if (PF_W & segment->p_flags) {
                perms |= PROT_WRITE;
        }
        if (PF_X & segment->p_flags) {
                perms |= PROT_EXEC;
        }

        if (0 < filesz) {
                /* something needs to be mapped from the file */
                /* start from the starting address and include enough pages to
                 * map all filesz bytes of the file */
                uint32_t lopage = ADDR_TO_PN(addr);
                uint32_t npages = ADDR_TO_PN(addr + filesz - 1) - lopage + 1;
                off_t fileoff = (off_t)PAGE_ALIGN_DOWN(off);

                int ret;
                if (!vmmap_is_range_empty(map, lopage, npages)) {
                        dbg(DBG_ELF, "ERROR: ELF file contains overlapping segments\n");
                        return -ENOEXEC;
                } else if (0 > (ret = vmmap_map(map, file, lopage, npages, perms,
                                                MAP_PRIVATE | MAP_FIXED, fileoff,
                                                0, NULL))) {
                        return ret;
                }
        }

        if (memsz > filesz) {
                /* there is left over memory in the segment which must
                 * be initialized to 0 (anonymously mapped) */
                uint32_t lopage = ADDR_TO_PN(addr + filesz);
                uint32_t npages = ADDR_TO_PN(PAGE_ALIGN_UP(addr + memsz)) - lopage;

                int ret;
                if (npages > 1 && !vmmap_is_range_empty(map, lopage + 1, npages - 1)) {
                        dbg(DBG_ELF, "ERROR: ELF file contains overlapping segments\n");
                        return -ENOEXEC;
                } else if (0 > (ret = vmmap_map(map, NULL, lopage, npages, perms,
                                                MAP_PRIVATE | MAP_FIXED, 0, 0, NULL))) {
                        return ret;
                } else if (!PAGE_ALIGNED(addr + filesz) && filesz > 0) {
                        /* In this case, we have accidentally zeroed too much of memory, as
                         * we zeroed all memory in the page containing addr + filesz.
                         * However, the remaining part of the data is not a full page, so we
                         * should not just map in another page (as there could be garbage
                         * after addr+filesz). For instance, consider the data-bss boundary
                         * (c.f. Intel x86 ELF supplement pp. 82).
                         * To fix this, we need to read in the contents of the file manually
                         * and put them at that user space addr in the anon map we just
                         * added. */
                        void *buf;
                        if (NULL == (buf = page_alloc()))
                                return -ENOMEM;
                        if (!(0 > (ret = file->vn_ops->read(file, (off_t) PAGE_ALIGN_DOWN(off + filesz),
                                                            buf, PAGE_OFFSET(addr + filesz))))) {
                                ret = vmmap_write(map, PAGE_ALIGN_DOWN(addr + filesz),
                                                  buf, PAGE_OFFSET(addr + filesz));
                        }
                        page_free(buf);
                        return ret;
                }
        }
        return 0;
}
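To make the partial-page fix at the end of the function concrete, a worked example with hypothetical numbers (PAGE_SIZE == 0x1000):

/* addr = 0x0804a000, off = 0x2000, filesz = 0x1400, memsz = 0x3000.
 * addr + filesz = 0x0804b400 is not page aligned (PAGE_OFFSET = 0x400).
 * The anonymous mapping starts at page 0x0804b000 and zeroes all of it,
 * clobbering the 0x400 file bytes that belong at 0x0804b000..0x0804b3ff.
 * The fix therefore reads PAGE_OFFSET(addr + filesz) = 0x400 bytes from
 * file offset PAGE_ALIGN_DOWN(off + filesz) = 0x3000 and writes them
 * back to user address PAGE_ALIGN_DOWN(addr + filesz) = 0x0804b000. */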
Code example #23
File: linker_phdr.cpp  Project: SuperNovaTeam/bionic
// Map all loadable segments in process' address space.
// This assumes you already called phdr_table_reserve_memory to
// reserve the address space range for the library.
// TODO: assert assumption.
bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const Elf32_Phdr* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    Elf32_Addr seg_start = phdr->p_vaddr + load_bias_;
    Elf32_Addr seg_end   = seg_start + phdr->p_memsz;

    Elf32_Addr seg_page_start = PAGE_START(seg_start);
    Elf32_Addr seg_page_end   = PAGE_END(seg_end);

    Elf32_Addr seg_file_end   = seg_start + phdr->p_filesz;

    // File offsets.
    Elf32_Addr file_start = phdr->p_offset;
    Elf32_Addr file_end   = file_start + phdr->p_filesz;

    Elf32_Addr file_page_start = PAGE_START(file_start);

    void* seg_addr = mmap((void*)seg_page_start,
                          file_end - file_page_start,
                          PFLAGS_TO_PROT(phdr->p_flags),
                          MAP_FIXED|MAP_PRIVATE,
                          fd_,
                          file_page_start);
    if (seg_addr == MAP_FAILED) {
      DL_ERR("couldn't map \"%s\" segment %d: %s", name_, i, strerror(errno));
      return false;
    }

    // if the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap((void*)seg_file_end,
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
        return false;
      }
    }
  }
  return true;
}
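PFLAGS_TO_PROT translates the ELF segment permission flags (PF_R/PF_W/PF_X) into mmap protection bits. A sketch of the bionic-style helper these loaders assume:

/* Map one ELF PF_* flag to the corresponding PROT_* bit. */
#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)  (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                            MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                            MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))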
Code example #24
/* Map all loadable segments in process' address space.
 * This assumes you already called phdr_table_reserve_memory to
 * reserve the address space range for the library.
 *
 * Input:
 *   phdr_table    -> program header table
 *   phdr_count    -> number of entries in the table
 *   load_bias     -> load offset.
 *   fd            -> input file descriptor.
 *
 * Return:
 *   0 on success, -1 otherwise. Error code in errno.
 */
int
phdr_table_load_segments(const Elf32_Phdr* phdr_table,
                         int               phdr_count,
                         Elf32_Addr        load_bias,
                         int               fd)
{
    int nn;

    for (nn = 0; nn < phdr_count; nn++) {
        const Elf32_Phdr* phdr = &phdr_table[nn];
        void* seg_addr;

        if (phdr->p_type != PT_LOAD)
            continue;

        /* Segment addresses in memory */
        Elf32_Addr seg_start = phdr->p_vaddr + load_bias;
        Elf32_Addr seg_end   = seg_start + phdr->p_memsz;

        Elf32_Addr seg_page_start = PAGE_START(seg_start);
        Elf32_Addr seg_page_end   = PAGE_END(seg_end);

        Elf32_Addr seg_file_end   = seg_start + phdr->p_filesz;

        /* File offsets */
        Elf32_Addr file_start = phdr->p_offset;
        Elf32_Addr file_end   = file_start + phdr->p_filesz;

        Elf32_Addr file_page_start = PAGE_START(file_start);

        seg_addr = mmap((void*)seg_page_start,
                        file_end - file_page_start,
                        PFLAGS_TO_PROT(phdr->p_flags),
                        MAP_FIXED|MAP_PRIVATE,
                        fd,
                        file_page_start);

        if (seg_addr == MAP_FAILED) {
            return -1;
        }

        /* if the segment is writable, and does not end on a page boundary,
         * zero-fill it until the page limit. */
        if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
            memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
        }

        seg_file_end = PAGE_END(seg_file_end);

        /* seg_file_end is now the first page address after the file
         * content. If seg_end is larger, we need to zero anything
         * between them. This is done by using a private anonymous
         * map for all extra pages.
         */
        if (seg_page_end > seg_file_end) {
            void* zeromap = mmap((void*)seg_file_end,
                                    seg_page_end - seg_file_end,
                                    PFLAGS_TO_PROT(phdr->p_flags),
                                    MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                                    -1,
                                    0);
            if (zeromap == MAP_FAILED) {
                return -1;
            }
        }
    }
    return 0;
}
Code example #25
// Map all loadable segments in process' address space.
// This assumes you already called phdr_table_reserve_memory to
// reserve the address space range for the library.
bool ElfLoader::LoadSegments(Error* error) {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ELF::Phdr* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ELF::Addr seg_start = phdr->p_vaddr + load_bias_;
    ELF::Addr seg_end = seg_start + phdr->p_memsz;

    ELF::Addr seg_page_start = PAGE_START(seg_start);
    ELF::Addr seg_page_end = PAGE_END(seg_end);

    ELF::Addr seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ELF::Addr file_start = phdr->p_offset;
    ELF::Addr file_end = file_start + phdr->p_filesz;

    ELF::Addr file_page_start = PAGE_START(file_start);
    ELF::Addr file_length = file_end - file_page_start;

    LOG("%s: file_offset=%p file_length=%p start_address=%p end_address=%p\n",
        __FUNCTION__,
        file_offset_ + file_page_start,
        file_length,
        seg_page_start,
        seg_page_start + PAGE_END(file_length));

    if (file_length != 0) {
      void* seg_addr = fd_.Map((void*)seg_page_start,
                               file_length,
                               PFLAGS_TO_PROT(phdr->p_flags),
                               MAP_FIXED | MAP_PRIVATE,
                               file_page_start + file_offset_);
      if (seg_addr == MAP_FAILED) {
        error->Format("Could not map segment %d: %s", i, strerror(errno));
        return false;
      }
    }

    // if the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap((void*)seg_file_end,
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        error->Format("Could not zero-fill gap: %s", strerror(errno));
        return false;
      }
    }
  }
  return true;
}
Code example #26
static status_t
parse_program_headers(image_t* image, char* buff, int phnum, int phentsize)
{
	elf_phdr* pheader;
	int regcount;
	int i;

	image->dso_tls_id = unsigned(-1);

	regcount = 0;
	for (i = 0; i < phnum; i++) {
		pheader = (elf_phdr*)(buff + i * phentsize);

		switch (pheader->p_type) {
			case PT_NULL:
				/* NOP header */
				break;
			case PT_LOAD:
				if (pheader->p_memsz == pheader->p_filesz) {
					/*
					 * everything in one area
					 */
					image->regions[regcount].start = pheader->p_vaddr;
					image->regions[regcount].size = pheader->p_memsz;
					image->regions[regcount].vmstart
						= PAGE_BASE(pheader->p_vaddr);
					image->regions[regcount].vmsize
						= TO_PAGE_SIZE(pheader->p_memsz
							+ PAGE_OFFSET(pheader->p_vaddr));
					image->regions[regcount].fdstart = pheader->p_offset;
					image->regions[regcount].fdsize = pheader->p_filesz;
					image->regions[regcount].delta = 0;
					image->regions[regcount].flags = 0;
					if (pheader->p_flags & PF_WRITE) {
						// this is a writable segment
						image->regions[regcount].flags |= RFLAG_RW;
					}
				} else {
					/*
					 * may require splitting
					 */
					addr_t A = TO_PAGE_SIZE(pheader->p_vaddr
						+ pheader->p_memsz);
					addr_t B = TO_PAGE_SIZE(pheader->p_vaddr
						+ pheader->p_filesz);

					image->regions[regcount].start = pheader->p_vaddr;
					image->regions[regcount].size = pheader->p_filesz;
					image->regions[regcount].vmstart
						= PAGE_BASE(pheader->p_vaddr);
					image->regions[regcount].vmsize
						= TO_PAGE_SIZE(pheader->p_filesz
							+ PAGE_OFFSET(pheader->p_vaddr));
					image->regions[regcount].fdstart = pheader->p_offset;
					image->regions[regcount].fdsize = pheader->p_filesz;
					image->regions[regcount].delta = 0;
					image->regions[regcount].flags = 0;
					if (pheader->p_flags & PF_WRITE) {
						// this is a writable segment
						image->regions[regcount].flags |= RFLAG_RW;
					}

					if (A != B) {
						/*
						 * yeah, it requires splitting
						 */
						regcount += 1;
						image->regions[regcount].start = pheader->p_vaddr;
						image->regions[regcount].size
							= pheader->p_memsz - pheader->p_filesz;
						image->regions[regcount].vmstart
							= image->regions[regcount-1].vmstart
								+ image->regions[regcount-1].vmsize;
						image->regions[regcount].vmsize
							= TO_PAGE_SIZE(pheader->p_memsz
									+ PAGE_OFFSET(pheader->p_vaddr))
								- image->regions[regcount-1].vmsize;
						image->regions[regcount].fdstart = 0;
						image->regions[regcount].fdsize = 0;
						image->regions[regcount].delta = 0;
						image->regions[regcount].flags = RFLAG_ANON;
						if (pheader->p_flags & PF_WRITE) {
							// this is a writable segment
							image->regions[regcount].flags |= RFLAG_RW;
						}
					}
				}
				regcount += 1;
				break;
			case PT_DYNAMIC:
				image->dynamic_ptr = pheader->p_vaddr;
				break;
			case PT_INTERP:
				// should check here for an appropriate interpreter
				break;
			case PT_NOTE:
				// unsupported
				break;
			case PT_SHLIB:
				// undefined semantics
				break;
			case PT_PHDR:
				// we don't use it
				break;
			case PT_RELRO:
				// not implemented yet, but can be ignored
				break;
			case PT_STACK:
				// we don't use it
				break;
			case PT_TLS:
				image->dso_tls_id
					= TLSBlockTemplates::Get().Register(
						TLSBlockTemplate((void*)pheader->p_vaddr,
							pheader->p_filesz, pheader->p_memsz));
				break;
			default:
				FATAL("%s: Unhandled pheader type in parse 0x%" B_PRIx32 "\n",
					image->path, pheader->p_type);
				return B_BAD_DATA;
		}
	}

	return B_OK;
}
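The region math above relies on three Haiku runtime-loader helpers; a sketch of their assumed definitions in terms of B_PAGE_SIZE (shown as an assumption, not quoted from the source):

/* Assumed Haiku-style helpers: */
#define PAGE_MASK       (B_PAGE_SIZE - 1)
#define PAGE_OFFSET(x)  ((x) & PAGE_MASK)            /* offset within page */
#define PAGE_BASE(x)    ((x) & ~(addr_t)PAGE_MASK)   /* round down to page */
#define TO_PAGE_SIZE(x) (((x) + PAGE_MASK) & ~(addr_t)PAGE_MASK) /* round up */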
Code example #27
File: images.cpp  Project: mariuz/haiku
status_t
map_image(int fd, char const* path, image_t* image, bool fixed)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		// for BeOS compatibility: if we load an old BeOS executable, we
		// have to relocate it, if possible - we recognize it because the
		// vmstart is set to 0 (hopefully always)
		if (fixed && image->regions[i].vmstart == 0)
			fixed = false;

		uint32 regionAddressSpecifier;
		get_image_region_load_address(image, i,
			loadAddress - image->regions[i - 1].vmstart, fixed,
			loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
	if (reservedSize > length + 8 * 1024)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from that one
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%lu%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i, image->regions[i - 1].delta,
			fixed, loadAddress, addressSpecifier);

		// If the image position is arbitrary, we must let it point to the start
		// of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			// Map all segments r/w first -- write access might be needed for
			// relocations. When we've done with those we change the protection
			// of read-only segments back to read-only. We map those segments
			// over-committing, since quite likely only a relatively small
			// number of pages needs to be touched and we want to avoid a lot
			// of memory to be committed for them temporarily, just because we
			// have to write map them.
			uint32 protection = B_READ_AREA | B_WRITE_AREA
				| ((image->regions[i].flags & RFLAG_RW) != 0
					? 0 : B_OVERCOMMITTING_AREA);
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP, false,
				fd, PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void *)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void *)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}
Code example #28
File: elf32.c  Project: Aliced3645/os
static int _elf32_load(const char *filename, int fd, char *const argv[],
                       char *const envp[], uint32_t *eip, uint32_t *esp)
{
        int err = 0;
        Elf32_Ehdr header;
        Elf32_Ehdr interpheader;

        /* variables to clean up on failure */
        vmmap_t *map = NULL;
        file_t *file = NULL;
        char *pht = NULL;
        char *interpname = NULL;
        int interpfd = -1;
        file_t *interpfile = NULL;
        char *interppht = NULL;
        Elf32_auxv_t *auxv = NULL;
        char *argbuf = NULL;

        uintptr_t entry;

        file = fget(fd);
        KASSERT(NULL != file);

        /* Load and verify the ELF header */
        if (0 > (err = _elf32_load_ehdr(fd, &header, 0))) {
                goto done;
        }

        if (NULL == (map = vmmap_create())) {
                err = -ENOMEM;
                goto done;
        }

        size_t phtsize = header.e_phentsize * header.e_phnum;
        if (NULL == (pht = kmalloc(phtsize))) {
                err = -ENOMEM;
                goto done;
        }
        /* Read in the program header table */
        if (0 > (err = _elf32_load_phtable(fd, &header, pht, phtsize))) {
                goto done;
        }
        /* Load the segments in the program header table */
        if (0 > (err = _elf32_map_progsegs(file->f_vnode, map, &header, pht, 0))) {
                goto done;
        }

        Elf32_Phdr *phinterp = NULL;
        /* Check if program requires an interpreter */
        if (0 > (err = _elf32_find_phinterp(&header, pht, &phinterp))) {
                goto done;
        }

        /* Calculate program bounds for future reference */
        void *proglow;
        void *proghigh;
        _elf32_calc_progbounds(&header, pht, &proglow, &proghigh);

        entry = (uintptr_t) header.e_entry;

        /* if an interpreter was requested load it */
        if (NULL != phinterp) {
                /* read the file name of the interpreter from the binary */
                if (0 > (err = do_lseek(fd, phinterp->p_offset, SEEK_SET))) {
                        goto done;
                } else if (NULL == (interpname = kmalloc(phinterp->p_filesz))) {
                        err = -ENOMEM;
                        goto done;
                } else if (0 > (err = do_read(fd, interpname, phinterp->p_filesz))) {
                        goto done;
                }
                if (err != (int)phinterp->p_filesz) {
                        err = -ENOEXEC;
                        goto done;
                }

                /* open the interpreter */
                dbgq(DBG_ELF, "ELF Interpreter: %*s\n", phinterp->p_filesz, interpname);
                if (0 > (interpfd = do_open(interpname, O_RDONLY))) {
                        err = interpfd;
                        goto done;
                }
                kfree(interpname);
                interpname = NULL;

                interpfile = fget(interpfd);
                KASSERT(NULL != interpfile);

                /* Load and verify the interpreter ELF header */
                if (0 > (err = _elf32_load_ehdr(interpfd, &interpheader, 1))) {
                        goto done;
                }
                size_t interpphtsize = interpheader.e_phentsize * interpheader.e_phnum;
                if (NULL == (interppht = kmalloc(interpphtsize))) {
                        err = -ENOMEM;
                        goto done;
                }
                /* Read in the program header table */
                if (0 > (err = _elf32_load_phtable(interpfd, &interpheader, interppht, interpphtsize))) {
                        goto done;
                }

                /* Interpreter shouldn't itself need an interpreter */
                Elf32_Phdr *interpphinterp;
                if (0 > (err = _elf32_find_phinterp(&interpheader, interppht, &interpphinterp))) {
                        goto done;
                }
                if (NULL != interpphinterp) {
                        err = -EINVAL;
                        goto done;
                }

                /* Calculate the interpreter program size */
                void *interplow;
                void *interphigh;
                _elf32_calc_progbounds(&interpheader, interppht, &interplow, &interphigh);
                uint32_t interpnpages = ADDR_TO_PN(PAGE_ALIGN_UP(interphigh)) - ADDR_TO_PN(interplow);

                /* Find space for the interpreter */
                /* This is the first pn at which the interpreter will be mapped */
                uint32_t interppagebase = (uint32_t) vmmap_find_range(map, interpnpages, VMMAP_DIR_HILO);
                if ((uint32_t) - 1 == interppagebase) {
                        err = -ENOMEM;
                        goto done;
                }

                /* Base address at which the interpreter begins on that page */
                void *interpbase = (void *)((uintptr_t)PN_TO_ADDR(interppagebase) + PAGE_OFFSET(interplow));

                /* Offset from "expected base" in number of pages */
                int32_t interpoff = (int32_t) interppagebase - (int32_t) ADDR_TO_PN(interplow);

                entry = (uintptr_t) interpbase + ((uintptr_t) interpheader.e_entry - (uintptr_t) interplow);

                /* Load the interpreter program header and map in its segments */
                if (0 > (err = _elf32_map_progsegs(interpfile->f_vnode, map, &interpheader, interppht, interpoff))) {
                        goto done;
                }

                /* Build the ELF aux table */
                /* Need to hold AT_PHDR, AT_PHENT, AT_PHNUM, AT_ENTRY, AT_BASE,
                 * AT_PAGESZ, AT_NULL */
                if (NULL == (auxv = (Elf32_auxv_t *) kmalloc(7 * sizeof(Elf32_auxv_t)))) {
                        err = -ENOMEM;
                        goto done;
                }
                Elf32_auxv_t *auxvent = auxv;

                /* Add all the necessary entries */
                auxvent->a_type = AT_PHDR;
                auxvent->a_un.a_ptr = pht;
                auxvent++;

                auxvent->a_type = AT_PHENT;
                auxvent->a_un.a_val = header.e_phentsize;
                auxvent++;

                auxvent->a_type = AT_PHNUM;
                auxvent->a_un.a_val = header.e_phnum;
                auxvent++;

                auxvent->a_type = AT_ENTRY;
                auxvent->a_un.a_ptr = (void *) header.e_entry;
                auxvent++;

                auxvent->a_type = AT_BASE;
                auxvent->a_un.a_ptr = interpbase;
                auxvent++;

                auxvent->a_type = AT_PAGESZ;
                auxvent->a_un.a_val = PAGE_SIZE;
                auxvent++;

                auxvent->a_type = AT_NULL;

        } else {
                /* Just put AT_NULL (we don't really need this at all) */
                if (NULL == (auxv = (Elf32_auxv_t *) kmalloc(sizeof(Elf32_auxv_t)))) {
                        err = -ENOMEM;
                        goto done;
                }
                auxv->a_type = AT_NULL;
        }

        /* Allocate a stack. We put the stack immediately below the program text.
         * (in the Intel x86 ELF supplement pp 59 "example stack", that is where the
         * stack is located). I suppose we can add this "extra page for magic data" too */
        uint32_t stack_lopage = ADDR_TO_PN(proglow) - (DEFAULT_STACK_SIZE / PAGE_SIZE) - 1;
        err = vmmap_map(map, NULL, stack_lopage, (DEFAULT_STACK_SIZE / PAGE_SIZE) + 1,
                        PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, 0, 0, NULL);
        KASSERT(0 == err);
        dbg(DBG_ELF, "Mapped stack at low addr 0x%p, size %#x\n",
            PN_TO_ADDR(stack_lopage), DEFAULT_STACK_SIZE + PAGE_SIZE);


        /* Copy out arguments onto the user stack */
        int argc, envc, auxc;
        size_t argsize = _elf32_calc_argsize(argv, envp, auxv, phtsize, &argc, &envc, &auxc);
        /* Make sure it fits on the stack */
        if (argsize >= DEFAULT_STACK_SIZE) {
                err = -E2BIG;
                goto done;
        }
        /* Copy arguments into kernel buffer */
        if (NULL == (argbuf = (char *) kmalloc(argsize))) {
                err = -ENOMEM;
                goto done;
        }
        /* Calculate where in user space we start putting the args. */
        void *arglow = (void *)((uintptr_t)(((char *) proglow) - argsize) & ~PTR_MASK);
        /* Copy everything into the user address space, modifying addresses in
         * argv, envp, and auxv to be user addresses as we go. */
        _elf32_load_args(map, arglow, argsize, argbuf, argv, envp, auxv, argc, envc, auxc, phtsize);

        dbg(DBG_ELF, "Past the point of no return. Swapping to map at 0x%p, setting brk to 0x%p\n", map, proghigh);
        /* the final threshold / What warm unspoken secrets will we learn? / Beyond
         * the point of no return ... */

        /* Give the process the new mappings. */
        vmmap_t *tempmap = curproc->p_vmmap;
        curproc->p_vmmap = map;
        map = tempmap; /* So the old maps are cleaned up */
        curproc->p_vmmap->vmm_proc = curproc;
        map->vmm_proc = NULL;

        /* Flush the process pagetables and TLB */
        pt_unmap_range(curproc->p_pagedir, USER_MEM_LOW, USER_MEM_HIGH);
        tlb_flush_all();

        /* Set the process break and starting break (immediately after the mapped-in
         * text/data/bss from the executable) */
        curproc->p_brk = proghigh;
        curproc->p_start_brk = proghigh;

        strncpy(curproc->p_comm, filename, PROC_NAME_LEN);

        /* Tell the caller the correct stack pointer and instruction
         * pointer to begin execution in user space */
        *eip = (uint32_t) entry;
        *esp = ((uint32_t) arglow) - 4; /* Space on the user stack for the (garbage) return address */
        /* Note that the return address will be fixed by the userland entry code,
         * whether in static or dynamic */

        /* And we're done */
        err = 0;

done:
        if (NULL != map) {
                vmmap_destroy(map);
        }
        if (NULL != file) {
                fput(file);
        }
        if (NULL != pht) {
                kfree(pht);
        }
        if (NULL != interpname) {
                kfree(interpname);
        }
        if (0 <= interpfd) {
                do_close(interpfd);
        }
        if (NULL != interpfile) {
                fput(interpfile);
        }
        if (NULL != interppht) {
                kfree(interppht);
        }
        if (NULL != auxv) {
                kfree(auxv);
        }
        if (NULL != argbuf) {
                kfree(argbuf);
        }
        return err;
}