/*!	Counts how many memory regions (areas) will be needed to map the image
	described by the given program header table.

	Each PT_LOAD segment needs one region; a segment whose memory size
	exceeds its file size may need a second, anonymous region for the
	BSS part — but only if that part spills onto pages beyond those that
	back the file data.

	\param imagePath Path of the image, used for error output only.
	\param buff Pointer to the raw program header table.
	\param phnum Number of program headers in the table.
	\param phentsize Size in bytes of a single program header entry.
	\return The number of regions required.
*/
static int32
count_regions(const char* imagePath, char const* buff, int phnum,
	int phentsize)
{
	int32 count = 0;

	for (int i = 0; i < phnum; i++) {
		elf_phdr* header = (elf_phdr*)(buff + i * phentsize);

		switch (header->p_type) {
			case PT_LOAD:
				count += 1;
				if (header->p_memsz != header->p_filesz) {
					// The in-memory extent is larger than the file extent.
					// If the page-rounded ends differ, the BSS tail needs
					// its own anonymous region.
					addr_t memEnd = TO_PAGE_SIZE(header->p_vaddr
						+ header->p_memsz);
					addr_t fileEnd = TO_PAGE_SIZE(header->p_vaddr
						+ header->p_filesz);
					if (memEnd != fileEnd)
						count += 1;
				}
				break;

			case PT_NULL:		// NOP header
			case PT_DYNAMIC:	// will be handled at some other place
			case PT_INTERP:		// should check here for appropriate
								// interpreter
			case PT_NOTE:		// unsupported
			case PT_SHLIB:		// undefined semantics
			case PT_PHDR:		// we don't use it
			case PT_RELRO:		// not implemented yet, but can be ignored
			case PT_STACK:		// we don't use it
			case PT_TLS:		// will be handled at some other place
				// none of these contribute regions here
				break;

			default:
				FATAL("%s: Unhandled pheader type in count 0x%" B_PRIx32
					"\n", imagePath, header->p_type);
				break;
		}
	}

	return count;
}
void rldelf_init(void) { init_add_ons(); // create the debug area { size_t size = TO_PAGE_SIZE(sizeof(runtime_loader_debug_area)); runtime_loader_debug_area *area; area_id areaID = _kern_create_area(RUNTIME_LOADER_DEBUG_AREA_NAME, (void **)&area, B_RANDOMIZED_ANY_ADDRESS, size, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA); if (areaID < B_OK) { FATAL("Failed to create debug area.\n"); _kern_loading_app_failed(areaID); } area->loaded_images = &get_loaded_images(); } // initialize error message if needed if (report_errors()) { void *buffer = malloc(1024); if (buffer == NULL) return; gErrorMessage.SetTo(buffer, 1024, 'Rler'); } }
/*!	Maps all regions of \a image into the team's address space.

	First computes the total address range needed by all regions and
	reserves it, then creates an area (anonymous regions) or maps the
	file (file-backed regions) for each region at its final address.
	On success the regions' \c delta and \c vmstart fields are updated
	to reflect the actual load addresses, and \c dynamic_ptr is
	relocated accordingly.

	\param fd Open file descriptor of the image file.
	\param path Path of the image; its base name seeds the area names.
	\param image The image whose regions (from parse_program_headers())
		shall be mapped.
	\param fixed Whether the image requests loading at its linked
		addresses; ignored for old BeOS executables (vmstart == 0).
	\return \c B_OK on success, \c B_BAD_DATA if the segments leave an
		unreasonable gap, \c B_NO_MEMORY if the address range cannot be
		reserved, or the error of the failing area creation/mapping.
*/
status_t
map_image(int fd, char const* path, image_t* image, bool fixed)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress = 0;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		// for BeOS compatibility: if we load an old BeOS executable, we
		// have to relocate it, if possible - we recognize it because the
		// vmstart is set to 0 (hopefully always)
		if (fixed && image->regions[i].vmstart == 0)
			fixed = false;

		uint32 regionAddressSpecifier;
		// For the first region there is no predecessor to be relative to;
		// guard the regions[i - 1] access (out-of-bounds read otherwise)
		// and the use of the not yet computed loadAddress.
		get_image_region_load_address(image, i,
			i > 0 ? loadAddress - image->regions[i - 1].vmstart : 0,
			fixed, loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// inbetween.
	if (reservedSize > length + 8 * 1024)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from that one
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%lu%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		// Same guard as above: region 0 has no predecessor delta.
		get_image_region_load_address(image, i,
			i > 0 ? image->regions[i - 1].delta : 0, fixed, loadAddress,
			addressSpecifier);

		// If the image position is arbitrary, we must let it point to the
		// start of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			// BSS-only region: back it with fresh anonymous memory
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			// Map all segments r/w first -- write access might be needed for
			// relocations. When we've done with those we change the
			// protection of read-only segments back to read-only. We map
			// those segments over-committing, since quite likely only a
			// relatively small number of pages needs to be touched and we
			// want to avoid a lot of memory to be committed for them
			// temporarily, just because we have to write map them.
			uint32 protection = B_READ_AREA | B_WRITE_AREA
				| ((image->regions[i].flags & RFLAG_RW) != 0
					? 0 : B_OVERCOMMITTING_AREA);
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP,
				false, fd, PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void *)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment: zero the portion of the
			// last file-backed page that lies beyond the file data
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void *)startClearing, 0, toClear);
			}
		}

		// record where the region actually ended up
		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}
/*!	Fills in the image's region table from the ELF program headers.

	Walks the program header table and creates one region descriptor per
	PT_LOAD segment; a segment whose memory size exceeds its file size is
	split into a file-backed region plus an anonymous (RFLAG_ANON) region
	for the BSS tail, when that tail extends onto extra pages. Also records
	the PT_DYNAMIC address and registers a PT_TLS template, if present.

	NOTE(review): no bounds check on regcount — this relies on the caller
	having sized image->regions via count_regions() over the same header
	table, so the split condition here must stay in sync with it.

	\param image The image whose regions/dynamic_ptr/dso_tls_id to fill in.
	\param buff Pointer to the raw program header table.
	\param phnum Number of program headers in the table.
	\param phentsize Size in bytes of a single program header entry.
	\return \c B_OK on success, \c B_BAD_DATA on an unhandled header type.
*/
static status_t
parse_program_headers(image_t* image, char* buff, int phnum, int phentsize)
{
	elf_phdr* pheader;
	int regcount;
	int i;

	// "no TLS segment" marker; overwritten if a PT_TLS header is found
	image->dso_tls_id = unsigned(-1);

	regcount = 0;
	for (i = 0; i < phnum; i++) {
		pheader = (elf_phdr*)(buff + i * phentsize);

		switch (pheader->p_type) {
			case PT_NULL:
				/* NOP header */
				break;
			case PT_LOAD:
				if (pheader->p_memsz == pheader->p_filesz) {
					/*
					 * everything in one area
					 */
					image->regions[regcount].start = pheader->p_vaddr;
					image->regions[regcount].size = pheader->p_memsz;
					image->regions[regcount].vmstart
						= PAGE_BASE(pheader->p_vaddr);
					image->regions[regcount].vmsize
						= TO_PAGE_SIZE(pheader->p_memsz
							+ PAGE_OFFSET(pheader->p_vaddr));
					image->regions[regcount].fdstart = pheader->p_offset;
					image->regions[regcount].fdsize = pheader->p_filesz;
					image->regions[regcount].delta = 0;
					image->regions[regcount].flags = 0;
					if (pheader->p_flags & PF_WRITE) {
						// this is a writable segment
						image->regions[regcount].flags |= RFLAG_RW;
					}
				} else {
					/*
					 * may require splitting
					 */
					// page-rounded ends of the memory image vs. the file
					// data — if they differ, the BSS needs its own region
					addr_t A = TO_PAGE_SIZE(pheader->p_vaddr
						+ pheader->p_memsz);
					addr_t B = TO_PAGE_SIZE(pheader->p_vaddr
						+ pheader->p_filesz);

					image->regions[regcount].start = pheader->p_vaddr;
					image->regions[regcount].size = pheader->p_filesz;
					image->regions[regcount].vmstart
						= PAGE_BASE(pheader->p_vaddr);
					image->regions[regcount].vmsize
						= TO_PAGE_SIZE(pheader->p_filesz
							+ PAGE_OFFSET(pheader->p_vaddr));
					image->regions[regcount].fdstart = pheader->p_offset;
					image->regions[regcount].fdsize = pheader->p_filesz;
					image->regions[regcount].delta = 0;
					image->regions[regcount].flags = 0;
					if (pheader->p_flags & PF_WRITE) {
						// this is a writable segment
						image->regions[regcount].flags |= RFLAG_RW;
					}

					if (A != B) {
						/*
						 * yeah, it requires splitting
						 */
						// anonymous region directly after the file-backed
						// one, covering the remaining (zeroed) pages
						regcount += 1;
						image->regions[regcount].start = pheader->p_vaddr;
						image->regions[regcount].size
							= pheader->p_memsz - pheader->p_filesz;
						image->regions[regcount].vmstart
							= image->regions[regcount-1].vmstart
								+ image->regions[regcount-1].vmsize;
						image->regions[regcount].vmsize
							= TO_PAGE_SIZE(pheader->p_memsz
									+ PAGE_OFFSET(pheader->p_vaddr))
								- image->regions[regcount-1].vmsize;
						image->regions[regcount].fdstart = 0;
						image->regions[regcount].fdsize = 0;
						image->regions[regcount].delta = 0;
						image->regions[regcount].flags = RFLAG_ANON;
						if (pheader->p_flags & PF_WRITE) {
							// this is a writable segment
							image->regions[regcount].flags |= RFLAG_RW;
						}
					}
				}
				regcount += 1;
				break;
			case PT_DYNAMIC:
				// remember where the dynamic section lives
				image->dynamic_ptr = pheader->p_vaddr;
				break;
			case PT_INTERP:
				// should check here for appropriate interpreter
				break;
			case PT_NOTE:
				// unsupported
				break;
			case PT_SHLIB:
				// undefined semantics
				break;
			case PT_PHDR:
				// we don't use it
				break;
			case PT_RELRO:
				// not implemented yet, but can be ignored
				break;
			case PT_STACK:
				// we don't use it
				break;
			case PT_TLS:
				// register the TLS template for this DSO
				image->dso_tls_id = TLSBlockTemplates::Get().Register(
					TLSBlockTemplate((void*)pheader->p_vaddr,
						pheader->p_filesz, pheader->p_memsz));
				break;
			default:
				FATAL("%s: Unhandled pheader type in parse 0x%" B_PRIx32
					"\n", image->path, pheader->p_type);
				return B_BAD_DATA;
		}
	}

	return B_OK;
}