void*
mmap(void* address, size_t length, int protection, int flags, int fd,
	off_t offset)
{
	// offset and length must be page-aligned
	if (length == 0 || offset % B_PAGE_SIZE != 0) {
		__set_errno(B_BAD_VALUE);
		return MAP_FAILED;
	}

	// check anonymous mapping
	if ((flags & MAP_ANONYMOUS) != 0) {
		fd = -1;
	} else if (fd < 0) {
		__set_errno(EBADF);
		return MAP_FAILED;
	}

	// either MAP_SHARED or MAP_PRIVATE must be specified
	if (((flags & MAP_SHARED) != 0) == ((flags & MAP_PRIVATE) != 0)) {
		__set_errno(B_BAD_VALUE);
		return MAP_FAILED;
	}

	// translate mapping, address specification, and protection
	int mapping = (flags & MAP_SHARED) != 0
		? REGION_NO_PRIVATE_MAP : REGION_PRIVATE_MAP;

	uint32 addressSpec;
	if ((flags & MAP_FIXED) != 0)
		addressSpec = B_EXACT_ADDRESS;
	else if (address != NULL)
		addressSpec = B_BASE_ADDRESS;
	else
		addressSpec = B_RANDOMIZED_ANY_ADDRESS;

	uint32 areaProtection = 0;
	if ((protection & PROT_READ) != 0)
		areaProtection |= B_READ_AREA;
	if ((protection & PROT_WRITE) != 0)
		areaProtection |= B_WRITE_AREA;
	if ((protection & PROT_EXEC) != 0)
		areaProtection |= B_EXECUTE_AREA;

	// ask the kernel to map
	area_id area = _kern_map_file("mmap area", &address, addressSpec, length,
		areaProtection, mapping, true, fd, offset);
	if (area < 0) {
		__set_errno(area);
		return MAP_FAILED;
	}

	return address;
}
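// Usage sketch (an addition for illustration, not part of the original
// source): mapping a whole file read-only through the mmap() wrapper above
// and closing the descriptor afterwards. map_whole_file() and outLength are
// hypothetical names chosen here only to show the call pattern.
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

static void*
map_whole_file(const char* path, size_t* outLength)
{
	int fd = open(path, O_RDONLY);
	if (fd < 0)
		return MAP_FAILED;

	struct stat st;
	if (fstat(fd, &st) != 0 || st.st_size == 0) {
		close(fd);
		return MAP_FAILED;
	}

	// offset 0 is trivially page-aligned, so the sanity checks at the top of
	// mmap() pass; MAP_PRIVATE selects REGION_PRIVATE_MAP in the kernel call
	void* base = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);

	// the mapping stays valid after the file descriptor has been closed
	close(fd);

	if (base != MAP_FAILED)
		*outLength = st.st_size;
	return base;
}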
status_t
map_image(int fd, char const* path, image_t* image, bool fixed)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		// for BeOS compatibility: if we load an old BeOS executable, we
		// have to relocate it, if possible - we recognize it because the
		// vmstart is set to 0 (hopefully always)
		if (fixed && image->regions[i].vmstart == 0)
			fixed = false;

		uint32 regionAddressSpecifier;
		get_image_region_load_address(image, i,
			i > 0 ? loadAddress - image->regions[i - 1].vmstart : 0,
			fixed, loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
	if (reservedSize > length + 8 * 1024)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from it
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%lu%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i,
			i > 0 ? image->regions[i - 1].delta : 0, fixed, loadAddress,
			addressSpecifier);

		// If the image position is arbitrary, we must let it point to the
		// start of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			// Map all segments r/w first -- write access might be needed for
			// relocations. When we're done with those we change the
			// protection of read-only segments back to read-only. We map
			// those segments over-committing, since quite likely only a
			// relatively small number of pages needs to be touched and we
			// want to avoid a lot of memory being committed for them
			// temporarily, just because we have to write-map them.
			uint32 protection = B_READ_AREA | B_WRITE_AREA
				| ((image->regions[i].flags & RFLAG_RW) != 0
					? 0 : B_OVERCOMMITTING_AREA);
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP,
				false, fd, PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void*)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void*)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}