Example #1
void
rldelf_init(void)
{
	init_add_ons();

	// create the debug area
	{
		size_t size = TO_PAGE_SIZE(sizeof(runtime_loader_debug_area));

		runtime_loader_debug_area *area;
		area_id areaID = _kern_create_area(RUNTIME_LOADER_DEBUG_AREA_NAME,
			(void **)&area, B_RANDOMIZED_ANY_ADDRESS, size, B_NO_LOCK,
			B_READ_AREA | B_WRITE_AREA);
		if (areaID < B_OK) {
			FATAL("Failed to create debug area.\n");
			_kern_loading_app_failed(areaID);
		}

		area->loaded_images = &get_loaded_images();
	}

	// initialize error message if needed
	if (report_errors()) {
		void *buffer = malloc(1024);
		if (buffer == NULL)
			return;

		gErrorMessage.SetTo(buffer, 1024, 'Rler');
	}
}
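The debug area set up above is meant to be found from outside the team by its name. Purely as an illustration (not part of the original example), a debugger could walk the target team's areas with the public get_next_area_info() API and compare names; the helper below is a hypothetical sketch that assumes <OS.h>, <string.h> and the runtime_loader header defining RUNTIME_LOADER_DEBUG_AREA_NAME are available.

#include <string.h>

#include <OS.h>

// Hypothetical sketch: locate a team's runtime_loader debug area by name.
// Assumes RUNTIME_LOADER_DEBUG_AREA_NAME is visible to the caller.
static area_id
find_debug_area(team_id team)
{
	area_info info;
	ssize_t cookie = 0;

	while (get_next_area_info(team, &cookie, &info) == B_OK) {
		if (strcmp(info.name, RUNTIME_LOADER_DEBUG_AREA_NAME) == 0)
			return info.area;
	}

	return B_NAME_NOT_FOUND;
}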
Example #2
static status_t
add_area(size_t size)
{
	void *base;
	area_id area = _kern_create_area("rld heap", &base, B_ANY_ADDRESS, size,
		B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
	if (area < B_OK)
		return area;

	sAvailable += size - sizeof(uint32);

	// declare the whole heap as one chunk, and add it
	// to the free list

	free_chunk *chunk = (free_chunk *)base;
	chunk->size = size;
	chunk->next = sFreeAnchor.next;

	sFreeAnchor.next = chunk;
	return B_OK;
}
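add_area() only seeds the free list; the matching allocation path is not part of this example. The fragment below is a simplified, hypothetical first-fit walk over that list, just to show how a chunk seeded by add_area() could be handed out (the real rld heap additionally splits oversized chunks and keeps a size header in front of each allocation).

// Hypothetical sketch of a first-fit lookup over the free list seeded by
// add_area(); unlinks and returns the first chunk large enough, or NULL.
static free_chunk *
find_free_chunk(size_t size)
{
	free_chunk *last = &sFreeAnchor;
	free_chunk *chunk = sFreeAnchor.next;

	while (chunk != NULL && chunk->size < size) {
		last = chunk;
		chunk = chunk->next;
	}

	if (chunk == NULL)
		return NULL;

	// unlink the chunk from the free list before handing it out
	last->next = chunk->next;
	return chunk;
}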
Example #3
status_t
map_image(int fd, char const* path, image_t* image, bool fixed)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		// for BeOS compatibility: if we load an old BeOS executable, we
		// have to relocate it, if possible - we recognize it because the
		// vmstart is set to 0 (hopefully always)
		if (fixed && image->regions[i].vmstart == 0)
			fixed = false;

		uint32 regionAddressSpecifier;
		get_image_region_load_address(image, i,
			i > 0 ? loadAddress - image->regions[i - 1].vmstart : 0, fixed,
			loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
	if (reservedSize > length + 8 * 1024)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from that one
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%lu%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i,
			i > 0 ? image->regions[i - 1].delta : 0, fixed, loadAddress,
			addressSpecifier);

		// If the image position is arbitrary, we must let it point to the start
		// of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			// Map all segments r/w first -- write access might be needed for
			// relocations. When we're done with those, we change the
			// protection of read-only segments back to read-only. We map
			// those segments over-committing, since quite likely only a
			// relatively small number of pages needs to be touched and we
			// want to avoid a lot of memory being committed for them
			// temporarily, just because we have to map them writable.
			uint32 protection = B_READ_AREA | B_WRITE_AREA
				| ((image->regions[i].flags & RFLAG_RW) != 0
					? 0 : B_OVERCOMMITTING_AREA);
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP, false,
				fd, PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void *)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void *)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}
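map_image() leans on a few page-rounding helpers (TO_PAGE_SIZE, PAGE_BASE, PAGE_OFFSET) that are defined elsewhere in the runtime_loader sources. Plausible definitions, assuming B_PAGE_SIZE is a power of two, look roughly like this:

// Plausible page-rounding helpers, assuming B_PAGE_SIZE is a power of two;
// not taken from this example.
#define PAGE_MASK		(B_PAGE_SIZE - 1)
#define PAGE_OFFSET(x)	((x) & PAGE_MASK)			// offset within its page
#define PAGE_BASE(x)	((x) & ~(addr_t)PAGE_MASK)	// round down to a page boundary
#define TO_PAGE_SIZE(x)	(((x) + PAGE_MASK) & ~(addr_t)PAGE_MASK)	// round up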
Example #4
area_id
create_area(const char *name, void **address, uint32 addressSpec, size_t size,
	uint32 lock, uint32 protection)
{
	return _kern_create_area(name, address, addressSpec, size, lock, protection);
}
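For completeness, a minimal hypothetical caller of this wrapper could look like the following; the area name and size are made up for illustration.

// Hypothetical usage of the create_area() wrapper above: map 16 pages of
// anonymous, readable/writable memory anywhere in the address space.
static status_t
create_scratch_area(void)
{
	void *address = NULL;
	area_id area = create_area("rld scratch", &address, B_ANY_ADDRESS,
		16 * B_PAGE_SIZE, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
	if (area < B_OK)
		return area;	// the negative value is the error code

	// ... use the memory at `address` here ...
	return B_OK;
}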