// Allocate `size` bytes for data that will be handed to the kernel and
// report the address twice: *_address receives the address the kernel will
// use, *_mappedAddress the loader-space pointer through which the loader
// can access the region right now.
static inline status_t
AllocateRegion(AddrType* _address, AddrType size, uint8 protection,
	void **_mappedAddress)
{
#ifdef _BOOT_PLATFORM_EFI
	// On EFI the platform code knows the loader->kernel address mapping;
	// allocate first, then ask it to translate the mapped address.
	void* address = (void*)*_address;

	status_t status = platform_allocate_region(&address, size, protection,
		false);
	if (status != B_OK)
		return status;

	*_mappedAddress = address;
	platform_bootloader_address_to_kernel_address(address, _address);
#else
	// Assume the real 64-bit base address is KERNEL_LOAD_BASE_64_BIT and
	// the mappings in the loader address space are at KERNEL_LOAD_BASE.
	// Truncate the requested address to 32 bits to get the loader-space
	// address to allocate at.
	void* address = (void*)(addr_t)(*_address & 0xffffffff);

	status_t status = platform_allocate_region(&address, size, protection,
		false);
	if (status != B_OK)
		return status;

	*_mappedAddress = address;
	// Rebase the (possibly relocated) loader address back into the kernel's
	// 64-bit load range.
	*_address = (AddrType)(addr_t)address + KERNEL_LOAD_BASE_64_BIT
		- KERNEL_LOAD_BASE;
#endif

	return B_OK;
}
// Report a fixed 1024x768 video mode at 16 bits per pixel, with the
// framebuffer located at fBase, by filling in gKernelArgs.frame_buffer.
status_t
ArchFBArmOmap3::Probe()
{
#if 0
	// TODO: More dynamic framebuffer base?
	if (!fBase) {
		int err = platform_allocate_region(&gFrameBufferBase,
			gKernelArgs.frame_buffer.physical_buffer.size, 0, false);
		if (err < B_OK)
			return err;
		gKernelArgs.frame_buffer.physical_buffer.start
			= (addr_t)gFrameBufferBase;
		dprintf("video framebuffer: %p\n", gFrameBufferBase);
	}
#else
	// Use the framebuffer base this object was configured with.
	gKernelArgs.frame_buffer.physical_buffer.start = fBase;
#endif

	// Hard-coded mode parameters.
	gKernelArgs.frame_buffer.width = 1024;
	gKernelArgs.frame_buffer.height = 768;
	gKernelArgs.frame_buffer.depth = 16;
	// 16 bpp -> two bytes per pixel.
	gKernelArgs.frame_buffer.bytes_per_row
		= gKernelArgs.frame_buffer.width * 2;
	gKernelArgs.frame_buffer.physical_buffer.size
		= gKernelArgs.frame_buffer.bytes_per_row
			* gKernelArgs.frame_buffer.height;

	TRACE("video mode: %ux%ux%u\n", gKernelArgs.frame_buffer.width,
		gKernelArgs.frame_buffer.height, gKernelArgs.frame_buffer.depth);

	return B_OK;
}
/*!	This function can be used to allocate memory that is going
	to be passed over to the kernel. For example, the preloaded_image
	structures are allocated this way.
	The boot loader heap doesn't make it into the kernel!

	Returns NULL when the underlying platform allocation fails; panics when
	the kernel_args range table is full, since the kernel could then not
	find the allocated memory.
*/
extern "C" void*
kernel_args_malloc(size_t size)
{
	//dprintf("kernel_args_malloc(): %ld bytes (%ld bytes left)\n", size, sFree);

	if (sFirstFree != NULL && size <= sFree) {
		// there is enough space in the current buffer
		void* address = sFirstFree;
		sFirstFree = (void*)((addr_t)sFirstFree + size);
		sLast = address;
		sFree -= size;

		return address;
	}

	if (size > kChunkSize / 2 && sFree < size) {
		// the block is so large, we'll allocate a new block for it
		void* block = NULL;
		if (platform_allocate_region(&block, size, B_READ_AREA | B_WRITE_AREA,
				false) != B_OK) {
			return NULL;
		}

		// record the new range so the kernel can find and map it later
		if (add_kernel_args_range(block, size) != B_OK)
			panic("kernel_args max range too low!\n");
		return block;
	}

	// just allocate a new block and "close" the old one
	void* block = NULL;
	if (platform_allocate_region(&block, kChunkSize,
			B_READ_AREA | B_WRITE_AREA, false) != B_OK) {
		return NULL;
	}

	// the new chunk becomes the current bump-allocation buffer;
	// whatever was left in the old chunk is abandoned
	sFirstFree = (void*)((addr_t)block + size);
	sLast = block;
	sFree = kChunkSize - size;
	if (add_kernel_args_range(block, kChunkSize) != B_OK)
		panic("kernel_args max range too low!\n");

	return block;
}
// Allocate `size` bytes; the loader-space mapping is identical to the
// address stored back into *_address.
//
// Fix: the original cast `(void**)_address` made platform_allocate_region()
// write a pointer directly through an AddrType*. That violates strict
// aliasing and corrupts memory whenever sizeof(AddrType) != sizeof(void*)
// (AddrType is a width parameter here). Use a properly typed temporary
// instead, matching the pattern used by the other AllocateRegion variant.
static inline status_t
AllocateRegion(AddrType* _address, AddrType size, uint8 protection,
	void** _mappedAddress)
{
	void* address = (void*)(addr_t)*_address;

	status_t status = platform_allocate_region(&address, size, protection,
		false);
	if (status != B_OK)
		return status;

	*_address = (AddrType)(addr_t)address;
	*_mappedAddress = address;
	return B_OK;
}
// Read gzip-compressed data from `partition` starting at `offset`, inflate
// it into a single memory region (allocated on demand and handed to
// `regionDeleter`), and store the uncompressed size in *inflatedBytes.
status_t
TarFS::Volume::_Inflate(boot::Partition* partition, void* cookie, off_t offset,
	RegionDeleter& regionDeleter, size_t* inflatedBytes)
{
	char in[2048];
	// Positional z_stream initialization -- the trailing comments name the
	// field each initializer corresponds to.
	z_stream zStream = {
		(Bytef*)in,		// next in
		sizeof(in),		// avail in
		0,				// total in
		NULL,			// next out
		0,				// avail out
		0,				// total out
		0,				// msg
		0,				// state
		Z_NULL,			// zalloc
		Z_NULL,			// zfree
		Z_NULL,			// opaque
		0,				// data type
		0,				// adler
		0,				// reserved
	};

	int status;

	char* out = (char*)regionDeleter.Get();
	bool headerRead = false;

	do {
		// A short read is only fatal when nothing could be read at all;
		// the final chunk of the stream may legitimately be short.
		ssize_t bytesRead = partition->ReadAt(cookie, offset, in, sizeof(in));
		if (bytesRead != (ssize_t)sizeof(in)) {
			if (bytesRead <= 0) {
				status = Z_STREAM_ERROR;
				break;
			}
		}

		zStream.avail_in = bytesRead;
		zStream.next_in = (Bytef*)in;

		if (!headerRead) {
			// check and skip gzip header
			if (!skip_gzip_header(&zStream))
				return B_BAD_DATA;
			headerRead = true;

			if (!out) {
				// allocate memory for the uncompressed data
				if (platform_allocate_region((void**)&out, kTarRegionSize,
						B_READ_AREA | B_WRITE_AREA, false) != B_OK) {
					TRACE(("tarfs: allocating region failed!\n"));
					return B_NO_MEMORY;
				}
				regionDeleter.SetTo(out);
			}

			// The whole uncompressed image must fit into one fixed-size
			// output region.
			zStream.avail_out = kTarRegionSize;
			zStream.next_out = (Bytef*)out;

			// Negative windowBits: raw deflate data, since the gzip header
			// was already consumed above.
			status = inflateInit2(&zStream, -15);
			if (status != Z_OK)
				return B_ERROR;
		}

		status = inflate(&zStream, Z_SYNC_FLUSH);
		offset += bytesRead;

		if (zStream.avail_in != 0 && status != Z_STREAM_END)
			dprintf("tarfs: didn't read whole block: %s\n", zStream.msg);
	} while (status == Z_OK);

	inflateEnd(&zStream);

	if (status != Z_STREAM_END) {
		TRACE(("tarfs: inflating failed: %d!\n", status));
		return B_BAD_DATA;
	}

	*inflatedBytes = zStream.total_out;

	return B_OK;
}
/*!	Loads an ELF32 image from `fd` into a freshly allocated region and
	fills in `image` (text/data regions, dynamic section, relocated entry
	point, and optionally debug symbols). On failure, the program header
	buffer and any already-allocated region are released again.
*/
status_t
elf_load_image(int fd, preloaded_image *image)
{
	size_t totalSize;
	status_t status;

	TRACE(("elf_load_image(fd = %d, image = %p)\n", fd, image));

	// read and verify the ELF header
	struct Elf32_Ehdr &elfHeader = image->elf_header;
	ssize_t length = read_pos(fd, 0, &elfHeader, sizeof(Elf32_Ehdr));
	if (length < (ssize_t)sizeof(Elf32_Ehdr))
		return B_BAD_TYPE;

	status = verify_elf_header(elfHeader);
	if (status < B_OK)
		return status;

	// read in all program headers at once
	ssize_t size = elfHeader.e_phnum * elfHeader.e_phentsize;
	Elf32_Phdr *programHeaders = (struct Elf32_Phdr *)malloc(size);
	if (programHeaders == NULL) {
		dprintf("error allocating space for program headers\n");
		return B_NO_MEMORY;
	}

	length = read_pos(fd, elfHeader.e_phoff, programHeaders, size);
	if (length < size) {
		TRACE(("error reading in program headers\n"));
		status = B_ERROR;
		goto error1;
	}

	// create an area large enough to hold the image
	// First pass: find the (single) read/write and the (single) executable
	// PT_LOAD segments and compute their page-aligned extents.
	image->data_region.size = 0;
	image->text_region.size = 0;

	for (int32 i = 0; i < elfHeader.e_phnum; i++) {
		Elf32_Phdr &header = programHeaders[i];

		switch (header.p_type) {
			case PT_LOAD:
				break;
			case PT_DYNAMIC:
				// remember the link-time dynamic section; relocated below
				image->dynamic_section.start = header.p_vaddr;
				image->dynamic_section.size = header.p_memsz;
				continue;
			case PT_INTERP:
			case PT_PHDR:
				// known but unused type
				continue;
			default:
				dprintf("unhandled pheader type 0x%lx\n", header.p_type);
				continue;
		}

		elf_region *region;
		if (header.IsReadWrite()) {
			if (image->data_region.size != 0) {
				dprintf("elf: rw already handled!\n");
				continue;
			}
			region = &image->data_region;
		} else if (header.IsExecutable()) {
			if (image->text_region.size != 0) {
				dprintf("elf: ro already handled!\n");
				continue;
			}
			region = &image->text_region;
		} else
			continue;

		// Page-align the region. delta starts as the negated link-time
		// address, so that once start is rebased to the load address below,
		// (start + delta) yields the relocation offset.
		region->start = ROUNDDOWN(header.p_vaddr, B_PAGE_SIZE);
		region->size = ROUNDUP(header.p_memsz + (header.p_vaddr % B_PAGE_SIZE),
			B_PAGE_SIZE);
		region->delta = -region->start;

		TRACE(("segment %d: start = %p, size = %lu, delta = %lx\n", i,
			region->start, region->size, region->delta));
	}

	// found both, text and data?
	if (image->data_region.size == 0 || image->text_region.size == 0) {
		dprintf("Couldn't find both text and data segment!\n");
		status = B_BAD_DATA;
		goto error1;
	}

	// get the segment order
	elf_region *firstRegion;
	elf_region *secondRegion;
	if (image->text_region.start < image->data_region.start) {
		firstRegion = &image->text_region;
		secondRegion = &image->data_region;
	} else {
		firstRegion = &image->data_region;
		secondRegion = &image->text_region;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// inbetween.
	totalSize = secondRegion->start + secondRegion->size - firstRegion->start;
	if (totalSize > image->text_region.size + image->data_region.size
			+ 8 * 1024) {
		status = B_BAD_DATA;
		goto error1;
	}

	// The kernel and the modules are relocatable, thus
	// platform_allocate_region() can automatically allocate an address,
	// but shall prefer the specified base address.
	if (platform_allocate_region((void **)&firstRegion->start, totalSize,
			B_READ_AREA | B_WRITE_AREA, false) < B_OK) {
		status = B_NO_MEMORY;
		goto error1;
	}

	// initialize the region pointers to the allocated region
	// (firstRegion->start now holds the actual load address;
	// start + delta is the relocation offset of the first region)
	secondRegion->start += firstRegion->start + firstRegion->delta;

	// turn each delta from -linkAddress into loadAddress - linkAddress
	image->data_region.delta += image->data_region.start;
	image->text_region.delta += image->text_region.start;

	// load program data
	// Second pass: copy each PT_LOAD segment's file contents into its
	// relocated region and zero the remainder (BSS).
	for (int i = 0; i < elfHeader.e_phnum; i++) {
		Elf32_Phdr &header = programHeaders[i];

		if (header.p_type != PT_LOAD)
			continue;

		elf_region *region;
		if (header.IsReadWrite())
			region = &image->data_region;
		else if (header.IsExecutable())
			region = &image->text_region;
		else
			continue;

		TRACE(("load segment %d (%ld bytes)...\n", i, header.p_filesz));

		length = read_pos(fd, header.p_offset,
			(void *)(region->start + (header.p_vaddr % B_PAGE_SIZE)),
			header.p_filesz);
		if (length < (ssize_t)header.p_filesz) {
			status = B_BAD_DATA;
			dprintf("error reading in seg %d\n", i);
			goto error2;
		}

		// Clear anything above the file size (that may also contain the BSS
		// area)
		uint32 offset = (header.p_vaddr % B_PAGE_SIZE) + header.p_filesz;
		if (offset < region->size)
			memset((void *)(region->start + offset), 0, region->size - offset);
	}

	// offset dynamic section, and program entry addresses by the delta of the
	// regions
	image->dynamic_section.start += image->text_region.delta;
	image->elf_header.e_entry += image->text_region.delta;

	image->num_debug_symbols = 0;
	image->debug_symbols = NULL;
	image->debug_string_table = NULL;

	if (sLoadElfSymbols)
		load_elf_symbol_table(fd, image);

	free(programHeaders);

	return B_OK;

error2:
	if (image->text_region.start != 0)
		platform_free_region((void *)image->text_region.start, totalSize);
error1:
	free(programHeaders);

	return status;
}