void terminate_program(void) { image_t **termList; ssize_t count, i; count = get_sorted_image_list(NULL, &termList, RFLAG_TERMINATED); if (count < B_OK) return; if (gInvalidImageIDs) { // After fork, we lazily rebuild the image IDs of all loaded images update_image_ids(); } TRACE(("%ld: terminate dependencies\n", find_thread(NULL))); for (i = count; i-- > 0;) { image_t *image = termList[i]; TRACE(("%ld: term: %s\n", find_thread(NULL), image->name)); image_event(image, IMAGE_EVENT_UNINITIALIZING); if (image->term_routine) ((init_term_function)image->term_routine)(image->id); image_event(image, IMAGE_EVENT_UNLOADING); } TRACE(("%ld: term done.\n", find_thread(NULL))); free(termList); }
static void
init_dependencies(image_t *image, bool initHead)
{
	// Run the init routines of all images flagged RFLAG_INITIALIZED, in
	// dependency order. Unless initHead is true, the "calling" image itself
	// (sorted last by get_sorted_image_list()) is excluded from the pass.
	image_t **initQueue;
	ssize_t queueLength = get_sorted_image_list(image, &initQueue,
		RFLAG_INITIALIZED);
	if (queueLength <= 0)
		return;

	if (!initHead) {
		// this removes the "calling" image
		image->flags &= ~RFLAG_INITIALIZED;
		initQueue[--queueLength] = NULL;
	}

	TRACE(("%ld: init dependencies\n", find_thread(NULL)));
	for (ssize_t index = 0; index < queueLength; index++) {
		image_t *current = initQueue[index];
		TRACE(("%ld: init: %s\n", find_thread(NULL), current->name));

		if (current->init_routine != 0)
			((init_term_function)current->init_routine)(current->id);

		image_event(current, IMAGE_EVENT_INITIALIZED);
	}
	TRACE(("%ld: init done.\n", find_thread(NULL)));

	free(initQueue);
}
static status_t
relocate_image(image_t *rootImage, image_t *image)
{
	// Apply the architecture-specific relocations to \a image, resolving
	// symbols in the scope rooted at \a rootImage; on success, notify the
	// kernel and interested listeners.
	SymbolLookupCache symbolCache(image);

	status_t error = arch_relocate_image(rootImage, image, &symbolCache);
	if (error >= B_OK) {
		_kern_image_relocated(image->id);
		image_event(image, IMAGE_EVENT_RELOCATED);
		return B_OK;
	}

	FATAL("%s: Troubles relocating: %s\n", image->path, strerror(error));
	return error;
}
/*!	Loads the image \a name of the given \a type, unless it is already loaded
	(add-ons are always reloaded). On success the image is mapped, its
	dynamic segment parsed, its ABI analyzed, and it is registered and
	enqueued; \a *_image is set to the (possibly pre-existing, ref-counted)
	image. Returns B_OK or a negative error code; on error all intermediate
	resources (fd, image struct, mapping) are released.
*/
status_t
load_image(char const* name, image_type type, const char* rpath,
	const char* requestingObjectPath, image_t** _image)
{
	int32 pheaderSize, sheaderSize;
	char path[PATH_MAX];
	ssize_t length;
	char pheaderBuffer[4096];
	int32 numRegions;
	image_t* found;
	image_t* image;
	status_t status;
	int fd;

	elf_ehdr eheader;

	// Have we already loaded that image? Don't check for add-ons -- we always
	// reload them.
	if (type != B_ADD_ON_IMAGE) {
		found = find_loaded_image_by_name(name, APP_OR_LIBRARY_TYPE);

		if (found == NULL && type != B_APP_IMAGE && gProgramImage != NULL) {
			// Special case for add-ons that link against the application
			// executable, with the executable not having a soname set.
			if (const char* lastSlash = strrchr(name, '/')) {
				if (strcmp(gProgramImage->name, lastSlash + 1) == 0)
					found = gProgramImage;
			}
		}

		if (found) {
			atomic_add(&found->ref_count, 1);
			*_image = found;
			KTRACE("rld: load_container(\"%s\", type: %d, rpath: \"%s\") "
				"already loaded", name, type, rpath);
			return B_OK;
		}
	}

	KTRACE("rld: load_container(\"%s\", type: %d, rpath: \"%s\")", name, type,
		rpath);

	strlcpy(path, name, sizeof(path));

	// find and open the file
	fd = open_executable(path, type, rpath, get_program_path(),
		requestingObjectPath, sSearchPathSubDir);
	if (fd < 0) {
		FATAL("Cannot open file %s: %s\n", name, strerror(fd));
		KTRACE("rld: load_container(\"%s\"): failed to open file", name);
		return fd;
	}

	// normalize the image path
	status = _kern_normalize_path(path, true, path);
	if (status != B_OK)
		goto err1;

	// Test again if this image has been registered already - this time,
	// we can check the full path, not just its name as noted.
	// You could end up loading an image twice with symbolic links, else.
	if (type != B_ADD_ON_IMAGE) {
		found = find_loaded_image_by_name(path, APP_OR_LIBRARY_TYPE);
		if (found) {
			atomic_add(&found->ref_count, 1);
			*_image = found;
			_kern_close(fd);
			KTRACE("rld: load_container(\"%s\"): already loaded after all",
				name);
			return B_OK;
		}
	}

	length = _kern_read(fd, 0, &eheader, sizeof(eheader));
	if (length != sizeof(eheader)) {
		status = B_NOT_AN_EXECUTABLE;
		FATAL("%s: Troubles reading ELF header\n", path);
		goto err1;
	}

	status = parse_elf_header(&eheader, &pheaderSize, &sheaderSize);
	if (status < B_OK) {
		FATAL("%s: Incorrect ELF header\n", path);
		goto err1;
	}

	// ToDo: what to do about this restriction??
	if (pheaderSize > (int)sizeof(pheaderBuffer)) {
		// %zu: sizeof() yields size_t, which %lu mismatches on ILP32 targets
		FATAL("%s: Cannot handle program headers bigger than %zu\n",
			path, sizeof(pheaderBuffer));
		status = B_UNSUPPORTED;
		goto err1;
	}

	length = _kern_read(fd, eheader.e_phoff, pheaderBuffer, pheaderSize);
	if (length != pheaderSize) {
		FATAL("%s: Could not read program headers: %s\n", path,
			strerror(length));
		status = B_BAD_DATA;
		goto err1;
	}

	numRegions = count_regions(path, pheaderBuffer, eheader.e_phnum,
		eheader.e_phentsize);
	if (numRegions <= 0) {
		FATAL("%s: Troubles parsing Program headers, numRegions = %" B_PRId32
			"\n", path, numRegions);
		status = B_BAD_DATA;
		goto err1;
	}

	image = create_image(name, path, numRegions);
	if (image == NULL) {
		FATAL("%s: Failed to allocate image_t object\n", path);
		status = B_NO_MEMORY;
		goto err1;
	}

	status = parse_program_headers(image, pheaderBuffer, eheader.e_phnum,
		eheader.e_phentsize);
	if (status < B_OK)
		goto err2;

	if (!assert_dynamic_loadable(image)) {
		FATAL("%s: Dynamic segment must be loadable (implementation "
			"restriction)\n", image->path);
		status = B_UNSUPPORTED;
		goto err2;
	}

	status = map_image(fd, path, image, eheader.e_type == ET_EXEC);
	if (status < B_OK) {
		FATAL("%s: Could not map image: %s\n", image->path, strerror(status));
		status = B_ERROR;
		goto err2;
	}

	if (!parse_dynamic_segment(image)) {
		FATAL("%s: Troubles handling dynamic section\n", image->path);
		status = B_BAD_DATA;
		goto err3;
	}

	if (eheader.e_entry != 0)
		image->entry_point = eheader.e_entry + image->regions[0].delta;

	analyze_image_haiku_version_and_abi(fd, image, eheader, sheaderSize,
		pheaderBuffer, sizeof(pheaderBuffer));

	// If this is the executable image, we init the search path
	// subdir, if the compiler version doesn't match ours.
	if (type == B_APP_IMAGE) {
#if __GNUC__ == 2
		if ((image->abi & B_HAIKU_ABI_MAJOR) == B_HAIKU_ABI_GCC_4)
			sSearchPathSubDir = "x86";
#elif __GNUC__ >= 4
		if ((image->abi & B_HAIKU_ABI_MAJOR) == B_HAIKU_ABI_GCC_2)
			sSearchPathSubDir = "x86_gcc2";
#endif
	}

	set_abi_version(image->abi);

	// init gcc version dependent image flags
	// symbol resolution strategy
	if (image->abi == B_HAIKU_ABI_GCC_2_ANCIENT)
		image->find_undefined_symbol = find_undefined_symbol_beos;

	// init version infos
	status = init_image_version_infos(image);
	if (status != B_OK) {
		// Previously this error was silently dropped and the image was
		// registered anyway; fail and clean up the mapping instead.
		FATAL("%s: Failed to init version infos: %s\n", image->path,
			strerror(status));
		goto err3;
	}

	image->type = type;
	register_image(image, fd, path);
	image_event(image, IMAGE_EVENT_LOADED);

	_kern_close(fd);

	enqueue_loaded_image(image);
	*_image = image;

	KTRACE("rld: load_container(\"%s\"): done: id: %" B_PRId32 " (ABI: %#"
		B_PRIx32 ")", name, image->id, image->abi);

	return B_OK;

err3:
	unmap_image(image);
err2:
	delete_image_struct(image);
err1:
	_kern_close(fd);

	KTRACE("rld: load_container(\"%s\"): failed: %s", name, strerror(status));

	return status;
}
/*!	Drops one reference to the library/add-on identified by \a handle or
	\a imageID, then unloads every image that has thereby become disposable.
	Returns B_OK on success, B_BAD_IMAGE_ID for an unknown/invalid ID,
	B_BAD_VALUE if \a addOn does not match the image's actual type.
*/
status_t
unload_library(void* handle, image_id imageID, bool addOn)
{
	image_t *image;
	image_type type = addOn ? B_ADD_ON_IMAGE : B_LIBRARY_IMAGE;

	if (handle == NULL && imageID < 0)
		return B_BAD_IMAGE_ID;

	// the global scope pseudo-handle is never unloaded
	if (handle == RLD_GLOBAL_SCOPE)
		return B_OK;

	rld_lock();
		// for now, just do stupid simple global locking

	if (gInvalidImageIDs) {
		// After fork, we lazily rebuild the image IDs of all loaded images
		update_image_ids();
	}

	// we only check images that have been already initialized

	status_t status = B_BAD_IMAGE_ID;

	if (handle != NULL) {
		// a handle is trusted as-is; no type check is performed here
		image = (image_t*)handle;
		put_image(image);
		status = B_OK;
	} else {
		image = find_loaded_image_by_id(imageID, true);
		if (image != NULL) {
			// unload image
			if (type == image->type) {
				put_image(image);
				status = B_OK;
			} else
				status = B_BAD_VALUE;
		}
	}

	if (status == B_OK) {
		// Dropping the reference may have made this image -- and,
		// transitively, its dependencies -- disposable; tear them all down.
		while ((image = get_disposable_images().head) != NULL) {
			// Call the exit hooks that live in this image.
			// Note: With the Itanium ABI this shouldn't really be done this
			// way anymore, since global destructors are registered via
			// __cxa_atexit() (the ones that are registered dynamically) and the
			// termination routine should call __cxa_finalize() for the image.
			// The reason why we still do it is that hooks registered with
			// atexit() aren't associated with the image. We could find out
			// there which image the hooks lives in and register it
			// respectively, but since that would be done always, that's
			// probably more expensive than calling
			// call_atexit_hooks_for_range() only here, which happens only when
			// libraries are unloaded dynamically.
			if (gRuntimeLoader.call_atexit_hooks_for_range) {
				gRuntimeLoader.call_atexit_hooks_for_range(
					image->regions[0].vmstart, image->regions[0].vmsize);
			}

			image_event(image, IMAGE_EVENT_UNINITIALIZING);

			// run the image's termination routine before unmapping it
			if (image->term_routine)
				((init_term_function)image->term_routine)(image->id);

			dequeue_disposable_image(image);
			unmap_image(image);

			image_event(image, IMAGE_EVENT_UNLOADING);

			delete_image(image);
		}
	}

	rld_unlock();
	return status;
}