Example #1
image_id
preload_image(char const* path)
{
	if (path == NULL)
		return B_BAD_VALUE;

	KTRACE("rld: preload_image(\"%s\")", path);

	image_t *image = NULL;
	status_t status = load_image(path, B_LIBRARY_IMAGE, NULL, NULL, &image);
	if (status < B_OK) {
		KTRACE("rld: preload_image(\"%s\") failed to load container: %s", path,
			strerror(status));
		return status;
	}

	if (image->find_undefined_symbol == NULL)
		image->find_undefined_symbol = find_undefined_symbol_global;

	status = load_dependencies(image);
	if (status < B_OK)
		goto err;

	set_image_flags_recursively(image, RTLD_GLOBAL);

	status = relocate_dependencies(image);
	if (status < B_OK)
		goto err;

	status = add_preloaded_image(image);
	if (status < B_OK)
		goto err;

	inject_runtime_loader_api(image);

	remap_images();
	init_dependencies(image, true);

	// if the image contains an add-on, register it
	runtime_loader_add_on* addOnStruct;
	if (find_symbol(image,
			SymbolLookupInfo("__gRuntimeLoaderAddOn", B_SYMBOL_TYPE_DATA),
			(void**)&addOnStruct) == B_OK) {
		add_add_on(image, addOnStruct);
	}

	KTRACE("rld: preload_image(\"%s\") done: id: %" B_PRId32, path, image->id);

	return image->id;

err:
	KTRACE("rld: preload_image(\"%s\") failed: %s", path, strerror(status));

	dequeue_loaded_image(image);
	delete_image(image);
	return status;
}
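
Usage, as a minimal sketch: preload_image() returns either a negative status_t or the new image ID, so the result can be tested directly. The library path here is hypothetical.

	// Hypothetical path; any dynamically loadable ELF object would do.
	image_id id = preload_image("/boot/system/lib/libexample.so");
	if (id < B_OK)
		FATAL("preload failed: %s\n", strerror(id));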
Example #2
/*!	You must call this function with interrupts disabled, and the semaphore's
	spinlock held. Note that it will unlock the spinlock itself.
	Since it cannot free() the semaphore's name with interrupts turned off, it
	returns the name in \a _name instead, for the caller to free.
*/
static void
uninit_sem_locked(struct sem_entry& sem, char** _name)
{
	KTRACE("delete_sem(sem: %ld)", sem.u.used.id);

	notify_sem_select_events(&sem, B_EVENT_INVALID);
	sem.u.used.select_infos = NULL;

	// free any threads waiting for this semaphore
	SpinLocker schedulerLocker(gSchedulerLock);
	while (queued_thread* entry = sem.queue.RemoveHead()) {
		entry->queued = false;
		thread_unblock_locked(entry->thread, B_BAD_SEM_ID);
	}
	schedulerLocker.Unlock();

	int32 id = sem.id;
	sem.id = -1;
	*_name = sem.u.used.name;
	sem.u.used.name = NULL;

	RELEASE_SEM_LOCK(sem);

	// append slot to the free list
	GRAB_SEM_LIST_LOCK();
	free_sem_slot(id % sMaxSems, id + sMaxSems);
	atomic_add(&sUsedSems, -1);
	RELEASE_SEM_LIST_LOCK();
}
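
The doc comment above prescribes a strict protocol, so a sketch of what a caller (e.g. a delete_sem() implementation) has to do around the call may help; slot and the locals are hypothetical:

	char* name;
	cpu_status state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	uninit_sem_locked(sSems[slot], &name);
		// the call itself released the semaphore's spinlock

	restore_interrupts(state);
	free(name);
		// safe only now that interrupts are enabled again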
Example #3
/*! set real and effective user IDs
 * The function shall fail, return -1, and set errno to the corresponding value if one or more of the following are true:
 * [EINVAL] The value of the uid argument is invalid and not supported by the implementation.
 * [EPERM] The process does not have appropriate privileges and uid does not match the real user ID or the saved set-user-ID.
 * */
UINT32 syscall_setreuid(SYSTEM_CALL_ARGS_PTR sys_call_args, UINT32 *retval)
{
	int ruid, euid;

	ruid = sys_call_args->args[0];
	euid = sys_call_args->args[1];
	KTRACE("%p %p %p %p\n", sys_call_args->args[0], sys_call_args->args[1], sys_call_args->args[2], sys_call_args->args[3]);

	/* Stub: the IDs are parsed but not applied yet; always report success. */
	(void)ruid;
	(void)euid;

	*retval = 0;
	return 0;
}
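
For reference, the user-space contract this stub is meant to back, via the standard POSIX call (plain portable C, no kernel-side assumptions):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Try to set both the real and effective user ID to 1000. */
	if (setreuid(1000, 1000) == -1) {
		/* errno will be EPERM or EINVAL, as documented above. */
		perror("setreuid");
		return 1;
	}
	return 0;
}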
Example #4
/*! get the name of the current system
 * int uname(struct utsname *name);
 * Upon successful completion, a non-negative value shall be returned. Otherwise, -1 shall be returned and errno set to indicate the error.
 * */
UINT32 syscall_uname(SYSTEM_CALL_ARGS_PTR sys_call_args, UINT32 *retval)
{
	KTRACE("%p %p %p %p\n", sys_call_args->args[0], sys_call_args->args[1], sys_call_args->args[2], sys_call_args->args[3]);
	struct utsname *name;

	name = (struct utsname *)sys_call_args->args[0];

	/* Copy the compile-time system identification strings into the
	   caller-supplied structure. */
	strcpy(name->sysname, UTS_SYSTEM_NAME);
	strcpy(name->nodename, UTS_NODE_NAME);
	strcpy(name->release, UTS_RELEASE);
	strcpy(name->version, UTS_VERSION);
	strcpy(name->machine, UTS_MACHINE);

	*retval = 0;
	return 0;
}
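
The matching user-space side, using only standard POSIX headers:

#include <stdio.h>
#include <sys/utsname.h>

int main(void)
{
	struct utsname name;

	/* uname() returns a non-negative value on success. */
	if (uname(&name) < 0) {
		perror("uname");
		return 1;
	}
	printf("%s %s %s %s %s\n", name.sysname, name.nodename,
		name.release, name.version, name.machine);
	return 0;
}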
Example #5
void OS_ThreadExit(void)
{
    int tid = current->tid;

    DEBUG ("(); tid=%d", tid);
    KTRACE (KTRACE_EXIT, current->tid, 0);

    /*  Reset the thread status (mark the TCB as free)  */
    task_table[tid].status = 0;
    /*  Traps off, set S, PS and FP unit    */
    //task_table[tid].context.psr = 0x000010c0;

    /*  Free the thread stack   */
    OS_Free(task_table[tid].stack.addr);

    /*  Add the TCB into the free TCB queue   */
    OS_ListAppend(&tcb_list, &task_table[tid].node);
}
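
A sketch of how a thread body would end under this API; my_thread_body is hypothetical, and the reschedule after the call is assumed to happen elsewhere (e.g. in the thread wrapper), since the freed stack must not be touched again:

static void my_thread_body(void *arg)
{
    /*  ... the thread's actual work ...  */

    /*  Recycle this thread's TCB and stack  */
    OS_ThreadExit();
}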
Example #6
/*
 * Function: tid_t OS_TaskCreate(uint32_t *_stackaddr, uint32_t _stacksize, void *_ip, void *_arg, uint8_t _priority)
 *
 * This routine creates a new thread and returns the ID of the newly created TCB
 */
tid_t OS_TaskCreate(uint32_t *_stackaddr, uint32_t  _stacksize, void *_ip, void *_arg, uint8_t _priority)
{
    OS_task_t *task;
    OS_Node_t *n;

  //  DEBUG ("(_stackaddr=%p, _stacksize=%p, _ip=%p, _arg=%p, priority=%d)",
   //     _stackaddr, _stacksize, _ip, _arg, _priority);

    ASSERT (_priority <= SCHED_MIN_PRIORITY);

    /* get a free thread control block */
    n = (OS_Node_t *) OS_ListGetNode (&tcb_list);

    /* assert that we really have a free tcb */
    ASSERT (n != NULL);
    if (n == NULL) return -1;

    task = GET_ELEMENT(OS_task_t, n, node);
    TRACE (task->tid, "d");
    KTRACE (KTRACE_CREATE, task->tid, _priority);

    /*  Establish the thread stack */
    if (_stackaddr == NULL)
    {
        if (_stacksize < CONFIG_ERCOS_MIN_STACK) _stacksize = CONFIG_ERCOS_MIN_STACK;
        _stackaddr = OS_Malloc (_stacksize);
    } else {
        ASSERT (_stacksize >= CONFIG_ERCOS_MIN_STACK);
    }
    task->stack.size = _stacksize;
    task->stack.addr = (uint8_t *)_stackaddr;

    /*  Fill the stack with the 0x5a mark (debug builds only)  */
#ifndef CONFIG_NDEBUG
    unsigned i;
    DEBUG ("Stack @ %p, %p bytes", _stackaddr, _stacksize);
    for (i = 0; i < _stacksize; i++) ((uint8_t *) _stackaddr)[i] = 0x5a;
#endif

    /*  Init the new thread context */
    ercos_hal_hwcontext_init (&(task->context), task->stack.addr, task->stack.size, 0, OS_ThreadWrapper);

    /*  Establish the thread status  */
    task->status = TS_READY;

    /*  Establish the thread entry point  */
    task->entry_point = _ip;
    task->arg = _arg;

    /*  Init the pointer to the held mutex   */
    task->pmutex = (OS_Mutex_t *) NULL;

    /*  Set the thread base and effective priorities   */
    task->priority = _priority;
    task->base_priority = _priority;

    /*  Add the task to the corresponding scheduler queue  */
    OS_SchedTaskReady(task->tid);

    return task->tid;
}
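
A creation sketch under the same assumptions; hello_task, the stack size and priority 10 are hypothetical (the priority just has to stay within SCHED_MIN_PRIORITY):

static void hello_task(void *arg)
{
    /*  ... task body ...  */
}

void spawn_hello(void)
{
    /*  NULL stack address: OS_TaskCreate() allocates the stack itself,
        growing the request to CONFIG_ERCOS_MIN_STACK if necessary  */
    tid_t tid = OS_TaskCreate(NULL, 1024, (void *) hello_task, NULL, 10);
    ASSERT (tid >= 0);
}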
Example #7
/*! get the process group ID for a process*/
UINT32 syscall_getpgid(SYSTEM_CALL_ARGS_PTR sys_call_args, UINT32 *retval)
{
	KTRACE("%p %p %p %p\n", sys_call_args->args[0], sys_call_args->args[1], sys_call_args->args[2], sys_call_args->args[3]);
	/* Stub: every process is reported as belonging to process group 1. */
	*retval = 1;
	return 0;
}
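
The corresponding user-space call, for reference (standard POSIX; pid 0 means the calling process, which the stub above always answers with group 1):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	pid_t pgid = getpgid(0);
	if (pgid == -1) {
		perror("getpgid");
		return 1;
	}
	printf("process group: %d\n", (int)pgid);
	return 0;
}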
Example #8
/*! If successful, the getlogin_r() function shall return zero; otherwise, an error number shall be returned to indicate the error.
 * [EMFILE] {OPEN_MAX} file descriptors are currently open in the calling process.
 * [ENFILE] The maximum allowable number of files is currently open in the system.
 * [ENXIO] The calling process has no controlling terminal.
 * */
UINT32 syscall_getlogin_r(SYSTEM_CALL_ARGS_PTR sys_call_args, UINT32 *retval)
{
	KTRACE("%p %p %p %p\n", sys_call_args->args[0], sys_call_args->args[1], sys_call_args->args[2], sys_call_args->args[3]);
	/* Stub: no login name is available; report an error to the caller. */
	*retval = 0;
	return 1;
}
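
The user-space side of the contract, in portable POSIX C; note that getlogin_r() reports errors through its return value, not errno:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char login[256];

	int error = getlogin_r(login, sizeof(login));
	if (error != 0) {
		/* e.g. ENXIO when there is no controlling terminal */
		fprintf(stderr, "getlogin_r: %s\n", strerror(error));
		return 1;
	}
	printf("login: %s\n", login);
	return 0;
}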
Example #9
status_t
load_image(char const* name, image_type type, const char* rpath,
	const char* requestingObjectPath, image_t** _image)
{
	int32 pheaderSize, sheaderSize;
	char path[PATH_MAX];
	ssize_t length;
	char pheaderBuffer[4096];
	int32 numRegions;
	image_t* found;
	image_t* image;
	status_t status;
	int fd;

	elf_ehdr eheader;

	// Have we already loaded that image? Don't check for add-ons -- we always
	// reload them.
	if (type != B_ADD_ON_IMAGE) {
		found = find_loaded_image_by_name(name, APP_OR_LIBRARY_TYPE);

		if (found == NULL && type != B_APP_IMAGE && gProgramImage != NULL) {
			// Special case for add-ons that link against the application
			// executable, with the executable not having a soname set.
			if (const char* lastSlash = strrchr(name, '/')) {
				if (strcmp(gProgramImage->name, lastSlash + 1) == 0)
					found = gProgramImage;
			}
		}

		if (found) {
			atomic_add(&found->ref_count, 1);
			*_image = found;
			KTRACE("rld: load_container(\"%s\", type: %d, rpath: \"%s\") "
				"already loaded", name, type, rpath);
			return B_OK;
		}
	}

	KTRACE("rld: load_container(\"%s\", type: %d, rpath: \"%s\")", name, type,
		rpath);

	strlcpy(path, name, sizeof(path));

	// find and open the file
	fd = open_executable(path, type, rpath, get_program_path(),
		requestingObjectPath, sSearchPathSubDir);
	if (fd < 0) {
		FATAL("Cannot open file %s: %s\n", name, strerror(fd));
		KTRACE("rld: load_container(\"%s\"): failed to open file", name);
		return fd;
	}

	// normalize the image path
	status = _kern_normalize_path(path, true, path);
	if (status != B_OK)
		goto err1;

	// Test again if this image has been registered already -- this time,
	// we can check the full path, not just its name. Otherwise, symbolic
	// links could let us load the same image twice.
	if (type != B_ADD_ON_IMAGE) {
		found = find_loaded_image_by_name(path, APP_OR_LIBRARY_TYPE);
		if (found) {
			atomic_add(&found->ref_count, 1);
			*_image = found;
			_kern_close(fd);
			KTRACE("rld: load_container(\"%s\"): already loaded after all",
				name);
			return B_OK;
		}
	}

	length = _kern_read(fd, 0, &eheader, sizeof(eheader));
	if (length != sizeof(eheader)) {
		status = B_NOT_AN_EXECUTABLE;
		FATAL("%s: Troubles reading ELF header\n", path);
		goto err1;
	}

	status = parse_elf_header(&eheader, &pheaderSize, &sheaderSize);
	if (status < B_OK) {
		FATAL("%s: Incorrect ELF header\n", path);
		goto err1;
	}

	// ToDo: what to do about this restriction??
	if (pheaderSize > (int)sizeof(pheaderBuffer)) {
		FATAL("%s: Cannot handle program headers bigger than %lu\n",
			path, sizeof(pheaderBuffer));
		status = B_UNSUPPORTED;
		goto err1;
	}

	length = _kern_read(fd, eheader.e_phoff, pheaderBuffer, pheaderSize);
	if (length != pheaderSize) {
		FATAL("%s: Could not read program headers: %s\n", path,
			strerror(length));
		status = B_BAD_DATA;
		goto err1;
	}

	numRegions = count_regions(path, pheaderBuffer, eheader.e_phnum,
		eheader.e_phentsize);
	if (numRegions <= 0) {
		FATAL("%s: Troubles parsing Program headers, numRegions = %" B_PRId32
			"\n", path, numRegions);
		status = B_BAD_DATA;
		goto err1;
	}

	image = create_image(name, path, numRegions);
	if (image == NULL) {
		FATAL("%s: Failed to allocate image_t object\n", path);
		status = B_NO_MEMORY;
		goto err1;
	}

	status = parse_program_headers(image, pheaderBuffer, eheader.e_phnum,
		eheader.e_phentsize);
	if (status < B_OK)
		goto err2;

	if (!assert_dynamic_loadable(image)) {
		FATAL("%s: Dynamic segment must be loadable (implementation "
			"restriction)\n", image->path);
		status = B_UNSUPPORTED;
		goto err2;
	}

	status = map_image(fd, path, image, eheader.e_type == ET_EXEC);
	if (status < B_OK) {
		FATAL("%s: Could not map image: %s\n", image->path, strerror(status));
		status = B_ERROR;
		goto err2;
	}

	if (!parse_dynamic_segment(image)) {
		FATAL("%s: Troubles handling dynamic section\n", image->path);
		status = B_BAD_DATA;
		goto err3;
	}

	if (eheader.e_entry != 0)
		image->entry_point = eheader.e_entry + image->regions[0].delta;

	analyze_image_haiku_version_and_abi(fd, image, eheader, sheaderSize,
		pheaderBuffer, sizeof(pheaderBuffer));

	// If this is the executable image, we init the search path
	// subdir, if the compiler version doesn't match ours.
	if (type == B_APP_IMAGE) {
		#if __GNUC__ == 2
			if ((image->abi & B_HAIKU_ABI_MAJOR) == B_HAIKU_ABI_GCC_4)
				sSearchPathSubDir = "x86";
		#elif __GNUC__ >= 4
			if ((image->abi & B_HAIKU_ABI_MAJOR) == B_HAIKU_ABI_GCC_2)
				sSearchPathSubDir = "x86_gcc2";
		#endif
	}

	set_abi_version(image->abi);

	// init gcc version dependent image flags
	// symbol resolution strategy
	if (image->abi == B_HAIKU_ABI_GCC_2_ANCIENT)
		image->find_undefined_symbol = find_undefined_symbol_beos;

	// init version infos
	status = init_image_version_infos(image);

	image->type = type;
	register_image(image, fd, path);
	image_event(image, IMAGE_EVENT_LOADED);

	_kern_close(fd);

	enqueue_loaded_image(image);

	*_image = image;

	KTRACE("rld: load_container(\"%s\"): done: id: %" B_PRId32 " (ABI: %#"
		B_PRIx32 ")", name, image->id, image->abi);

	return B_OK;

err3:
	unmap_image(image);
err2:
	delete_image_struct(image);
err1:
	_kern_close(fd);

	KTRACE("rld: load_container(\"%s\"): failed: %s", name,
		strerror(status));

	return status;
}
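
load_image() is built around the layered goto cleanup idiom: each err label undoes exactly the steps that had succeeded by the time of the jump. A generic sketch of the pattern, with hypothetical resource names:

status_t
layered_init(void)
{
	status_t status;

	int fd = open_resource();		// step 1
	if (fd < 0)
		return fd;

	thing* t = allocate_thing();	// step 2
	if (t == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	status = map_thing(t, fd);		// step 3
	if (status < B_OK)
		goto err2;

	return B_OK;

err2:
	free_thing(t);		// undo step 2
err1:
	close_resource(fd);	// undo step 1
	return status;
}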
Example #10
status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	int32 slot = id % sMaxSems;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
		return B_BAD_VALUE;

	InterruptsLocker _;
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		return B_NOT_ALLOWED;
	}

	KTRACE("release_sem_etc(sem: %ld, count: %ld, flags: 0x%lx)", id, count,
		flags);

	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;
#endif

	if (flags & B_RELEASE_ALL) {
		count = sSems[slot].u.used.net_count - sSems[slot].u.used.count;

		// is there anything to do for us at all?
		if (count == 0)
			return B_OK;

		// Don't release more than necessary -- there might be interrupted/
		// timed out threads in the queue.
		flags |= B_RELEASE_IF_WAITING_ONLY;
	}

	// Grab the scheduler lock, so thread_is_blocked() is reliable (due to
	// possible interruptions or timeouts, it wouldn't be otherwise).
	SpinLocker schedulerLocker(gSchedulerLock);

	while (count > 0) {
		queued_thread* entry = sSems[slot].queue.Head();
		if (entry == NULL) {
			if ((flags & B_RELEASE_IF_WAITING_ONLY) == 0) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
			}
			break;
		}

		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied,
			// unblock it. Otherwise we can't unblock any other thread.
			if (entry->count > sSems[slot].u.used.net_count + count) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
				break;
			}

			thread_unblock_locked(entry->thread, B_OK);

			int delta = min_c(count, entry->count);
			sSems[slot].u.used.count += delta;
			sSems[slot].u.used.net_count += delta - entry->count;
			count -= delta;
		} else {
			// The thread is no longer waiting, but is still queued, which
			// means the acquisition failed, and we can just remove it.
			sSems[slot].u.used.count += entry->count;
		}

		sSems[slot].queue.Remove(entry);
		entry->queued = false;
	}

	schedulerLocker.Unlock();

	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	// If we've unblocked another thread, reschedule, unless we've been
	// explicitly told not to.
	if ((flags & B_DO_NOT_RESCHEDULE) == 0) {
		semLocker.Unlock();
		schedulerLocker.Lock();
		scheduler_reschedule_if_necessary_locked();
	}

	return B_OK;
}
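
For context, a user-level sketch against the public semaphore API that this function backs (create_sem(), acquire_sem_etc(), release_sem_etc() and delete_sem() are the documented calls):

	sem_id sem = create_sem(0, "example sem");
	if (sem < B_OK)
		return;

	// Producer: wake one waiter, but don't force an immediate reschedule.
	release_sem_etc(sem, 1, B_DO_NOT_RESCHEDULE);

	// Consumer: wait up to one second (1000000 us) for the count.
	status_t status = acquire_sem_etc(sem, 1, B_RELATIVE_TIMEOUT, 1000000);
	if (status == B_TIMED_OUT) {
		// no release arrived in time
	}

	delete_sem(sem);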
Example #11
status_t
switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
	uint32 flags, bigtime_t timeout)
{
	int slot = id % sMaxSems;
	int state;
	status_t status = B_OK;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

	if (!are_interrupts_enabled()) {
		panic("switch_sem_etc: called with interrupts disabled for sem %ld\n",
			id);
	}

	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0
		|| (flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) {
		return B_BAD_VALUE;
	}

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("switch_sem_etc: bad sem %ld\n", id));
		status = B_BAD_SEM_ID;
		goto err;
	}

	// TODO: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to acquire kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		status = B_NOT_ALLOWED;
		goto err;
	}

	if (sSems[slot].u.used.count - count < 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) {
			// immediate timeout
			status = B_WOULD_BLOCK;
			goto err;
		} else if ((flags & B_ABSOLUTE_TIMEOUT) != 0 && timeout < 0) {
			// absolute negative timeout
			status = B_TIMED_OUT;
			goto err;
		}
	}

	KTRACE("switch_sem_etc(semToBeReleased: %ld, sem: %ld, count: %ld, "
		"flags: 0x%lx, timeout: %lld)", semToBeReleased, id, count, flags,
		timeout);

	if ((sSems[slot].u.used.count -= count) < 0) {
		// we need to block
		Thread *thread = thread_get_current_thread();

		TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p,"
			" name = %s\n", id, sSems[slot].u.used.name, thread, thread->name));

		// Do a quick check to see if the thread has any pending signals;
		// this should catch most of the cases where the thread had a signal.
		SpinLocker schedulerLocker(gSchedulerLock);
		if (thread_is_interrupted(thread, flags)) {
			schedulerLocker.Unlock();
			sSems[slot].u.used.count += count;
			status = B_INTERRUPTED;
				// the other semaphore will be released later
			goto err;
		}

		if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
			timeout = B_INFINITE_TIMEOUT;

		// enqueue in the semaphore queue and get ready to wait
		queued_thread queueEntry(thread, count);
		sSems[slot].queue.Add(&queueEntry);
		queueEntry.queued = true;

		thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SEMAPHORE,
			(void*)(addr_t)id);

		RELEASE_SEM_LOCK(sSems[slot]);

		// release the other semaphore, if any
		if (semToBeReleased >= 0) {
			release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
			semToBeReleased = -1;
		}

		schedulerLocker.Lock();

		status_t acquireStatus = timeout == B_INFINITE_TIMEOUT
			? thread_block_locked(thread)
			: thread_block_with_timeout_locked(flags, timeout);

		schedulerLocker.Unlock();
		GRAB_SEM_LOCK(sSems[slot]);

		// If we're still queued, this means the acquisition failed, and we
		// need to remove our entry and (potentially) wake up other threads.
		if (queueEntry.queued)
			remove_thread_from_sem(&queueEntry, &sSems[slot]);

		if (acquireStatus >= B_OK) {
			sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
			sSems[slot].u.used.last_acquire_count = count;
#endif
		}

		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);

		TRACE(("switch_sem_etc(sem %ld): exit block name %s, "
			"thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id,
			thread->name));
		KTRACE("switch_sem_etc() done: 0x%lx", acquireStatus);
		return acquireStatus;
	} else {
		sSems[slot].u.used.net_count -= count;
		sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
		sSems[slot].u.used.last_acquire_count = count;
#endif
	}

err:
	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	if (status == B_INTERRUPTED && semToBeReleased >= B_OK) {
		// Depending on when we were interrupted, we may still need to
		// release the semaphore to leave everything in a consistent
		// state.
		release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
	}

#if 0
	if (status == B_NOT_ALLOWED)
	_user_debugger("Thread tried to acquire kernel semaphore.");
#endif

	KTRACE("switch_sem_etc() done: 0x%lx", status);

	return status;
}
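
The point of switch_sem_etc() is that releasing one semaphore and blocking on another cannot be separated by a missed wake-up. A condition-variable-style sketch built on it; mutexSem and eventSem are hypothetical:

	// Atomically: release mutexSem, then block on eventSem.
	status_t status = switch_sem_etc(mutexSem, eventSem, 1,
		B_RELATIVE_TIMEOUT, 1000000);
	if (status == B_INTERRUPTED) {
		// mutexSem has still been released -- see the err path above.
	}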
Example #12
/*!	Creates a semaphore with the given parameters.

	This function is only available from within the kernel, and
	should not be made public - if possible, we should remove it
	completely (and have only create_sem() exported).
*/
sem_id
create_sem_etc(int32 count, const char* name, team_id owner)
{
	struct sem_entry* sem = NULL;
	cpu_status state;
	sem_id id = B_NO_MORE_SEMS;
	char* tempName;
	size_t nameLength;

	if (sSemsActive == false || sUsedSems == sMaxSems)
		return B_NO_MORE_SEMS;

	if (name == NULL)
		name = "unnamed semaphore";

	// get the owning team
	Team* team = Team::Get(owner);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	// clone the name
	nameLength = strlen(name) + 1;
	nameLength = min_c(nameLength, B_OS_NAME_LENGTH);
	tempName = (char*)malloc(nameLength);
	if (tempName == NULL)
		return B_NO_MEMORY;

	strlcpy(tempName, name, nameLength);

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	// get the first slot from the free list
	sem = sFreeSemsHead;
	if (sem) {
		// remove it from the free list
		sFreeSemsHead = sem->u.unused.next;
		if (!sFreeSemsHead)
			sFreeSemsTail = NULL;

		// init the slot
		GRAB_SEM_LOCK(*sem);
		sem->id = sem->u.unused.next_id;
		sem->u.used.count = count;
		sem->u.used.net_count = count;
		new(&sem->queue) ThreadQueue;
		sem->u.used.name = tempName;
		sem->u.used.owner = team->id;
		sem->u.used.select_infos = NULL;
		id = sem->id;

		list_add_item(&team->sem_list, &sem->u.used.team_link);

		RELEASE_SEM_LOCK(*sem);

		atomic_add(&sUsedSems, 1);

		KTRACE("create_sem_etc(count: %ld, name: %s, owner: %ld) -> %ld",
			count, name, owner, id);

		T_SCHEDULING_ANALYSIS(CreateSemaphore(id, name));
		NotifyWaitObjectListeners(&WaitObjectListener::SemaphoreCreated, id,
			name);
	}

	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	if (sem == NULL)
		free(tempName);

	return id;
}
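
Worth spelling out is the ID scheme this relies on: a recycled slot's next ID is the old ID plus sMaxSems (see free_sem_slot() in Example #2), so id % sMaxSems always recovers the slot while a stale ID fails the sSems[slot].id != id check. A small sketch of the invariant; the concrete numbers are hypothetical:

	// With sMaxSems == 4096, slot 5 is handed out as 5, 4101, 8197, ...
	int32 slot = id % sMaxSems;			// recover the slot from any generation
	sem_id nextId = id + sMaxSems;		// next generation for this slot
	ASSERT(nextId % sMaxSems == slot);	// same slot, new identity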
Example #13
static status_t
load_immediate_dependencies(image_t *image)
{
	elf_dyn *d = (elf_dyn *)image->dynamic_ptr;
	bool reportErrors = report_errors();
	status_t status = B_OK;
	uint32 i, j;
	const char *rpath;

	if (!d || (image->flags & RFLAG_DEPENDENCIES_LOADED))
		return B_OK;

	image->flags |= RFLAG_DEPENDENCIES_LOADED;

	if (image->num_needed == 0)
		return B_OK;

	KTRACE("rld: load_dependencies(\"%s\", id: %" B_PRId32 ")", image->name,
		image->id);

	image->needed = (image_t**)malloc(image->num_needed * sizeof(image_t *));
	if (image->needed == NULL) {
		FATAL("%s: Failed to allocate needed struct\n", image->path);
		KTRACE("rld: load_dependencies(\"%s\", id: %" B_PRId32
			") failed: no memory", image->name, image->id);
		return B_NO_MEMORY;
	}

	memset(image->needed, 0, image->num_needed * sizeof(image_t *));
	rpath = find_dt_rpath(image);

	for (i = 0, j = 0; d[i].d_tag != DT_NULL; i++) {
		switch (d[i].d_tag) {
			case DT_NEEDED:
			{
				int32 neededOffset = d[i].d_un.d_val;
				const char *name = STRING(image, neededOffset);

				status_t loadStatus = load_image(name, B_LIBRARY_IMAGE,
					rpath, image->path, &image->needed[j]);
				if (loadStatus < B_OK) {
					status = loadStatus;
					// correct the error code in case the file could not be found
					if (status == B_ENTRY_NOT_FOUND) {
						status = B_MISSING_LIBRARY;

						if (reportErrors)
							gErrorMessage.AddString("missing library", name);
					}

					// Collect all missing libraries in case we report back
					if (!reportErrors) {
						KTRACE("rld: load_dependencies(\"%s\", id: %" B_PRId32
							") failed: %s", image->name, image->id,
							strerror(status));
						return status;
					}
				}

				j += 1;
				break;
			}

			default:
				// ignore any other tag
				continue;
		}
	}

	if (status < B_OK) {
		KTRACE("rld: load_dependencies(\"%s\", id: %" B_PRId32 ") "
			"failed: %s", image->name, image->id,
			strerror(status));
		return status;
	}

	if (j != image->num_needed) {
		FATAL("Internal error at load_dependencies()");
		KTRACE("rld: load_dependencies(\"%s\", id: %" B_PRId32 ") "
			"failed: internal error", image->name, image->id);
		return B_ERROR;
	}

	KTRACE("rld: load_dependencies(\"%s\", id: %" B_PRId32 ") done",
		image->name, image->id);

	return B_OK;
}
Example #14
image_id
load_library(char const *path, uint32 flags, bool addOn, void** _handle)
{
	image_t *image = NULL;
	image_type type = (addOn ? B_ADD_ON_IMAGE : B_LIBRARY_IMAGE);
	status_t status;

	if (path == NULL && addOn)
		return B_BAD_VALUE;

	KTRACE("rld: load_library(\"%s\", %#" B_PRIx32 ", %d)", path, flags, addOn);

	rld_lock();
		// for now, just do stupid simple global locking

	// have we already loaded this library?
	// Checking it at this stage saves loading its dependencies again
	if (!addOn) {
		// a NULL path is fine -- it means the global scope shall be opened
		if (path == NULL) {
			*_handle = RLD_GLOBAL_SCOPE;
			rld_unlock();
			return 0;
		}

		image = find_loaded_image_by_name(path, APP_OR_LIBRARY_TYPE);
		if (image != NULL && (flags & RTLD_GLOBAL) != 0)
			set_image_flags_recursively(image, RTLD_GLOBAL);

		if (image) {
			atomic_add(&image->ref_count, 1);
			rld_unlock();
			KTRACE("rld: load_library(\"%s\"): already loaded: %" B_PRId32,
				path, image->id);
			*_handle = image;
			return image->id;
		}
	}

	status = load_image(path, type, NULL, NULL, &image);
	if (status < B_OK) {
		rld_unlock();
		KTRACE("rld: load_library(\"%s\") failed to load container: %s", path,
			strerror(status));
		return status;
	}

	if (image->find_undefined_symbol == NULL) {
		if (addOn)
			image->find_undefined_symbol = find_undefined_symbol_add_on;
		else
			image->find_undefined_symbol = find_undefined_symbol_global;
	}

	status = load_dependencies(image);
	if (status < B_OK)
		goto err;

	// If specified, set the RTLD_GLOBAL flag recursively on this image and all
	// dependencies. If not specified, we temporarily set
	// RFLAG_USE_FOR_RESOLVING so that the dependencies will correctly be used
	// for undefined symbol resolution.
	if ((flags & RTLD_GLOBAL) != 0)
		set_image_flags_recursively(image, RTLD_GLOBAL);
	else
		set_image_flags_recursively(image, RFLAG_USE_FOR_RESOLVING);

	status = relocate_dependencies(image);
	if (status < B_OK)
		goto err;

	if ((flags & RTLD_GLOBAL) == 0)
		clear_image_flags_recursively(image, RFLAG_USE_FOR_RESOLVING);

	remap_images();
	init_dependencies(image, true);

	rld_unlock();

	KTRACE("rld: load_library(\"%s\") done: id: %" B_PRId32, path, image->id);

	*_handle = image;
	return image->id;

err:
	KTRACE("rld: load_library(\"%s\") failed: %s", path, strerror(status));

	dequeue_loaded_image(image);
	delete_image(image);
	rld_unlock();
	return status;
}
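
load_library() is what ultimately backs dlopen(), and the flag handling above is the loader-side half of the user-visible RTLD_GLOBAL/RTLD_LOCAL distinction. A plain user-space sketch, with a hypothetical library path:

#include <dlfcn.h>
#include <stdio.h>

int main(void)
{
	// RTLD_GLOBAL is propagated to all dependencies, just as
	// set_image_flags_recursively() does above.
	void* handle = dlopen("/boot/system/lib/libexample.so",
		RTLD_LAZY | RTLD_GLOBAL);
	if (handle == NULL) {
		fprintf(stderr, "dlopen: %s\n", dlerror());
		return 1;
	}
	dlclose(handle);
	return 0;
}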
Example #15
image_id
load_program(char const *path, void **_entry)
{
	status_t status;
	image_t *image;

	KTRACE("rld: load_program(\"%s\")", path);

	rld_lock();
		// for now, just do stupid simple global locking

	preload_images();

	TRACE(("rld: load %s\n", path));

	status = load_image(path, B_APP_IMAGE, NULL, NULL, &gProgramImage);
	if (status < B_OK)
		goto err;

	if (gProgramImage->find_undefined_symbol == NULL)
		gProgramImage->find_undefined_symbol = find_undefined_symbol_global;

	status = load_dependencies(gProgramImage);
	if (status < B_OK)
		goto err;

	// Set RTLD_GLOBAL on all libraries including the program.
	// This results in the desired symbol resolution for dlopen()ed libraries.
	set_image_flags_recursively(gProgramImage, RTLD_GLOBAL);

	status = relocate_dependencies(gProgramImage);
	if (status < B_OK)
		goto err;

	inject_runtime_loader_api(gProgramImage);

	remap_images();
	init_dependencies(gProgramImage, true);

	// Now that the images are initialized, we should no longer use our
	// own getenv(), but the one from libroot.so.
	find_symbol_breadth_first(gProgramImage,
		SymbolLookupInfo("getenv", B_SYMBOL_TYPE_TEXT), &image,
		(void**)&gGetEnv);

	if (gProgramImage->entry_point == 0) {
		status = B_NOT_AN_EXECUTABLE;
		goto err;
	}

	*_entry = (void *)(gProgramImage->entry_point);

	rld_unlock();

	gProgramLoaded = true;

	KTRACE("rld: load_program(\"%s\") done: entry: %p, id: %" B_PRId32 , path,
		*_entry, gProgramImage->id);

	return gProgramImage->id;

err:
	KTRACE("rld: load_program(\"%s\") failed: %s", path, strerror(status));

	delete_image(gProgramImage);

	if (report_errors()) {
		// send error message
		gErrorMessage.AddInt32("error", status);
		gErrorMessage.SetDeliveryInfo(gProgramArgs->error_token,
			-1, 0, find_thread(NULL));

		_kern_write_port_etc(gProgramArgs->error_port, 'KMSG',
			gErrorMessage.Buffer(), gErrorMessage.ContentSize(), 0, 0);
	}
	_kern_loading_app_failed(status);
	rld_unlock();

	return status;
}