Example 1
status_t
PackagesDirectory::Init(const char* path, dev_t mountPointDeviceID,
	ino_t mountPointNodeID, struct stat& _st)
{
	// Open the directory. We want the path to be interpreted according to
	// where it came from (kernel or userland), but we always want an FD in
	// the kernel I/O context. There's no VFS service method that does that
	// for us, so we have to do it ourselves.
	bool calledFromKernel
		= team_get_current_team_id() == team_get_kernel_team_id();
		// Not entirely correct, but good enough for now. The only
		// alternative is to have that information passed in as well.

	struct vnode* vnode;
	status_t error;
	if (path != NULL) {
		error = vfs_get_vnode_from_path(path, calledFromKernel, &vnode);
	} else {
		// No path given -- use the "packages" directory at our mount point.
		error = vfs_entry_ref_to_vnode(mountPointDeviceID, mountPointNodeID,
			"packages", &vnode);
	}
	if (error != B_OK) {
		ERROR("Failed to open packages directory \"%s\"\n", strerror(error));
		RETURN_ERROR(error);
	}

	return _Init(vnode, _st);
}
Example 2
status_t
user_timer_get_clock(clockid_t clockID, bigtime_t& _time)
{
	switch (clockID) {
		case CLOCK_MONOTONIC:
			_time = system_time();
			return B_OK;

		case CLOCK_REALTIME:
			_time = real_time_clock_usecs();
			return B_OK;

		case CLOCK_THREAD_CPUTIME_ID:
		{
			Thread* thread = thread_get_current_thread();
			InterruptsSpinLocker timeLocker(thread->time_lock);
			_time = thread->CPUTime(false);
			return B_OK;
		}

		case CLOCK_PROCESS_USER_CPUTIME_ID:
		{
			Team* team = thread_get_current_thread()->team;
			InterruptsSpinLocker timeLocker(team->time_lock);
			_time = team->UserCPUTime();
			return B_OK;
		}

		case CLOCK_PROCESS_CPUTIME_ID:
		default:
		{
			// get the ID of the target team (or the respective placeholder)
			team_id teamID;
			if (clockID == CLOCK_PROCESS_CPUTIME_ID) {
				teamID = B_CURRENT_TEAM;
			} else {
				if (clockID < 0)
					return B_BAD_VALUE;
				if (clockID == team_get_kernel_team_id())
					return B_NOT_ALLOWED;

				teamID = clockID;
			}

			// get the team
			Team* team = Team::Get(teamID);
			if (team == NULL)
				return B_BAD_VALUE;
			BReference<Team> teamReference(team, true);

			// get the time
			InterruptsSpinLocker timeLocker(team->time_lock);
			_time = team->CPUTime(false);

			return B_OK;
		}
	}
}
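The clock IDs dispatched above are exactly the ones userland reaches through the POSIX clock API. As a hypothetical illustration (not part of the sources above; it uses only the standard clock_gettime()), querying the same clocks from a regular program:

#include <stdio.h>
#include <time.h>

int
main()
{
	struct timespec ts;

	// CLOCK_MONOTONIC is served by system_time() in the kernel.
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
		printf("monotonic: %lld.%09ld\n", (long long)ts.tv_sec,
			(long)ts.tv_nsec);
	}

	// CLOCK_PROCESS_CPUTIME_ID maps to the current team's CPU time.
	if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
		printf("process CPU: %lld.%09ld\n", (long long)ts.tv_sec,
			(long)ts.tv_nsec);
	}

	return 0;
}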
Example 3
static status_t
delete_sem_internal(sem_id id, bool checkPermission)
{
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;

	int32 slot = id % sMaxSems;

	cpu_status state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		RELEASE_SEM_LIST_LOCK();
		restore_interrupts(state);
		TRACE(("delete_sem: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	if (checkPermission
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		RELEASE_SEM_LOCK(sSems[slot]);
		RELEASE_SEM_LIST_LOCK();
		restore_interrupts(state);
		dprintf("thread %ld tried to delete kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		return B_NOT_ALLOWED;
	}

	if (sSems[slot].u.used.owner >= 0) {
		list_remove_link(&sSems[slot].u.used.team_link);
		sSems[slot].u.used.owner = -1;
	} else
		panic("sem %ld has no owner", id);

	RELEASE_SEM_LIST_LOCK();

	char* name;
	uninit_sem_locked(sSems[slot], &name);

	SpinLocker schedulerLocker(gSchedulerLock);
	scheduler_reschedule_if_necessary_locked();
	schedulerLocker.Unlock();

	restore_interrupts(state);

	free(name);
	return B_OK;
}
Example 4
status_t
select_port(int32 id, struct select_info* info, bool kernel)
{
	if (id < 0)
		return B_BAD_PORT_ID;

	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id || is_port_closed(slot))
		return B_BAD_PORT_ID;
	if (!kernel && sPorts[slot].owner == team_get_kernel_team_id()) {
		// kernel port, but call from userland
		return B_NOT_ALLOWED;
	}

	info->selected_events &= B_EVENT_READ | B_EVENT_WRITE | B_EVENT_INVALID;

	if (info->selected_events != 0) {
		uint16 events = 0;

		info->next = sPorts[slot].select_infos;
		sPorts[slot].select_infos = info;

		// check for events
		if ((info->selected_events & B_EVENT_READ) != 0
			&& !sPorts[slot].messages.IsEmpty()) {
			events |= B_EVENT_READ;
		}

		if (sPorts[slot].write_count > 0)
			events |= B_EVENT_WRITE;

		if (events != 0)
			notify_select_events(info, events);
	}

	return B_OK;
}
Example 5
status_t
select_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;
	status_t error = B_OK;

	if (id < 0)
		return B_BAD_SEM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		// bad sem ID
		error = B_BAD_SEM_ID;
	} else if (!kernel
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		// kernel semaphore, but call from userland
		error = B_NOT_ALLOWED;
	} else {
		info->selected_events &= B_EVENT_ACQUIRE_SEMAPHORE | B_EVENT_INVALID;

		if (info->selected_events != 0) {
			info->next = sSems[slot].u.used.select_infos;
			sSems[slot].u.used.select_infos = info;

			if (sSems[slot].u.used.count > 0)
				notify_select_events(info, B_EVENT_ACQUIRE_SEMAPHORE);
		}
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return error;
}
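Both select_port() and select_sem() back the same userland facility: wait_for_objects(), which multiplexes over FDs, ports, semaphores, and threads. A hedged sketch of waiting on a port and a semaphore at once, assuming the object_wait_info interface from <OS.h>:

#include <OS.h>
#include <stdio.h>
#include <string.h>

int
main()
{
	port_id port = create_port(16, "demo port");
	sem_id sem = create_sem(0, "demo sem");

	object_wait_info infos[2];
	infos[0].object = port;
	infos[0].type = B_OBJECT_TYPE_PORT;
	infos[0].events = B_EVENT_READ;
	infos[1].object = sem;
	infos[1].type = B_OBJECT_TYPE_SEMAPHORE;
	infos[1].events = B_EVENT_ACQUIRE_SEMAPHORE;

	// Wait up to one second; on return the events fields hold the
	// events that actually occurred.
	ssize_t count = wait_for_objects_etc(infos, 2, B_RELATIVE_TIMEOUT,
		1000000);
	if (count < 0)
		printf("wait failed: %s\n", strerror(count));
	for (int i = 0; count > 0 && i < 2; i++) {
		if (infos[i].events != 0)
			printf("object %d ready: %#x\n", i, (unsigned)infos[i].events);
	}

	delete_sem(sem);
	delete_port(port);
	return 0;
}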
Example 6
status_t
_user_set_clock(clockid_t clockID, bigtime_t time)
{
	switch (clockID) {
		case CLOCK_MONOTONIC:
			return B_BAD_VALUE;

		case CLOCK_REALTIME:
			// only root may set the time
			if (geteuid() != 0)
				return B_NOT_ALLOWED;

			set_real_time_clock_usecs(time);
			return B_OK;

		case CLOCK_THREAD_CPUTIME_ID:
		{
			Thread* thread = thread_get_current_thread();
			InterruptsSpinLocker timeLocker(thread->time_lock);
			bigtime_t diff = time - thread->CPUTime(false);
			thread->cpu_clock_offset += diff;

			thread_clock_changed(thread, diff);
			return B_OK;
		}

		case CLOCK_PROCESS_USER_CPUTIME_ID:
			// not supported -- this clock is a Haiku-internal extension
			return B_BAD_VALUE;

		case CLOCK_PROCESS_CPUTIME_ID:
		default:
		{
			// get the ID of the target team (or the respective placeholder)
			team_id teamID;
			if (clockID == CLOCK_PROCESS_CPUTIME_ID) {
				teamID = B_CURRENT_TEAM;
			} else {
				if (clockID < 0)
					return B_BAD_VALUE;
				if (clockID == team_get_kernel_team_id())
					return B_NOT_ALLOWED;

				teamID = clockID;
			}

			// get the team
			Team* team = Team::Get(teamID);
			if (team == NULL)
				return B_BAD_VALUE;
			BReference<Team> teamReference(team, true);

			// set the time offset
			InterruptsSpinLocker timeLocker(team->time_lock);
			bigtime_t diff = time - team->CPUTime(false);
			team->cpu_clock_offset += diff;

			team_clock_changed(team, diff);
			return B_OK;
		}
	}

	return B_OK;
}
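From userland the CLOCK_REALTIME branch is reached via clock_settime(), and the geteuid() check above is what a non-root caller runs into. A small sketch (standard POSIX; that B_NOT_ALLOWED surfaces as EPERM is an assumption):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

int
main()
{
	struct timespec ts;
	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 1;	// nudge the wall clock forward by one second

	// Without root privileges the kernel returns B_NOT_ALLOWED, which
	// should surface as EPERM here.
	if (clock_settime(CLOCK_REALTIME, &ts) != 0)
		printf("clock_settime: %s\n", strerror(errno));

	return 0;
}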
Example 7
static int32
create_timer(clockid_t clockID, int32 timerID, Team* team, Thread* thread,
	uint32 flags, const struct sigevent& event,
	ThreadCreationAttributes* threadAttributes, bool isDefaultEvent)
{
	// create the timer object
	UserTimer* timer;
	switch (clockID) {
		case CLOCK_MONOTONIC:
			timer = new(std::nothrow) SystemTimeUserTimer;
			break;

		case CLOCK_REALTIME:
			timer = new(std::nothrow) RealTimeUserTimer;
			break;

		case CLOCK_THREAD_CPUTIME_ID:
			timer = new(std::nothrow) ThreadTimeUserTimer(
				thread_get_current_thread()->id);
			break;

		case CLOCK_PROCESS_CPUTIME_ID:
			if (team == NULL)
				return B_BAD_VALUE;
			timer = new(std::nothrow) TeamTimeUserTimer(team->id);
			break;

		case CLOCK_PROCESS_USER_CPUTIME_ID:
			if (team == NULL)
				return B_BAD_VALUE;
			timer = new(std::nothrow) TeamUserTimeUserTimer(team->id);
			break;

		default:
		{
			// The clock ID is the ID of the team whose CPU time the clock
			// refers to. Check whether the team exists and whether we have
			// permission to access its clock.
			if (clockID <= 0)
				return B_BAD_VALUE;
			if (clockID == team_get_kernel_team_id())
				return B_NOT_ALLOWED;

			Team* timedTeam = Team::GetAndLock(clockID);
			if (timedTeam == NULL)
				return B_BAD_VALUE;

			uid_t uid = geteuid();
			uid_t teamUID = timedTeam->effective_uid;

			timedTeam->UnlockAndReleaseReference();

			if (uid != 0 && uid != teamUID)
				return B_NOT_ALLOWED;

			timer = new(std::nothrow) TeamTimeUserTimer(clockID);
			break;
		}
	}

	if (timer == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<UserTimer> timerDeleter(timer);

	if (timerID >= 0)
		timer->SetID(timerID);

	SignalEvent* signalEvent = NULL;

	switch (event.sigev_notify) {
		case SIGEV_NONE:
			// the timer's event remains NULL
			break;

		case SIGEV_SIGNAL:
		{
			if (event.sigev_signo <= 0 || event.sigev_signo > MAX_SIGNAL_NUMBER)
				return B_BAD_VALUE;

			if (thread != NULL && (flags & USER_TIMER_SIGNAL_THREAD) != 0) {
				// The signal shall be sent to the thread.
				signalEvent = ThreadSignalEvent::Create(thread,
					event.sigev_signo, SI_TIMER, 0, team->id);
			} else {
				// The signal shall be sent to the team.
				signalEvent = TeamSignalEvent::Create(team, event.sigev_signo,
					SI_TIMER, 0);
			}

			if (signalEvent == NULL)
				return B_NO_MEMORY;

			timer->SetEvent(signalEvent);
			break;
		}

		case SIGEV_THREAD:
		{
			if (threadAttributes == NULL)
				return B_BAD_VALUE;

			CreateThreadEvent* event
				= CreateThreadEvent::Create(*threadAttributes);
			if (event == NULL)
				return B_NO_MEMORY;

			timer->SetEvent(event);
			break;
		}

		default:
			return B_BAD_VALUE;
	}

	// add it to the team/thread
	TimerLocker timerLocker;
	timerLocker.Lock(team, thread);

	status_t error = thread != NULL
		? thread->AddUserTimer(timer) : team->AddUserTimer(timer);
	if (error != B_OK)
		return error;

	// set a signal event's user value
	if (signalEvent != NULL) {
		// If no sigevent structure was given, use the timer ID.
		union sigval signalValue = event.sigev_value;
		if (isDefaultEvent)
			signalValue.sival_int = timer->ID();

		signalEvent->SetUserValue(signalValue);
	}

	return timerDeleter.Detach()->ID();
}
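The SIGEV_SIGNAL branch above is what a plain POSIX timer_create() call ends up in when USER_TIMER_SIGNAL_THREAD is not involved: the signal goes to the whole team. A hedged sketch of arming such a timer from userland, using only the standard POSIX timer API:

#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static void
on_timer(int)
{
	// Nothing to do; we only need pause() to return.
}

int
main()
{
	signal(SIGUSR1, on_timer);

	struct sigevent event = {};
	event.sigev_notify = SIGEV_SIGNAL;
	event.sigev_signo = SIGUSR1;

	timer_t timer;
	if (timer_create(CLOCK_MONOTONIC, &event, &timer) != 0) {
		perror("timer_create");
		return 1;
	}

	struct itimerspec spec = {};
	spec.it_value.tv_sec = 1;	// one-shot, one second from now
	timer_settime(timer, 0, &spec, NULL);

	pause();	// returns once the signal handler has run
	printf("timer fired\n");
	timer_delete(timer);
	return 0;
}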
Example 8
status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	int32 slot = id % sMaxSems;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
		return B_BAD_VALUE;

	InterruptsLocker _;
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// TODO: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		return B_NOT_ALLOWED;
	}

	KTRACE("release_sem_etc(sem: %ld, count: %ld, flags: 0x%lx)", id, count,
		flags);

	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;
#endif

	if (flags & B_RELEASE_ALL) {
		count = sSems[slot].u.used.net_count - sSems[slot].u.used.count;

		// is there anything to do for us at all?
		if (count == 0)
			return B_OK;

		// Don't release more than necessary -- there might be interrupted/
		// timed out threads in the queue.
		flags |= B_RELEASE_IF_WAITING_ONLY;
	}

	// Grab the scheduler lock, so thread_is_blocked() is reliable (due to
	// possible interruptions or timeouts, it wouldn't be otherwise).
	SpinLocker schedulerLocker(gSchedulerLock);

	while (count > 0) {
		queued_thread* entry = sSems[slot].queue.Head();
		if (entry == NULL) {
			if ((flags & B_RELEASE_IF_WAITING_ONLY) == 0) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
			}
			break;
		}

		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied,
			// unblock it. Otherwise we can't unblock any other thread.
			if (entry->count > sSems[slot].u.used.net_count + count) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
				break;
			}

			thread_unblock_locked(entry->thread, B_OK);

			int delta = min_c(count, entry->count);
			sSems[slot].u.used.count += delta;
			sSems[slot].u.used.net_count += delta - entry->count;
			count -= delta;
		} else {
			// The thread is no longer waiting, but still queued, which
			// means the acquisition failed and we can just remove it.
			sSems[slot].u.used.count += entry->count;
		}

		sSems[slot].queue.Remove(entry);
		entry->queued = false;
	}

	schedulerLocker.Unlock();

	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	// If we've unblocked another thread, reschedule, unless we've been
	// explicitly told not to.
	if ((flags & B_DO_NOT_RESCHEDULE) == 0) {
		semLocker.Unlock();
		schedulerLocker.Lock();
		scheduler_reschedule_if_necessary_locked();
	}

	return B_OK;
}
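release_sem_etc() is public API as well, so the B_RELEASE_ALL branch can be exercised directly. A minimal sketch (public <OS.h> calls only; with no waiters the release is a no-op):

#include <OS.h>

int
main()
{
	sem_id sem = create_sem(0, "release-all demo");

	// With B_RELEASE_ALL the count argument is ignored: the kernel
	// computes it from net_count - count and, since
	// B_RELEASE_IF_WAITING_ONLY is implied, wakes only actual waiters.
	release_sem_etc(sem, 0, B_RELEASE_ALL);

	delete_sem(sem);
	return 0;
}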
Example 9
status_t
switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
	uint32 flags, bigtime_t timeout)
{
	int slot = id % sMaxSems;
	int state;
	status_t status = B_OK;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

	if (!are_interrupts_enabled()) {
		panic("switch_sem_etc: called with interrupts disabled for sem %ld\n",
			id);
	}

	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0
		|| (flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
			== (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) {
		return B_BAD_VALUE;
	}

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("switch_sem_etc: bad sem %ld\n", id));
		status = B_BAD_SEM_ID;
		goto err;
	}

	// TODO: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to acquire kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		status = B_NOT_ALLOWED;
		goto err;
	}

	if (sSems[slot].u.used.count - count < 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) {
			// immediate timeout
			status = B_WOULD_BLOCK;
			goto err;
		} else if ((flags & B_ABSOLUTE_TIMEOUT) != 0 && timeout < 0) {
			// absolute negative timeout
			status = B_TIMED_OUT;
			goto err;
		}
	}

	KTRACE("switch_sem_etc(semToBeReleased: %ld, sem: %ld, count: %ld, "
		"flags: 0x%lx, timeout: %lld)", semToBeReleased, id, count, flags,
		timeout);

	if ((sSems[slot].u.used.count -= count) < 0) {
		// we need to block
		Thread *thread = thread_get_current_thread();

		TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p,"
			" name = %s\n", id, sSems[slot].u.used.name, thread, thread->name));

		// Do a quick check to see if the thread has any pending signals;
		// this should catch most of the cases where the thread had a signal.
		SpinLocker schedulerLocker(gSchedulerLock);
		if (thread_is_interrupted(thread, flags)) {
			schedulerLocker.Unlock();
			sSems[slot].u.used.count += count;
			status = B_INTERRUPTED;
				// the other semaphore will be released later
			goto err;
		}

		if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
			timeout = B_INFINITE_TIMEOUT;

		// enqueue in the semaphore queue and get ready to wait
		queued_thread queueEntry(thread, count);
		sSems[slot].queue.Add(&queueEntry);
		queueEntry.queued = true;

		thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SEMAPHORE,
			(void*)(addr_t)id);

		RELEASE_SEM_LOCK(sSems[slot]);

		// release the other semaphore, if any
		if (semToBeReleased >= 0) {
			release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
			semToBeReleased = -1;
		}

		schedulerLocker.Lock();

		status_t acquireStatus = timeout == B_INFINITE_TIMEOUT
			? thread_block_locked(thread)
			: thread_block_with_timeout_locked(flags, timeout);

		schedulerLocker.Unlock();
		GRAB_SEM_LOCK(sSems[slot]);

		// If we're still queued, this means the acquisition failed, and we
		// need to remove our entry and (potentially) wake up other threads.
		if (queueEntry.queued)
			remove_thread_from_sem(&queueEntry, &sSems[slot]);

		if (acquireStatus >= B_OK) {
			sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
			sSems[slot].u.used.last_acquire_count = count;
#endif
		}

		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);

		TRACE(("switch_sem_etc(sem %ld): exit block name %s, "
			"thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id,
			thread->name));
		KTRACE("switch_sem_etc() done: 0x%lx", acquireStatus);
		return acquireStatus;
	} else {
		sSems[slot].u.used.net_count -= count;
		sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
		sSems[slot].u.used.last_acquire_count = count;
#endif
	}

err:
	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	if (status == B_INTERRUPTED && semToBeReleased >= 0) {
		// Depending on when we were interrupted, the other semaphore may
		// not have been released yet; release it now so we always leave
		// in a consistent state.
		release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
	}

#if 0
	if (status == B_NOT_ALLOWED)
		_user_debugger("Thread tried to acquire kernel semaphore.");
#endif

	KTRACE("switch_sem_etc() done: 0x%lx", status);

	return status;
}
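The public acquire_sem_etc() funnels into switch_sem_etc() with semToBeReleased set to -1, so the timeout branches above are easy to observe from a normal program. A hedged sketch:

#include <OS.h>
#include <stdio.h>
#include <string.h>

int
main()
{
	sem_id sem = create_sem(0, "timeout demo");

	// Zero relative timeout on an unavailable semaphore hits the
	// "immediate timeout" branch: B_WOULD_BLOCK.
	status_t status = acquire_sem_etc(sem, 1, B_RELATIVE_TIMEOUT, 0);
	printf("immediate: %s\n", strerror(status));

	// A positive relative timeout blocks, then fails with B_TIMED_OUT.
	status = acquire_sem_etc(sem, 1, B_RELATIVE_TIMEOUT, 100000);
	printf("after 100 ms: %s\n", strerror(status));

	delete_sem(sem);
	return 0;
}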
Example 10
sem_id
create_sem(int32 count, const char* name)
{
	return create_sem_etc(count, name, team_get_kernel_team_id());
}
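Combined with the earlier examples, the typical lifecycle pairs create_sem() with delete_sem() and brackets the protected region with acquire_sem()/release_sem(). A minimal userland sketch; the in-kernel create_sem() shown above differs only in that the kernel team becomes the owner:

#include <OS.h>
#include <stdio.h>
#include <string.h>

int
main()
{
	// One initial count: the semaphore starts out available.
	sem_id sem = create_sem(1, "lifecycle demo");
	if (sem < 0) {
		printf("create_sem: %s\n", strerror(sem));
		return 1;
	}

	acquire_sem(sem);
	// ... critical section ...
	release_sem(sem);

	// delete_sem() would unblock any remaining waiters with B_BAD_SEM_ID.
	delete_sem(sem);
	return 0;
}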