Example 1
/*!	\brief sigaction() for the specified thread.
	A \a threadID < 0 specifies the current thread.
*/
int
sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
	struct sigaction *oldAction)
{
	struct thread *thread;
	cpu_status state;
	status_t error = B_OK;

	if (signal < 1 || signal > MAX_SIGNO
		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	thread = (threadID < 0
		? thread_get_current_thread()
		: thread_get_thread_struct_locked(threadID));

	if (thread) {
		if (oldAction) {
			// save previous sigaction structure
			memcpy(oldAction, &thread->sig_action[signal - 1],
				sizeof(struct sigaction));
		}

		if (act) {
			T(SigAction(thread, signal, act));

			// set new sigaction structure
			memcpy(&thread->sig_action[signal - 1], act,
				sizeof(struct sigaction));
			thread->sig_action[signal - 1].sa_mask &= BLOCKABLE_SIGNALS;
		}

		if (act && act->sa_handler == SIG_IGN) {
			// remove pending signal if it should now be ignored
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		} else if (act && act->sa_handler == SIG_DFL
			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0) {
			// remove pending signal for those signals whose default
			// action is to ignore them
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		}
	} else
		error = B_BAD_THREAD_ID;

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return error;
}
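
A minimal calling sketch for the function above, assuming a kernel-side caller. The handler body, the choice of SIGUSR1, and the install_usr1_handler() wrapper are illustrative, not part of the code shown.

static void
my_usr1_handler(int signal)
{
	// illustrative handler; real handlers should do minimal work
	(void)signal;
}

static int
install_usr1_handler(void)
{
	struct sigaction action;
	struct sigaction oldAction;

	memset(&action, 0, sizeof(action));
	action.sa_handler = my_usr1_handler;

	// a negative threadID selects the current thread (see the doc
	// comment above); the previous disposition lands in oldAction
	return sigaction_etc(-1, SIGUSR1, &action, &oldAction);
}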
Example 2
bigtime_t
_user_estimate_max_scheduling_latency(thread_id id)
{
	syscall_64_bit_return_value();

	InterruptsSpinLocker locker(gThreadSpinlock);

	// a negative id means the calling thread, as in sigaction_etc() above
	struct thread* thread = id < 0
		? thread_get_current_thread() : thread_get_thread_struct_locked(id);
	if (thread == NULL)
		return 0;

	return gScheduler->estimate_max_scheduling_latency(thread);
}
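
A hedged usage sketch from the userland side, assuming the public estimate_max_scheduling_latency() wrapper declared in OS.h forwards to this syscall; the deadline computation itself is illustrative.

#include <OS.h>

// Illustrative: derive a wakeup deadline that leaves headroom for the
// scheduler's worst-case latency estimate for the current thread (-1,
// which the kernel side above maps to thread_get_current_thread()).
static bigtime_t
compute_deadline(bigtime_t periodUsecs)
{
	bigtime_t latency = estimate_max_scheduling_latency(-1);
	return system_time() + periodUsecs - latency;
}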
Example 3
int
send_signal_etc(pid_t id, uint signal, uint32 flags)
{
	status_t status = B_BAD_THREAD_ID;
	struct thread *thread;
	cpu_status state = 0;

	// signal is unsigned, so only the upper bound needs checking
	if (signal > MAX_SIGNO)
		return B_BAD_VALUE;

	T(SendSignal(id, signal, flags));

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		state = disable_interrupts();

	if (id > 0) {
		// send a signal to the specified thread

		GRAB_THREAD_LOCK();

		thread = thread_get_thread_struct_locked(id);
		if (thread != NULL)
			status = deliver_signal(thread, signal, flags);
	} else {
		// send a signal to the specified process group
		// (the absolute value of the id)

		struct process_group *group;

		// TODO: handle -1 correctly
		if (id == 0 || id == -1) {
			// send a signal to the current team
			id = thread_get_current_thread()->team->id;
		} else
			id = -id;

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			GRAB_TEAM_LOCK();

		group = team_get_process_group_locked(NULL, id);
		if (group != NULL) {
			struct team *team, *next;

			// Send a signal to all teams in this process group

			for (team = group->teams; team != NULL; team = next) {
				next = team->group_next;
				id = team->id;

				GRAB_THREAD_LOCK();

				thread = thread_get_thread_struct_locked(id);
				if (thread != NULL) {
					// We don't stop on an error sending the signal; we
					// rather want to send as many signals as possible.
					status = deliver_signal(thread, signal, flags);
				}

				RELEASE_THREAD_LOCK();
			}
		}

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			RELEASE_TEAM_LOCK();

		GRAB_THREAD_LOCK();
	}

	if ((flags & (B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED)) == 0)
		scheduler_reschedule_if_necessary_locked();

	RELEASE_THREAD_LOCK();

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		restore_interrupts(state);

	return status;
}
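
Hedged call sketches for the two delivery paths above; the thread id (someThread), the group id, and the signal numbers are placeholders.

// Illustrative kernel-side calls, following the id conventions above.

// Positive id: deliver SIGTERM to one thread and reschedule right away.
int status = send_signal_etc(someThread, SIGTERM, 0);

// Negative id: deliver SIGHUP to every team in process group 42,
// leaving the reschedule to the caller via B_DO_NOT_RESCHEDULE.
status = send_signal_etc(-42, SIGHUP, B_DO_NOT_RESCHEDULE);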