/*!	\brief sigaction() for the specified thread.
	A \a threadID < 0 specifies the current thread.

	Installs (\a act) and/or retrieves (\a oldAction) the signal disposition
	for \a signal on the given thread. Returns B_OK, B_BAD_VALUE for an
	invalid/unblockable signal number, or B_BAD_THREAD_ID if the thread
	does not exist.
*/
int
sigaction_etc(thread_id threadID, int signal, const struct sigaction *act,
	struct sigaction *oldAction)
{
	struct thread *thread;
	cpu_status state;
	status_t error = B_OK;

	// reject out-of-range signals and those that may never be caught/blocked
	if (signal < 1 || signal > MAX_SIGNO
		|| (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
		return B_BAD_VALUE;

	// the thread table and per-thread sig_action[] are guarded by the
	// thread lock, which requires interrupts to be disabled
	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	thread = (threadID < 0
		? thread_get_current_thread()
		: thread_get_thread_struct_locked(threadID));

	if (thread) {
		if (oldAction) {
			// save previous sigaction structure
			memcpy(oldAction, &thread->sig_action[signal - 1],
				sizeof(struct sigaction));
		}

		if (act) {
			T(SigAction(thread, signal, act));

			// set new sigaction structure
			memcpy(&thread->sig_action[signal - 1], act,
				sizeof(struct sigaction));
			// never let a handler's mask block the unblockable signals
			thread->sig_action[signal - 1].sa_mask &= BLOCKABLE_SIGNALS;
		}

		if (act && act->sa_handler == SIG_IGN) {
			// remove pending signal if it should now be ignored
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		} else if (act && act->sa_handler == SIG_DFL
			&& (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0) {
			// remove pending signal for those signals whose default
			// action is to ignore them
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));
		}
	} else
		error = B_BAD_THREAD_ID;

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return error;
}
/*!	Wait for the specified signals, and return the signal retrieved in
	\a _signal.

	Blocks the calling thread until some signal is pending, then scans for
	the lowest-numbered pending signal that is also in \a set. Returns B_OK
	with that signal in \a _signal, or B_INTERRUPTED when the pending
	signal(s) were not in \a set.

	NOTE(review): the wake-up loop tests has_signals_pending() for ANY
	pending signal, not only those in \a set — a signal outside the set
	ends the wait with B_INTERRUPTED rather than continuing to block.
	NOTE(review): the retrieved signal is not removed from sig_pending
	here — presumably POSIX sigwait() would consume it; verify against
	callers before changing.
*/
int
sigwait(const sigset_t *set, int *_signal)
{
	struct thread *thread = thread_get_current_thread();

	// block until at least one signal is pending on this thread
	while (!has_signals_pending(thread)) {
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_SIGNAL, NULL);
		thread_block();
	}

	// restrict to the signals the caller is actually waiting for
	int signalsPending = atomic_get(&thread->sig_pending) & *set;

	update_current_thread_signals_flag();

	if (signalsPending) {
		// select the lowest pending signal to return in _signal
		for (int signal = 1; signal < NSIG; signal++) {
			if ((SIGNAL_TO_MASK(signal) & signalsPending) != 0) {
				*_signal = signal;
				return B_OK;
			}
		}
	}

	return B_INTERRUPTED;
}
/*	Tests whether \a signal is a member of \a set.
	Returns 1 if present, 0 if not, or -1 with errno set to EINVAL for an
	out-of-range signal number.
*/
int
__sigismember(const sigset_t* set, int signal)
{
	// validate the signal number first
	if (signal < 1 || signal > MAX_SIGNAL_NUMBER) {
		__set_errno(EINVAL);
		return -1;
	}

	if ((*set & SIGNAL_TO_MASK(signal)) != 0)
		return 1;

	return 0;
}
/*	Adds \a signal to the BeOS-compatibility signal set \a set.
	Returns 0 on success, or -1 with errno set to EINVAL for an
	out-of-range signal number.
*/
int
__sigaddset_beos(sigset_t_beos* set, int signal)
{
	// validate the signal number first
	if (signal < 1 || signal > MAX_SIGNAL_NUMBER_BEOS) {
		__set_errno(EINVAL);
		return -1;
	}

	// set the corresponding bit in the mask
	*set = *set | SIGNAL_TO_MASK(signal);
	return 0;
}
static bool notify_debugger(struct thread *thread, int signal, struct sigaction *handler, bool deadly) { uint64 signalMask = SIGNAL_TO_MASK(signal); // first check the ignore signal masks the debugger specified for the thread if (atomic_get(&thread->debug_info.ignore_signals_once) & signalMask) { atomic_and(&thread->debug_info.ignore_signals_once, ~signalMask); return true; } if (atomic_get(&thread->debug_info.ignore_signals) & signalMask) return true; // deliver the event return user_debug_handle_signal(signal, handler, deadly); }
/*!	Delivers the \a signal to the \a thread, but doesn't handle the signal -
	it just makes sure the thread gets the signal, ie. unblocks it if needed.
	This function must be called with interrupts disabled and the
	thread lock held.

	Marks the signal pending on the thread and wakes/interrupts the target
	(and, for SIGKILL, the team's main thread) as required by the signal's
	semantics. Returns B_OK.
*/
static status_t
deliver_signal(struct thread *thread, uint signal, uint32 flags)
{
	if (flags & B_CHECK_PERMISSION) {
		// ToDo: introduce euid & uid fields to the team and check permission
	}

	// signal 0 is the conventional "existence test"; nothing to deliver
	if (signal == 0)
		return B_OK;

	if (thread->team == team_get_kernel_team()) {
		// Signals to kernel threads will only wake them up
		if (thread->state == B_THREAD_SUSPENDED)
			scheduler_enqueue_in_run_queue(thread);
		return B_OK;
	}

	atomic_or(&thread->sig_pending, SIGNAL_TO_MASK(signal));

	switch (signal) {
		case SIGKILL:
		{
			// Forward KILLTHR to the main thread of the team
			struct thread *mainThread = thread->team->main_thread;
			atomic_or(&mainThread->sig_pending, SIGNAL_TO_MASK(SIGKILLTHR));

			// Wake up main thread
			if (mainThread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(mainThread);
			else
				thread_interrupt(mainThread, true);

			update_thread_signals_flag(mainThread);

			// Supposed to fall through
		}
		case SIGKILLTHR:
			// Wake up suspended threads and interrupt waiting ones
			if (thread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(thread);
			else
				thread_interrupt(thread, true);
			break;

		case SIGCONT:
			// Wake up thread if it was suspended
			if (thread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(thread);

			if ((flags & SIGNAL_FLAG_DONT_RESTART_SYSCALL) != 0)
				atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);

			atomic_and(&thread->sig_pending, ~STOP_SIGNALS);
				// remove any pending stop signals

			break;

		default:
			// only interrupt the thread when the signal isn't blocked
			// (SIGCHLD always interrupts, e.g. for waitpid())
			if (thread->sig_pending
				& (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD))) {
				// Interrupt thread if it was waiting
				thread_interrupt(thread, false);
			}
			break;
	}

	update_thread_signals_flag(thread);

	return B_OK;
}
bool is_signal_blocked(int signal) { return (atomic_get(&thread_get_current_thread()->sig_block_mask) & SIGNAL_TO_MASK(signal)) != 0; }
/*!	Actually handles the signal - ie. the thread will exit, a custom signal
	handler is prepared, or whatever the signal demands.

	Iterates over the deliverable (pending and unblocked) signals in
	ascending order, applying the ignore/default/custom disposition for
	each. Returns \c true when the caller should reschedule (the thread was
	stopped), \c false otherwise; may not return at all when the signal is
	deadly (thread_exit()).
*/
bool
handle_signals(struct thread *thread)
{
	// signals that are both pending and not blocked
	uint32 signalMask = atomic_get(&thread->sig_pending)
		& ~atomic_get(&thread->sig_block_mask);

	// If SIGKILL[THR] are pending, we ignore other signals.
	// Otherwise check, if the thread shall stop for debugging.
	if (signalMask & KILL_SIGNALS) {
		signalMask &= KILL_SIGNALS;
	} else if (thread->debug_info.flags & B_THREAD_DEBUG_STOP) {
		user_debug_stop_thread();
	}

	if (signalMask == 0)
		return 0;

	// honor userland signal deferral (sigdeferred regions), except for
	// signals that must never be deferred
	if (thread->user_thread->defer_signals > 0
		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0) {
		thread->user_thread->pending_signals = signalMask;
		return 0;
	}

	thread->user_thread->pending_signals = 0;

	// atomically consume the don't-restart flag; it applies to this
	// delivery round only
	bool restart = (atomic_and(&thread->flags,
		~THREAD_FLAGS_DONT_RESTART_SYSCALL)
		& THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;

	T(HandleSignals(signalMask));

	for (int32 i = 0; i < NSIG; i++) {
		bool debugSignal;
		int32 signal = i + 1;

		if ((signalMask & SIGNAL_TO_MASK(signal)) == 0)
			continue;

		// clear the signal that we will handle
		atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));

		// the debugger is notified only when both flags are set on the team
		debugSignal = !(~atomic_get(&thread->team->debug_info.flags)
			& (B_TEAM_DEBUG_SIGNALS | B_TEAM_DEBUG_DEBUGGER_INSTALLED));

		// TODO: since sigaction_etc() could clobber the fields at any time,
		// we should actually copy the relevant fields atomically before
		// accessing them (only the debugger is calling sigaction_etc()
		// right now).
		// Update: sigaction_etc() is only used by the userland debugger
		// support. We can just as well restrict getting/setting signal
		// handlers to work only when the respective thread is stopped.
		// Then sigaction() could be used instead and we could get rid of
		// sigaction_etc().
		struct sigaction* handler = &thread->sig_action[i];

		TRACE(("Thread 0x%lx received signal %s\n", thread->id,
			sigstr[signal]));

		if (handler->sa_handler == SIG_IGN) {
			// signal is to be ignored
			// ToDo: apply zombie cleaning on SIGCHLD

			// notify the debugger
			if (debugSignal)
				notify_debugger(thread, signal, handler, false);
			continue;
		} else if (handler->sa_handler == SIG_DFL) {
			// default signal behaviour
			switch (signal) {
				case SIGCHLD:
				case SIGWINCH:
				case SIGURG:
					// default action for these is to ignore
					// notify the debugger
					if (debugSignal)
						notify_debugger(thread, signal, handler, false);
					continue;

				case SIGCONT:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(gTeamSpinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_CONTINUED, signal, false);

						// The standard states that the system *may* send a
						// SIGCHLD when a child is continued. I haven't found
						// a good reason why we would want to, though.
					}
					continue;

				case SIGSTOP:
				case SIGTSTP:
				case SIGTTIN:
				case SIGTTOU:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					// the actual suspension happens when the caller
					// reschedules after we return true
					thread->next_state = B_THREAD_SUSPENDED;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(gTeamSpinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_STOPPED, signal, false);

						// send a SIGCHLD to the parent (if it does have
						// SA_NOCLDSTOP defined)
						SpinLocker _(gThreadSpinlock);
						struct thread* parentThread
							= thread->team->parent->main_thread;
						struct sigaction& parentHandler
							= parentThread->sig_action[SIGCHLD - 1];
						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0)
							deliver_signal(parentThread, SIGCHLD, 0);
					}

					return true;

				case SIGSEGV:
				case SIGFPE:
				case SIGILL:
				case SIGTRAP:
				case SIGABRT:
					// If this is the main thread, we just fall through and let
					// this signal kill the team. Otherwise we send a SIGKILL to
					// the main thread first, since the signal will kill this
					// thread only.
					if (thread != thread->team->main_thread)
						send_signal(thread->team->main_thread->id, SIGKILL);
					// fall through
				case SIGQUIT:
				case SIGPOLL:
				case SIGPROF:
				case SIGSYS:
				case SIGVTALRM:
				case SIGXCPU:
				case SIGXFSZ:
					TRACE(("Shutting down thread 0x%lx due to signal #%ld\n",
						thread->id, signal));
					// fall through
				case SIGKILL:
				case SIGKILLTHR:
				default:
					// if the thread exited normally, the exit reason is
					// already set
					if (thread->exit.reason != THREAD_RETURN_EXIT) {
						thread->exit.reason = THREAD_RETURN_INTERRUPTED;
						thread->exit.signal = (uint16)signal;
					}

					// notify the debugger
					if (debugSignal && signal != SIGKILL
						&& signal != SIGKILLTHR
						&& !notify_debugger(thread, signal, handler, true))
						continue;

					thread_exit();
						// won't return
			}
		}

		// User defined signal handler

		// notify the debugger
		if (debugSignal && !notify_debugger(thread, signal, handler, false))
			continue;

		// a handler without SA_RESTART cancels syscall restarting
		if (!restart || (handler->sa_flags & SA_RESTART) == 0)
			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);

		T(ExecuteSignalHandler(signal, handler));

		TRACE(("### Setting up custom signal handler frame...\n"));
		arch_setup_signal_frame(thread, handler, signal,
			atomic_get(&thread->sig_block_mask));

		if (handler->sa_flags & SA_ONESHOT)
			handler->sa_handler = SIG_DFL;
		if ((handler->sa_flags & SA_NOMASK) == 0) {
			// Update the block mask while the signal handler is running - it
			// will be automatically restored when the signal frame is left.
			atomic_or(&thread->sig_block_mask,
				(handler->sa_mask | SIGNAL_TO_MASK(signal))
					& BLOCKABLE_SIGNALS);
		}

		update_current_thread_signals_flag();

		// only one custom handler frame is set up per call
		return false;
	}

	// clear syscall restart thread flag, if we're not supposed to restart the
	// syscall
	if (!restart)
		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);

	update_current_thread_signals_flag();

	return false;
}