Example #1
0
/* Restore the working directory after one step of a recursive descent.
 * When goback is false there is nothing to undo. With travlinks enabled we
 * re-enter through the link name and re-verify the directory identity
 * (CheckLinkSecurity); otherwise we simply step up one level with "..".
 * A failed chdir is treated as fatal: the agent is signalled to terminate. */
static void PopDirState(int goback, char *name, struct stat *sb, Recursion r)
{
    if (!goback)
    {
        return;
    }

    if (r.travlinks)
    {
        if (chdir(name) == -1)
        {
            CfOut(cf_error, "chdir", "Error in backing out of recursive travlink descent securely to %s", name);
            HandleSignals(SIGTERM);
        }

        /* Guard against the directory having been swapped via a link race. */
        CheckLinkSecurity(sb, name);
    }
    else
    {
        if (chdir("..") == -1)
        {
            CfOut(cf_error, "chdir", "Error in backing out of recursive descent securely to %s", name);
            HandleSignals(SIGTERM);
        }
    }
}
Example #2
0
/* Verify that the current directory "." still has the device/inode pair
 * recorded in sb, i.e. that nobody swapped the directory out from under us
 * while we were traversing (a path race). On mismatch, report a security
 * alert and signal the agent to terminate. A failed stat is reported but
 * treated as non-fatal. */
static void CheckLinkSecurity(struct stat *sb, char *name)
{
    struct stat current;

    CfDebug("Checking the inode and device to make sure we are where we think we are...\n");

    if (cfstat(".", &current) == -1)
    {
        CfOut(cf_error, "stat", "Could not stat directory %s after entering!", name);
        return;
    }

    if (sb->st_dev == current.st_dev && sb->st_ino == current.st_ino)
    {
        /* Still in the directory we expected to be in. */
        return;
    }

    CfOut(cf_error, "",
          "SERIOUS SECURITY ALERT: path race exploited in recursion to/from %s. Not safe for agent to continue - aborting",
          name);
    HandleSignals(SIGTERM);
    /* Exits */
}
Example #3
0
/*! Actually handles the signal - ie. the thread will exit, a custom signal
	handler is prepared, or whatever the signal demands.

	\param thread the thread whose pending, non-blocked signals are processed.
	\return \c true if the caller shall not restart the interrupted syscall
		(the thread is being stopped), \c false otherwise.
*/
bool
handle_signals(struct thread *thread)
{
	// Pending signals minus those currently blocked by the thread's mask.
	uint32 signalMask = atomic_get(&thread->sig_pending)
		& ~atomic_get(&thread->sig_block_mask);

	// If SIGKILL[THR] are pending, we ignore other signals.
	// Otherwise check, if the thread shall stop for debugging.
	if (signalMask & KILL_SIGNALS) {
		signalMask &= KILL_SIGNALS;
	} else if (thread->debug_info.flags & B_THREAD_DEBUG_STOP) {
		user_debug_stop_thread();
	}

	if (signalMask == 0)
		return false;

	// If the userland asked to defer signals, stash the deferrable ones
	// for later delivery instead of acting on them now.
	if (thread->user_thread->defer_signals > 0
		&& (signalMask & NON_DEFERRABLE_SIGNALS) == 0) {
		thread->user_thread->pending_signals = signalMask;
		return false;
	}

	thread->user_thread->pending_signals = 0;

	// Consume the don't-restart flag; it applies to this delivery only.
	bool restart = (atomic_and(&thread->flags,
			~THREAD_FLAGS_DONT_RESTART_SYSCALL)
		& THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;

	T(HandleSignals(signalMask));

	for (int32 i = 0; i < NSIG; i++) {
		bool debugSignal;
		int32 signal = i + 1;

		if ((signalMask & SIGNAL_TO_MASK(signal)) == 0)
			continue;

		// clear the signal that we will handle
		atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(signal));

		// The debugger is notified only when both signal forwarding is
		// enabled and a debugger is actually installed for the team.
		debugSignal = !(~atomic_get(&thread->team->debug_info.flags)
				& (B_TEAM_DEBUG_SIGNALS | B_TEAM_DEBUG_DEBUGGER_INSTALLED));

		// TODO: since sigaction_etc() could clobber the fields at any time,
		//		we should actually copy the relevant fields atomically before
		//		accessing them (only the debugger is calling sigaction_etc()
		//		right now).
		//		Update: sigaction_etc() is only used by the userland debugger
		//		support. We can just as well restrict getting/setting signal
		//		handlers to work only when the respective thread is stopped.
		//		Then sigaction() could be used instead and we could get rid of
		//		sigaction_etc().
		struct sigaction* handler = &thread->sig_action[i];

		TRACE(("Thread 0x%lx received signal %s\n", thread->id, sigstr[signal]));

		if (handler->sa_handler == SIG_IGN) {
			// signal is to be ignored
			// ToDo: apply zombie cleaning on SIGCHLD

			// notify the debugger
			if (debugSignal)
				notify_debugger(thread, signal, handler, false);
			continue;
		} else if (handler->sa_handler == SIG_DFL) {
			// default signal behaviour
			switch (signal) {
				case SIGCHLD:
				case SIGWINCH:
				case SIGURG:
					// notify the debugger
					if (debugSignal)
						notify_debugger(thread, signal, handler, false);
					continue;

				case SIGCONT:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(gTeamSpinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_CONTINUED, signal, false);

						// The standard states that the system *may* send a
						// SIGCHLD when a child is continued. I haven't found
						// a good reason why we would want to, though.
					}
					continue;

				case SIGSTOP:
				case SIGTSTP:
				case SIGTTIN:
				case SIGTTOU:
					// notify the debugger
					if (debugSignal
						&& !notify_debugger(thread, signal, handler, false))
						continue;

					thread->next_state = B_THREAD_SUSPENDED;

					// notify threads waiting for team state changes
					if (thread == thread->team->main_thread) {
						InterruptsSpinLocker locker(gTeamSpinlock);
						team_set_job_control_state(thread->team,
							JOB_CONTROL_STATE_STOPPED, signal, false);

						// send a SIGCHLD to the parent (if it does have
						// SA_NOCLDSTOP defined)
						SpinLocker _(gThreadSpinlock);
						struct thread* parentThread
							= thread->team->parent->main_thread;
						struct sigaction& parentHandler
							= parentThread->sig_action[SIGCHLD - 1];
						if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0)
							deliver_signal(parentThread, SIGCHLD, 0);
					}

					return true;

				case SIGSEGV:
				case SIGFPE:
				case SIGILL:
				case SIGTRAP:
				case SIGABRT:
					// If this is the main thread, we just fall through and let
					// this signal kill the team. Otherwise we send a SIGKILL to
					// the main thread first, since the signal will kill this
					// thread only.
					if (thread != thread->team->main_thread)
						send_signal(thread->team->main_thread->id, SIGKILL);
					// fall through
				case SIGQUIT:
				case SIGPOLL:
				case SIGPROF:
				case SIGSYS:
				case SIGVTALRM:
				case SIGXCPU:
				case SIGXFSZ:
					TRACE(("Shutting down thread 0x%lx due to signal #%ld\n",
						thread->id, signal));
					// fall through
				case SIGKILL:
				case SIGKILLTHR:
				default:
					// if the thread exited normally, the exit reason is already set
					if (thread->exit.reason != THREAD_RETURN_EXIT) {
						thread->exit.reason = THREAD_RETURN_INTERRUPTED;
						thread->exit.signal = (uint16)signal;
					}

					// notify the debugger
					if (debugSignal && signal != SIGKILL && signal != SIGKILLTHR
						&& !notify_debugger(thread, signal, handler, true))
						continue;

					thread_exit();
						// won't return
			}
		}

		// User defined signal handler

		// notify the debugger
		if (debugSignal && !notify_debugger(thread, signal, handler, false))
			continue;

		if (!restart || (handler->sa_flags & SA_RESTART) == 0)
			atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);

		T(ExecuteSignalHandler(signal, handler));

		TRACE(("### Setting up custom signal handler frame...\n"));
		arch_setup_signal_frame(thread, handler, signal,
			atomic_get(&thread->sig_block_mask));

		if (handler->sa_flags & SA_ONESHOT)
			handler->sa_handler = SIG_DFL;
		if ((handler->sa_flags & SA_NOMASK) == 0) {
			// Update the block mask while the signal handler is running - it
			// will be automatically restored when the signal frame is left.
			atomic_or(&thread->sig_block_mask,
				(handler->sa_mask | SIGNAL_TO_MASK(signal)) & BLOCKABLE_SIGNALS);
		}

		update_current_thread_signals_flag();

		return false;
	}

	// clear syscall restart thread flag, if we're not supposed to restart the
	// syscall
	if (!restart)
		atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);

	update_current_thread_signals_flag();

	return false;
}