Example #1
void
linux_to_bsd_sigset(l_sigset_t *lss, sigset_t *bss)
{
	int b, l;

	SIGEMPTYSET(*bss);
	/*
	 * Signals above LINUX_SIGTBLSZ share their numbering with the
	 * native side, so those bits can be copied straight across.
	 */
	bss->__bits[0] = lss->__bits[0] & ~((1U << LINUX_SIGTBLSZ) - 1);
	bss->__bits[1] = lss->__bits[1];
	/* Remap the low signals one at a time through the table. */
	for (l = 1; l <= LINUX_SIGTBLSZ; l++) {
		if (LINUX_SIGISMEMBER(*lss, l)) {
			b = linux_to_bsd_signal[_SIG_IDX(l)];
			if (b)
				SIGADDSET(*bss, b);
		}
	}
}
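
For comparison, here is a minimal userspace sketch of the same build-and-translate idiom using the portable sigemptyset(3)/sigaddset(3)/sigismember(3) calls. The sig_map[] table and translate_sigset() are hypothetical stand-ins for linux_to_bsd_signal[] and the function above, not the real mapping.

#include <signal.h>
#include <stdio.h>

/* Hypothetical stand-in for linux_to_bsd_signal[]; not the real table. */
static const int sig_map[] = { 0, SIGHUP, SIGINT, SIGQUIT };

static int
translate_sigset(const sigset_t *src, sigset_t *dst)
{
	int s;

	if (sigemptyset(dst) != 0)
		return (-1);
	/* Remap each member of src through the table, as above. */
	for (s = 1; s < (int)(sizeof(sig_map) / sizeof(sig_map[0])); s++)
		if (sigismember(src, s) == 1 && sig_map[s] != 0 &&
		    sigaddset(dst, sig_map[s]) != 0)
			return (-1);
	return (0);
}

int
main(void)
{
	sigset_t in, out;

	sigemptyset(&in);
	sigaddset(&in, 2);	/* source-domain signal number 2 */
	translate_sigset(&in, &out);
	printf("mapped to SIGINT: %d\n", sigismember(&out, SIGINT));
	return (0);
}
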
Example #2
void
_pthread_exit_mask(void *status, sigset_t *mask)
{
    struct pthread *curthread = _get_curthread();

    /* Check if this thread is already in the process of exiting: */
    if (curthread->cancelling) {
        char msg[128];
        snprintf(msg, sizeof(msg), "Thread %p has called "
                 "pthread_exit() from a destructor. POSIX 1003.1 "
                 "1996 s16.2.5.2 does not allow this!", curthread);
        PANIC(msg);
    }

    /* Flag this thread as exiting. */
    curthread->cancelling = 1;
    curthread->no_cancel = 1;
    curthread->cancel_async = 0;
    curthread->cancel_point = 0;
    if (mask != NULL)
        __sys_sigprocmask(SIG_SETMASK, mask, NULL);
    if (curthread->unblock_sigcancel) {
        sigset_t set;

        curthread->unblock_sigcancel = 0;
        SIGEMPTYSET(set);
        SIGADDSET(set, SIGCANCEL);
        __sys_sigprocmask(SIG_UNBLOCK, &set, NULL);
    }

    /* Save the return value: */
    curthread->ret = status;
#ifdef _PTHREAD_FORCED_UNWIND

#ifdef PIC
    thread_uw_init();
#endif /* PIC */

#ifdef PIC
    if (uwl_forcedunwind != NULL) {
#else
    if (_Unwind_ForcedUnwind != NULL) {
#endif
        if (curthread->unwind_disabled) {
            if (message_printed == 0) {
                message_printed = 1;
                _thread_printf(2, "Warning: old _pthread_cleanup_push was called, "
                               "stack unwinding is disabled.\n");
            }
            goto cleanup;
        }
        thread_unwind();

    } else {
cleanup:
        while (curthread->cleanup != NULL) {
            __pthread_cleanup_pop_imp(1);
        }
        exit_thread();
    }

#else
    while (curthread->cleanup != NULL) {
        __pthread_cleanup_pop_imp(1);
    }

    exit_thread();
#endif /* _PTHREAD_FORCED_UNWIND */
}
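
The unblock_sigcancel path above is a common idiom: build a one-signal set, then unblock exactly that signal. Below is a portable sketch using pthread_sigmask(3) in place of libthr's private __sys_sigprocmask(); SIGCANCEL is internal to libthr, so the signal is a parameter here, and unblock_one is an illustrative name.

#include <pthread.h>
#include <signal.h>

/*
 * Minimal sketch of the single-signal unblock idiom, with the portable
 * pthread_sigmask(3) standing in for libthr's __sys_sigprocmask().
 */
static int
unblock_one(int sig)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, sig);
	return (pthread_sigmask(SIG_UNBLOCK, &set, NULL));
}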

static void
exit_thread(void)
{
    struct pthread *curthread = _get_curthread();

    /* Check if there is thread specific data: */
    if (curthread->specific != NULL) {
        /* Run the thread-specific data destructors: */
        _thread_cleanupspecific();
    }

    if (!_thr_isthreaded())
        exit(0);

    if (atomic_fetchadd_int(&_thread_active_threads, -1) == 1) {
        exit(0);
        /* Never reached. */
    }

    /* Tell malloc that the thread is exiting. */
    _malloc_thread_cleanup();

    THR_LOCK(curthread);
    curthread->state = PS_DEAD;
    if (curthread->flags & THR_FLAGS_NEED_SUSPEND) {
        curthread->cycle++;
        _thr_umtx_wake(&curthread->cycle, INT_MAX, 0);
    }
    if (!curthread->force_exit && SHOULD_REPORT_EVENT(curthread, TD_DEATH))
        _thr_report_death(curthread);
    /*
     * Thread was created with initial refcount 1, we drop the
     * reference count to allow it to be garbage collected.
     */
    curthread->refcount--;
    _thr_try_gc(curthread, curthread); /* thread lock released */

#if defined(_PTHREADS_INVARIANTS)
    if (THR_IN_CRITICAL(curthread))
        PANIC("thread exits with resources held!");
#endif
    /*
     * Kernel will do wakeup at the address, so joiner thread
     * will be resumed if it is sleeping at the address.
     */
    thr_exit(&curthread->tid);
    PANIC("thr_exit() returned");
    /* Never reached. */
}
Example #3
/*
 * This function and pthread_create() do a lot of the same things.
 * It'd be nice to consolidate the common stuff in one place.
 */
static void
init_main_thread(struct pthread *thread)
{
	/* Setup the thread attributes. */
	thread->attr = _pthread_attr_default;
	thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
	/*
	 * Set up the thread stack.
	 *
	 * Create a red zone below the main stack.  All other stacks
	 * are constrained to a maximum size by the parameters
	 * passed to mmap(), but this stack is only limited by
	 * resource limits, so this stack needs an explicitly mapped
	 * red zone to protect the thread stack that is just beyond.
	 */
	if (mmap((void *)((uintptr_t)_usrstack - _thr_stack_initial -
	    _thr_guard_default), _thr_guard_default, 0, MAP_ANON,
	    -1, 0) == MAP_FAILED)
		PANIC("Cannot allocate red zone for initial thread");

	/*
	 * Mark the stack as an application supplied stack so that it
	 * isn't deallocated.
	 *
	 * XXX - I'm not sure it would hurt anything to deallocate
	 *       the main thread stack because deallocation doesn't
	 *       actually free() it; it just puts it in the free
	 *       stack queue for later reuse.
	 */
	thread->attr.stackaddr_attr = (void *)((uintptr_t)_usrstack -
	     _thr_stack_initial);
	thread->attr.stacksize_attr = _thr_stack_initial;
	thread->attr.guardsize_attr = _thr_guard_default;
	thread->attr.flags |= THR_STACK_USER;

	/*
	 * Write a magic value to the thread structure
	 * to help identify valid ones:
	 */
	thread->magic = THR_MAGIC;

	thread->slice_usec = -1;
	thread->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
	thread->name = strdup("initial thread");

	/* Initialize the thread for signals: */
	SIGEMPTYSET(thread->sigmask);

	/*
	 * Set up the thread mailbox.  The thread's saved context
	 * is also in the mailbox.
	 */
	thread->tcb->tcb_tmbx.tm_udata = thread;
	thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_size =
	    thread->attr.stacksize_attr;
	thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_sp =
	    thread->attr.stackaddr_attr;

	/* Default the priority of the initial thread: */
	thread->base_priority = THR_DEFAULT_PRIORITY;
	thread->active_priority = THR_DEFAULT_PRIORITY;
	thread->inherited_priority = 0;

	/* Initialize the mutex queue: */
	TAILQ_INIT(&thread->mutexq);

	/* Initialize hooks in the thread structure: */
	thread->specific = NULL;
	thread->cleanup = NULL;
	thread->flags = 0;
	thread->sigbackout = NULL;
	thread->continuation = NULL;

	thread->state = PS_RUNNING;
	thread->uniqueid = 0;
}
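
The red-zone comment above describes a trick worth seeing in isolation: map one page with no access rights so any stray access into it faults immediately. A minimal standalone sketch, assuming portable mmap(2) flags (the snippet above passes a literal prot of 0, which is PROT_NONE):

#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	size_t pagesz = (size_t)getpagesize();
	void *guard;

	/* An inaccessible page: any read or write through it faults. */
	guard = mmap(NULL, pagesz, PROT_NONE, MAP_ANON | MAP_PRIVATE,
	    -1, 0);
	if (guard == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}
	printf("guard page at %p\n", guard);
	return (0);
}
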
Example #4
static void
init_private(void)
{
	struct clockinfo clockinfo;
	size_t len;
	int mib[2];

	/*
	 * Avoid reinitializing some things if they don't need to be,
	 * e.g. after a fork().
	 */
	if (init_once == 0) {
		/* Find the stack top */
		mib[0] = CTL_KERN;
		mib[1] = KERN_USRSTACK;
		len = sizeof (_usrstack);
		if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1)
			PANIC("Cannot get kern.usrstack from sysctl");
		/* Get the kernel clockrate: */
		mib[0] = CTL_KERN;
		mib[1] = KERN_CLOCKRATE;
		len = sizeof (struct clockinfo);
		if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0)
			_clock_res_usec = 1000000 / clockinfo.stathz;
		else
			_clock_res_usec = CLOCK_RES_USEC;

		_thr_page_size = getpagesize();
		_thr_guard_default = _thr_page_size;
		if (sizeof(void *) == 8) {
			_thr_stack_default = THR_STACK64_DEFAULT;
			_thr_stack_initial = THR_STACK64_INITIAL;
		} else {
			_thr_stack_default = THR_STACK32_DEFAULT;
			_thr_stack_initial = THR_STACK32_INITIAL;
		}
		_pthread_attr_default.guardsize_attr = _thr_guard_default;
		_pthread_attr_default.stacksize_attr = _thr_stack_default;
		TAILQ_INIT(&_thr_atfork_list);
		init_once = 1;	/* Don't do this again. */
	} else {
		/*
		 * Destroy the locks before creating them.  We don't
		 * know what state they are in so it is better to just
		 * recreate them.
		 */
		_lock_destroy(&_thread_signal_lock);
		_lock_destroy(&_mutex_static_lock);
		_lock_destroy(&_rwlock_static_lock);
		_lock_destroy(&_keytable_lock);
	}

	/* Initialize everything else. */
	TAILQ_INIT(&_thread_list);
	TAILQ_INIT(&_thread_gc_list);
	_pthread_mutex_init(&_thr_atfork_mutex, NULL);

	/*
	 * Initialize the lock for temporary installation of signal
	 * handlers (to support sigwait() semantics) and for the
	 * process signal mask and pending signal sets.
	 */
	if (_lock_init(&_thread_signal_lock, LCK_ADAPTIVE,
	    _kse_lock_wait, _kse_lock_wakeup, calloc) != 0)
		PANIC("Cannot initialize _thread_signal_lock");
	if (_lock_init(&_mutex_static_lock, LCK_ADAPTIVE,
	    _thr_lock_wait, _thr_lock_wakeup, calloc) != 0)
		PANIC("Cannot initialize mutex static init lock");
	if (_lock_init(&_rwlock_static_lock, LCK_ADAPTIVE,
	    _thr_lock_wait, _thr_lock_wakeup, calloc) != 0)
		PANIC("Cannot initialize rwlock static init lock");
	if (_lock_init(&_keytable_lock, LCK_ADAPTIVE,
	    _thr_lock_wait, _thr_lock_wakeup, calloc) != 0)
		PANIC("Cannot initialize thread specific keytable lock");
	_thr_spinlock_init();

	/* Clear pending signals and get the process signal mask. */
	SIGEMPTYSET(_thr_proc_sigpending);

	/* Are we in M:N mode (default) or 1:1 mode? */
#ifdef SYSTEM_SCOPE_ONLY
	_thread_scope_system = 1;
#else
	if (getenv("LIBPTHREAD_SYSTEM_SCOPE") != NULL)
		_thread_scope_system = 1;
	else if (getenv("LIBPTHREAD_PROCESS_SCOPE") != NULL)
		_thread_scope_system = -1;
#endif
	if (getenv("LIBPTHREAD_DEBUG") != NULL)
		_thr_debug_flags |= DBG_INFO_DUMP;

	/*
	 * _thread_list_lock and _kse_count are initialized
	 * by _kse_init()
	 */
}
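
The kern.usrstack lookup at the top of init_private() can be reproduced from any ordinary FreeBSD program. A standalone sketch:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int mib[2];
	void *usrstack;
	size_t len;

	mib[0] = CTL_KERN;
	mib[1] = KERN_USRSTACK;	/* top of the main thread's stack */
	len = sizeof(usrstack);
	if (sysctl(mib, 2, &usrstack, &len, NULL, 0) == -1) {
		perror("sysctl");
		exit(1);
	}
	printf("kern.usrstack = %p\n", usrstack);
	return (0);
}
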
Example #5
static void
pmclog_loop(void *arg)
{
	struct pmclog_proc_init_args *ia;
	struct pmc_owner *po;
	struct pmclog_buffer *lb;
	struct proc *p;
	struct ucred *ownercred;
	struct ucred *mycred;
	struct thread *td;
	sigset_t unb;
	struct uio auio;
	struct iovec aiov;
	size_t nbytes;
	int error;

	td = curthread;

	SIGEMPTYSET(unb);
	SIGADDSET(unb, SIGHUP);
	(void)kern_sigprocmask(td, SIG_UNBLOCK, &unb, NULL, 0);

	ia = arg;
	MPASS(ia->kthr == curproc);
	MPASS(!ia->acted);
	mtx_lock(&pmc_kthread_mtx);
	while (ia->po == NULL && !ia->exit)
		msleep(ia, &pmc_kthread_mtx, PWAIT, "pmclogi", 0);
	if (ia->exit) {
		ia->acted = true;
		wakeup(ia);
		mtx_unlock(&pmc_kthread_mtx);
		kproc_exit(0);
	}
	MPASS(ia->po != NULL);
	po = ia->po;
	ia->acted = true;
	wakeup(ia);
	mtx_unlock(&pmc_kthread_mtx);
	ia = NULL;

	p = po->po_owner;
	mycred = td->td_ucred;

	PROC_LOCK(p);
	ownercred = crhold(p->p_ucred);
	PROC_UNLOCK(p);

	PMCDBG2(LOG,INI,1, "po=%p kt=%p", po, po->po_kthread);
	KASSERT(po->po_kthread == curthread->td_proc,
	    ("[pmclog,%d] proc mismatch po=%p po/kt=%p curproc=%p", __LINE__,
		po, po->po_kthread, curthread->td_proc));

	lb = NULL;

	/*
	 * Loop waiting for I/O requests to be added to the owner
	 * struct's queue.  The loop is exited when the log file
	 * is deconfigured.
	 */

	mtx_lock(&pmc_kthread_mtx);

	for (;;) {

		/* check if we've been asked to exit */
		if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
			break;

		if (lb == NULL) { /* look for a fresh buffer to write */
			mtx_lock_spin(&po->po_mtx);
			if ((lb = TAILQ_FIRST(&po->po_logbuffers)) == NULL) {
				mtx_unlock_spin(&po->po_mtx);

				/* No more buffers and shutdown required. */
				if (po->po_flags & PMC_PO_SHUTDOWN)
					break;

				(void) msleep(po, &pmc_kthread_mtx, PWAIT,
				    "pmcloop", 0);
				continue;
			}

			TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
			mtx_unlock_spin(&po->po_mtx);
		}

		mtx_unlock(&pmc_kthread_mtx);

		/* process the request */
		PMCDBG3(LOG,WRI,2, "po=%p base=%p ptr=%p", po,
		    lb->plb_base, lb->plb_ptr);
		/* change our thread's credentials before issuing the I/O */

		aiov.iov_base = lb->plb_base;
		aiov.iov_len  = nbytes = lb->plb_ptr - lb->plb_base;

		auio.uio_iov    = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = -1;
		auio.uio_resid  = nbytes;
		auio.uio_rw     = UIO_WRITE;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_td     = td;

		/* switch thread credentials -- see kern_ktrace.c */
		td->td_ucred = ownercred;
		error = fo_write(po->po_file, &auio, ownercred, 0, td);
		td->td_ucred = mycred;

		if (error) {
			/* XXX some errors are recoverable */
			/* send a SIGIO to the owner and exit */
			PROC_LOCK(p);
			kern_psignal(p, SIGIO);
			PROC_UNLOCK(p);

			mtx_lock(&pmc_kthread_mtx);

			po->po_error = error; /* save for flush log */

			PMCDBG2(LOG,WRI,2, "po=%p error=%d", po, error);

			break;
		}

		mtx_lock(&pmc_kthread_mtx);

		/* put the used buffer back into the global pool */
		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);

		mtx_lock_spin(&pmc_bufferlist_mtx);
		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
		mtx_unlock_spin(&pmc_bufferlist_mtx);

		lb = NULL;
	}

	wakeup_one(po->po_kthread);
	po->po_kthread = NULL;

	mtx_unlock(&pmc_kthread_mtx);

	/* return the current I/O buffer to the global pool */
	if (lb) {
		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);

		mtx_lock_spin(&pmc_bufferlist_mtx);
		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
		mtx_unlock_spin(&pmc_bufferlist_mtx);
	}

	/*
	 * Exit this thread, signalling the waiter
	 */

	crfree(ownercred);

	kproc_exit(0);
}
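
The struct uio/fo_write() setup in the middle of the loop is the in-kernel form of a gather write. From userspace, the single-segment equivalent is writev(2); below is a minimal sketch that flushes the filled part [base, ptr) of a buffer (flush_buffer is an illustrative name).

#include <sys/uio.h>
#include <unistd.h>

/*
 * Userspace analogue of the single-iovec UIO_WRITE above: write the
 * filled portion of a buffer, [base, ptr), to fd with writev(2).
 */
static ssize_t
flush_buffer(int fd, char *base, char *ptr)
{
	struct iovec iov;

	iov.iov_base = base;
	iov.iov_len = (size_t)(ptr - base);
	return (writev(fd, &iov, 1));
}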