Example #1
int
_pthread_once(pthread_once_t *once_control, void (*init_routine) (void))
{
	struct pthread *curthread;
	int state;

	_thr_check_init();

	for (;;) {
		state = once_control->state;
		if (state == ONCE_DONE)
			return (0);
		if (state == ONCE_NEVER_DONE) {
			if (atomic_cmpset_acq_int(&once_control->state, state, ONCE_IN_PROGRESS))
				break;
		} else if (state == ONCE_IN_PROGRESS) {
			if (atomic_cmpset_acq_int(&once_control->state, state, ONCE_WAIT))
				_thr_umtx_wait_uint(&once_control->state, ONCE_WAIT, NULL, 0);
		} else if (state == ONCE_WAIT) {
			_thr_umtx_wait_uint(&once_control->state, state, NULL, 0);
		} else
			return (EINVAL);
	}

	curthread = _get_curthread();
	THR_CLEANUP_PUSH(curthread, once_cancel_handler, once_control);
	init_routine();
	THR_CLEANUP_POP(curthread, 0);
	if (atomic_cmpset_rel_int(&once_control->state, ONCE_IN_PROGRESS, ONCE_DONE))
		return (0);
	atomic_store_rel_int(&once_control->state, ONCE_DONE);
	_thr_umtx_wake(&once_control->state, INT_MAX, 0);
	return (0);
}
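For context, this is the library side of pthread_once(); a minimal caller-side sketch (the function and variable names below are illustrative, not from the source above) shows the guarantee it implements: the init routine runs exactly once even when several threads race into it.

#include <pthread.h>
#include <stdio.h>

static pthread_once_t init_once = PTHREAD_ONCE_INIT;
static int resource_ready;

/* Runs at most once, in exactly one of the racing threads. */
static void
init_resource(void)
{
	resource_ready = 1;
	printf("resource initialized\n");
}

/* Safe to call from any number of threads. */
static void
ensure_init(void)
{
	(void)pthread_once(&init_once, init_resource);
}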
Example #2
static void
once_cancel_handler(void *arg)
{
	pthread_once_t *once_control = arg;

	if (atomic_cmpset_rel_int(&once_control->state, ONCE_IN_PROGRESS, ONCE_NEVER_DONE))
		return;
	atomic_store_rel_int(&once_control->state, ONCE_NEVER_DONE);
	_thr_umtx_wake(&once_control->state, INT_MAX, 0);
}
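This handler exists because init_routine() may contain a cancellation point; if the initializing thread is cancelled there, the state word is reset to ONCE_NEVER_DONE so that a later pthread_once() call retries the initialization. A hedged illustration (the names here are invented for the sketch):

#include <pthread.h>
#include <unistd.h>

static pthread_once_t cfg_once = PTHREAD_ONCE_INIT;

static void
load_config(void)
{
	/*
	 * sleep() is a cancellation point.  If the thread running this
	 * routine is cancelled here, the once control reverts to its
	 * initial state and the next pthread_once(&cfg_once, load_config)
	 * runs the routine again.
	 */
	sleep(1);
}

static void *
worker(void *arg)
{
	(void)arg;
	pthread_once(&cfg_once, load_config);
	return (NULL);
}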
Example #3
void
_pthread_exit_mask(void *status, sigset_t *mask)
{
    struct pthread *curthread = _get_curthread();

    /* Check if this thread is already in the process of exiting: */
    if (curthread->cancelling) {
        char msg[128];
        snprintf(msg, sizeof(msg), "Thread %p has called "
                 "pthread_exit() from a destructor. POSIX 1003.1 "
                 "1996 s16.2.5.2 does not allow this!", curthread);
        PANIC(msg);
    }

    /* Flag this thread as exiting. */
    curthread->cancelling = 1;
    curthread->no_cancel = 1;
    curthread->cancel_async = 0;
    curthread->cancel_point = 0;
    if (mask != NULL)
        __sys_sigprocmask(SIG_SETMASK, mask, NULL);
    if (curthread->unblock_sigcancel) {
        sigset_t set;

        curthread->unblock_sigcancel = 0;
        SIGEMPTYSET(set);
        SIGADDSET(set, SIGCANCEL);
        __sys_sigprocmask(SIG_UNBLOCK, &set, NULL);
    }

    /* Save the return value: */
    curthread->ret = status;
#ifdef _PTHREAD_FORCED_UNWIND

#ifdef PIC
    thread_uw_init();
#endif /* PIC */

#ifdef PIC
    if (uwl_forcedunwind != NULL) {
#else
    if (_Unwind_ForcedUnwind != NULL) {
#endif
        if (curthread->unwind_disabled) {
            if (message_printed == 0) {
                message_printed = 1;
                _thread_printf(2, "Warning: old _pthread_cleanup_push was called, "
                               "stack unwinding is disabled.\n");
            }
            goto cleanup;
        }
        thread_unwind();

    } else {
cleanup:
        while (curthread->cleanup != NULL) {
            __pthread_cleanup_pop_imp(1);
        }
        exit_thread();
    }

#else
    while (curthread->cleanup != NULL) {
        __pthread_cleanup_pop_imp(1);
    }

    exit_thread();
#endif /* _PTHREAD_FORCED_UNWIND */
}

static void
exit_thread(void)
{
    struct pthread *curthread = _get_curthread();

    /* Check if there is thread specific data: */
    if (curthread->specific != NULL) {
        /* Run the thread-specific data destructors: */
        _thread_cleanupspecific();
    }

    if (!_thr_isthreaded())
        exit(0);

    if (atomic_fetchadd_int(&_thread_active_threads, -1) == 1) {
        exit(0);
        /* Never reached. */
    }

    /* Tell malloc that the thread is exiting. */
    _malloc_thread_cleanup();

    THR_LOCK(curthread);
    curthread->state = PS_DEAD;
    if (curthread->flags & THR_FLAGS_NEED_SUSPEND) {
        curthread->cycle++;
        _thr_umtx_wake(&curthread->cycle, INT_MAX, 0);
    }
    if (!curthread->force_exit && SHOULD_REPORT_EVENT(curthread, TD_DEATH))
        _thr_report_death(curthread);
    /*
     * The thread was created with an initial refcount of 1; drop the
     * reference count to allow it to be garbage collected.
     */
    curthread->refcount--;
    _thr_try_gc(curthread, curthread); /* thread lock released */

#if defined(_PTHREADS_INVARIANTS)
    if (THR_IN_CRITICAL(curthread))
        PANIC("thread exits with resources held!");
#endif
    /*
     * Kernel will do wakeup at the address, so joiner thread
     * will be resumed if it is sleeping at the address.
     */
    thr_exit(&curthread->tid);
    PANIC("thr_exit() returned");
    /* Never reached. */
}
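As a reminder of the caller-visible contract being implemented here, a small self-contained usage sketch (not part of the library source): the value handed to pthread_exit() is exactly what a joining thread later receives.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static void *
task(void *arg)
{
	(void)arg;
	/* Equivalent to returning the value from task(). */
	pthread_exit((void *)(intptr_t)42);
}

int
main(void)
{
	pthread_t tid;
	void *res;

	if (pthread_create(&tid, NULL, task, NULL) != 0)
		return (1);
	pthread_join(tid, &res);
	printf("thread returned %ld\n", (long)(intptr_t)res);
	return (0);
}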
Example #4
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
	       void *(*start_routine) (void *), void *arg)
{
	struct pthread *curthread, *new_thread;
	struct thr_param param;
	struct sched_param sched_param;
	struct rtprio rtp;
	sigset_t set, oset;
	cpuset_t *cpusetp;
	int i, cpusetsize, create_suspended, locked, old_stack_prot, ret;

	cpusetp = NULL;
	ret = cpusetsize = 0;
	_thr_check_init();

	/*
	 * Tell libc and others that they now need locks to protect their data.
	 */
	if (_thr_isthreaded() == 0) {
		_malloc_first_thread();
		if (_thr_setthreaded(1))
			return (EAGAIN);
	}

	curthread = _get_curthread();
	if ((new_thread = _thr_alloc(curthread)) == NULL)
		return (EAGAIN);

	memset(&param, 0, sizeof(param));

	if (attr == NULL || *attr == NULL)
		/* Use the default thread attributes: */
		new_thread->attr = _pthread_attr_default;
	else {
		new_thread->attr = *(*attr);
		cpusetp = new_thread->attr.cpuset;
		cpusetsize = new_thread->attr.cpusetsize;
		new_thread->attr.cpuset = NULL;
		new_thread->attr.cpusetsize = 0;
	}
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		/* inherit scheduling contention scope */
		if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
			new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
		else
			new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;

		new_thread->attr.prio = curthread->attr.prio;
		new_thread->attr.sched_policy = curthread->attr.sched_policy;
	}

	new_thread->tid = TID_TERMINATED;

	old_stack_prot = _rtld_get_stack_prot();
	if (create_stack(&new_thread->attr) != 0) {
		/* Insufficient memory to create a stack: */
		_thr_free(curthread, new_thread);
		return (EAGAIN);
	}
	/*
	 * Write a magic value to the thread structure
	 * to help identify valid ones:
	 */
	new_thread->magic = THR_MAGIC;
	new_thread->start_routine = start_routine;
	new_thread->arg = arg;
	new_thread->cancel_enable = 1;
	new_thread->cancel_async = 0;
	/* Initialize the mutex queue: */
	for (i = 0; i < TMQ_NITEMS; i++)
		TAILQ_INIT(&new_thread->mq[i]);

	/* Initialise hooks in the thread structure: */
	if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {
		new_thread->flags = THR_FLAGS_NEED_SUSPEND;
		create_suspended = 1;
	} else {
		create_suspended = 0;
	}

	new_thread->state = PS_RUNNING;

	if (new_thread->attr.flags & PTHREAD_CREATE_DETACHED)
		new_thread->flags |= THR_FLAGS_DETACHED;

	/* Add the new thread. */
	new_thread->refcount = 1;
	_thr_link(curthread, new_thread);

	/*
	 * Handle the race between __pthread_map_stacks_exec and
	 * thread linkage.
	 */
	if (old_stack_prot != _rtld_get_stack_prot())
		_thr_stack_fix_protection(new_thread);

	/* Return the thread pointer early so that the new thread can use it. */
	(*thread) = new_thread;
	if (SHOULD_REPORT_EVENT(curthread, TD_CREATE) || cpusetp != NULL) {
		THR_THREAD_LOCK(curthread, new_thread);
		locked = 1;
	} else
		locked = 0;
	param.start_func = (void (*)(void *)) thread_start;
	param.arg = new_thread;
	param.stack_base = new_thread->attr.stackaddr_attr;
	param.stack_size = new_thread->attr.stacksize_attr;
	param.tls_base = (char *)new_thread->tcb;
	param.tls_size = sizeof(struct tcb);
	param.child_tid = &new_thread->tid;
	param.parent_tid = &new_thread->tid;
	param.flags = 0;
	if (new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM)
		param.flags |= THR_SYSTEM_SCOPE;
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED)
		param.rtp = NULL;
	else {
		sched_param.sched_priority = new_thread->attr.prio;
		_schedparam_to_rtp(new_thread->attr.sched_policy,
			&sched_param, &rtp);
		param.rtp = &rtp;
	}

	/* Schedule the new thread. */
	if (create_suspended) {
		SIGFILLSET(set);
		SIGDELSET(set, SIGTRAP);
		__sys_sigprocmask(SIG_SETMASK, &set, &oset);
		new_thread->sigmask = oset;
		SIGDELSET(new_thread->sigmask, SIGCANCEL);
	}

	ret = thr_new(&param, sizeof(param));

	if (ret != 0) {
		ret = errno;
		/*
		 * Translate EPROCLIM into well-known POSIX code EAGAIN.
		 */
		if (ret == EPROCLIM)
			ret = EAGAIN;
	}

	if (create_suspended)
		__sys_sigprocmask(SIG_SETMASK, &oset, NULL);

	if (ret != 0) {
		if (!locked)
			THR_THREAD_LOCK(curthread, new_thread);
		new_thread->state = PS_DEAD;
		new_thread->tid = TID_TERMINATED;
		new_thread->flags |= THR_FLAGS_DETACHED;
		new_thread->refcount--;
		if (new_thread->flags & THR_FLAGS_NEED_SUSPEND) {
			new_thread->cycle++;
			_thr_umtx_wake(&new_thread->cycle, INT_MAX, 0);
		}
		_thr_try_gc(curthread, new_thread); /* thread lock released */
		atomic_add_int(&_thread_active_threads, -1);
	} else if (locked) {
		if (cpusetp != NULL) {
			if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
				TID(new_thread), cpusetsize, cpusetp)) {
				ret = errno;
				/* kill the new thread */
				new_thread->force_exit = 1;
				new_thread->flags |= THR_FLAGS_DETACHED;
				_thr_try_gc(curthread, new_thread); /* thread lock released */
				goto out;
			}
		}

		_thr_report_creation(curthread, new_thread);
		THR_THREAD_UNLOCK(curthread, new_thread);
	}
out:
	if (ret)
		(*thread) = 0;
	return (ret);
}
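A caller-side sketch of one path through the code above (the helper names are illustrative): creating a detached thread through a pthread_attr_t is what sets PTHREAD_CREATE_DETACHED in the attributes and, in turn, THR_FLAGS_DETACHED on the new thread.

#include <pthread.h>
#include <stdio.h>

static void *
logger(void *arg)
{
	printf("message: %s\n", (const char *)arg);
	return (NULL);
}

int
spawn_detached_logger(void)
{
	pthread_attr_t attr;
	pthread_t tid;
	int ret;

	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	/* A resource failure surfaces as EAGAIN, matching the paths above. */
	ret = pthread_create(&tid, &attr, logger, "hello from a detached thread");
	pthread_attr_destroy(&attr);
	return (ret);
}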
Example #5
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
    void *(*start_routine) (void *), void *arg)
{
	struct lwp_params create_params;
	void *stack;
	sigset_t sigmask, oldsigmask;
	struct pthread *curthread, *new_thread;
	int ret = 0, locked;

	_thr_check_init();

	/*
	 * Tell libc and others that they now need locks to protect their data.
	 */
	if (_thr_isthreaded() == 0 && _thr_setthreaded(1))
		return (EAGAIN);

	curthread = tls_get_curthread();
	if ((new_thread = _thr_alloc(curthread)) == NULL)
		return (EAGAIN);

	if (attr == NULL || *attr == NULL) {
		/* Use the default thread attributes: */
		new_thread->attr = _pthread_attr_default;
	} else {
		new_thread->attr = *(*attr);
	}
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		/* inherit scheduling contention scope */
		if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
			new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
		else
			new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
		/*
		 * Scheduling policy and scheduling parameters will be
		 * inherited in the code that follows.
		 */
	}

	if (create_stack(&new_thread->attr) != 0) {
		/* Insufficient memory to create a stack: */
		new_thread->terminated = 1;
		_thr_free(curthread, new_thread);
		return (EAGAIN);
	}
	/*
	 * Write a magic value to the thread structure
	 * to help identify valid ones:
	 */
	new_thread->magic = THR_MAGIC;
	new_thread->start_routine = start_routine;
	new_thread->arg = arg;
	new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
	    PTHREAD_CANCEL_DEFERRED;
	/*
	 * Check if this thread is to inherit the scheduling
	 * attributes from its parent:
	 */
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		/*
		 * Copy the scheduling attributes. Lock the scheduling
		 * lock to get consistent scheduling parameters.
		 */
		THR_LOCK(curthread);
		new_thread->base_priority = curthread->base_priority;
		new_thread->attr.prio = curthread->attr.prio;
		new_thread->attr.sched_policy = curthread->attr.sched_policy;
		THR_UNLOCK(curthread);
	} else {
		/*
		 * Use just the thread priority, leaving the
		 * other scheduling attributes as their
		 * default values:
		 */
		new_thread->base_priority = new_thread->attr.prio;
	}
	new_thread->active_priority = new_thread->base_priority;

	/* Initialize the mutex queue: */
	TAILQ_INIT(&new_thread->mutexq);

	/* Initialise hooks in the thread structure: */
	if (new_thread->attr.suspend == THR_CREATE_SUSPENDED)
		new_thread->flags = THR_FLAGS_NEED_SUSPEND;

	new_thread->state = PS_RUNNING;

	if (new_thread->attr.flags & PTHREAD_CREATE_DETACHED)
		new_thread->tlflags |= TLFLAGS_DETACHED;

	/* Add the new thread. */
	new_thread->refcount = 1;
	_thr_link(curthread, new_thread);
	/* Return the thread pointer early so that the new thread can use it. */
	(*thread) = new_thread;
	if (SHOULD_REPORT_EVENT(curthread, TD_CREATE)) {
		THR_THREAD_LOCK(curthread, new_thread);
		locked = 1;
	} else
		locked = 0;
	/* Schedule the new thread. */
	stack = (char *)new_thread->attr.stackaddr_attr +
			new_thread->attr.stacksize_attr;
	bzero(&create_params, sizeof(create_params));
	create_params.lwp_func = thread_start;
	create_params.lwp_arg = new_thread;
	create_params.lwp_stack = stack;
	create_params.lwp_tid1 = &new_thread->tid;
	/*
	 * A thread created by thr_create() inherits the current thread's
	 * sigmask; however, until the new thread has set itself up it
	 * cannot handle signals, so we mask all signals here.  We do this
	 * at the very last moment, so that we don't run into problems
	 * while we have all signals disabled.
	 */
	SIGFILLSET(sigmask);
	__sys_sigprocmask(SIG_SETMASK, &sigmask, &oldsigmask);
	new_thread->sigmask = oldsigmask;
	ret = lwp_create(&create_params);
	__sys_sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
	if (ret != 0) {
		if (!locked)
			THR_THREAD_LOCK(curthread, new_thread);
		new_thread->state = PS_DEAD;
		new_thread->terminated = 1;
		if (new_thread->flags & THR_FLAGS_NEED_SUSPEND) {
			new_thread->cycle++;
			_thr_umtx_wake(&new_thread->cycle, INT_MAX);
		}
		THR_THREAD_UNLOCK(curthread, new_thread);
		THREAD_LIST_LOCK(curthread);
		_thread_active_threads--;
		new_thread->tlflags |= TLFLAGS_DETACHED;
		_thr_ref_delete_unlocked(curthread, new_thread);
		THREAD_LIST_UNLOCK(curthread);
		(*thread) = NULL;
		ret = EAGAIN;
	} else if (locked) {
		_thr_report_creation(curthread, new_thread);
		THR_THREAD_UNLOCK(curthread, new_thread);
	}
	return (ret);
}
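To contrast with the PTHREAD_INHERIT_SCHED branch above, a short caller-side sketch (illustrative, not from this source) that requests explicit scheduling, so the new thread does not copy its creator's policy and priority:

#include <pthread.h>
#include <sched.h>

static void *
worker(void *arg)
{
	(void)arg;
	return (NULL);
}

int
spawn_fifo_worker(pthread_t *tid)
{
	pthread_attr_t attr;
	struct sched_param sp = { .sched_priority = 10 };
	int ret;

	pthread_attr_init(&attr);
	/* Do not inherit the creating thread's policy and priority. */
	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
	pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
	pthread_attr_setschedparam(&attr, &sp);
	ret = pthread_create(tid, &attr, worker, NULL);
	pthread_attr_destroy(&attr);
	return (ret);
}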
Example #6
void
_pthread_exit(void *status)
{
	struct pthread *curthread = _get_curthread();

	/* Check if this thread is already in the process of exiting: */
	if (curthread->cancelling) {
		char msg[128];
		snprintf(msg, sizeof(msg), "Thread %p has called "
		    "pthread_exit() from a destructor. POSIX 1003.1 "
		    "1996 s16.2.5.2 does not allow this!", curthread);
		PANIC(msg);
	}

	/* Flag this thread as exiting. */
	curthread->cancelling = 1;
	
	_thr_exit_cleanup();

	/* Save the return value: */
	curthread->ret = status;
	while (curthread->cleanup != NULL) {
		_pthread_cleanup_pop(1);
	}

	/* Check if there is thread specific data: */
	if (curthread->specific != NULL) {
		/* Run the thread-specific data destructors: */
		_thread_cleanupspecific();
	}

	if (!_thr_isthreaded())
		exit(0);

	THREAD_LIST_LOCK(curthread);
	_thread_active_threads--;
	if (_thread_active_threads == 0) {
		THREAD_LIST_UNLOCK(curthread);
		exit(0);
		/* Never reached. */
	}
	THREAD_LIST_UNLOCK(curthread);

	/* Tell malloc that the thread is exiting. */
	_malloc_thread_cleanup();

	THREAD_LIST_LOCK(curthread);
	THR_LOCK(curthread);
	curthread->state = PS_DEAD;
	if (curthread->flags & THR_FLAGS_NEED_SUSPEND) {
		curthread->cycle++;
		_thr_umtx_wake(&curthread->cycle, INT_MAX, 0);
	}
	THR_UNLOCK(curthread);
	/*
	 * The thread was created with an initial refcount of 1; drop the
	 * reference count to allow it to be garbage collected.
	 */
	curthread->refcount--;
	if (curthread->tlflags & TLFLAGS_DETACHED)
		THR_GCLIST_ADD(curthread);
	THREAD_LIST_UNLOCK(curthread);
	if (!curthread->force_exit && SHOULD_REPORT_EVENT(curthread, TD_DEATH))
		_thr_report_death(curthread);

	/*
	 * Kernel will do wakeup at the address, so joiner thread
	 * will be resumed if it is sleeping at the address.
	 */
	thr_exit(&curthread->tid);
#ifndef __AVM2__ // might exit if we're impersonating another thread!
	PANIC("thr_exit() returned");
#endif
	/* Never reached. */
}
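The loop in these exit paths that pops the remaining cleanup handlers with a nonzero execute argument is what makes the following (illustrative) pattern safe: a handler pushed with pthread_cleanup_push() still runs when the thread exits early via pthread_exit().

#include <pthread.h>
#include <stdlib.h>

static void
release_buffer(void *p)
{
	free(p);
}

static void *
task(void *arg)
{
	char *buf = malloc(128);

	(void)arg;
	if (buf == NULL)
		return (NULL);
	pthread_cleanup_push(release_buffer, buf);
	/*
	 * pthread_exit() walks the cleanup list, so release_buffer(buf)
	 * runs even though pthread_cleanup_pop() below is never reached.
	 */
	pthread_exit(NULL);
	pthread_cleanup_pop(1);
	return (NULL);
}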