int
_nanosleep(const struct timespec * time_to_sleep,
    struct timespec * time_remaining)
{
	struct pthread	*curthread = _get_curthread();
	int             ret = 0;
	struct timespec current_time;
	struct timespec current_time1;
	struct timespec remaining_time;
	struct timeval  tv;

	/* Check if the time to sleep is legal: */
	if (time_to_sleep == NULL || time_to_sleep->tv_sec < 0 ||
		time_to_sleep->tv_nsec < 0 || time_to_sleep->tv_nsec >= 1000000000) {
		/* Return an EINVAL error: */
		errno = EINVAL;
		ret = -1;
	} else {
		/*
		 * As long as we're going to get the time of day, we
		 * might as well store it in the global time of day:
		 */
		gettimeofday((struct timeval *) &_sched_tod, NULL);
		GET_CURRENT_TOD(tv);
		TIMEVAL_TO_TIMESPEC(&tv, &current_time);

		/* Calculate the time for the current thread to wake up: */
		curthread->wakeup_time.tv_sec = current_time.tv_sec + time_to_sleep->tv_sec;
		curthread->wakeup_time.tv_nsec = current_time.tv_nsec + time_to_sleep->tv_nsec;

		/* Check if the nanosecond field has overflowed: */
		if (curthread->wakeup_time.tv_nsec >= 1000000000) {
			/* Wrap the nanosecond field: */
			curthread->wakeup_time.tv_sec += 1;
			curthread->wakeup_time.tv_nsec -= 1000000000;
		}
		curthread->interrupted = 0;

		/* Reschedule the current thread to sleep: */
		_thread_kern_sched_state(PS_SLEEP_WAIT, __FILE__, __LINE__);

		/*
		 * As long as we're going to get the time of day, we
		 * might as well store it in the global time of day:
		 */
		gettimeofday((struct timeval *) &_sched_tod, NULL);
		GET_CURRENT_TOD(tv);
		TIMEVAL_TO_TIMESPEC(&tv, &current_time1);

		/* Calculate the remaining time to sleep: */
		remaining_time.tv_sec = time_to_sleep->tv_sec + current_time.tv_sec - current_time1.tv_sec;
		remaining_time.tv_nsec = time_to_sleep->tv_nsec + current_time.tv_nsec - current_time1.tv_nsec;

		/* Check if the nanosecond field has underflowed: */
		if (remaining_time.tv_nsec < 0) {
			/* Handle the underflow: */
			remaining_time.tv_sec -= 1;
			remaining_time.tv_nsec += 1000000000;
		}

		/* Check if the nanosecond field has overflowed: */
		if (remaining_time.tv_nsec >= 1000000000) {
			/* Handle the overflow: */
			remaining_time.tv_sec += 1;
			remaining_time.tv_nsec -= 1000000000;
		}

		/* Check if the sleep was longer than the required time: */
		if (remaining_time.tv_sec < 0) {
			/* Reset the time left: */
			remaining_time.tv_sec = 0;
			remaining_time.tv_nsec = 0;
		}

		/* Check if the time remaining is to be returned: */
		if (time_remaining != NULL) {
			/* Return the actual time slept: */
			time_remaining->tv_sec = remaining_time.tv_sec;
			time_remaining->tv_nsec = remaining_time.tv_nsec;
		}

		/* Check if the sleep was interrupted: */
		if (curthread->interrupted) {
			/* Return an EINTR error: */
			errno = EINTR;
			ret = -1;
		}
	}
	return (ret);
}
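
The EINVAL/EINTR contract above matches nanosleep(2), so a caller can resume an interrupted sleep from the time_remaining value. A minimal caller-side sketch against the public interface (not part of the library source):

#include <errno.h>
#include <time.h>

/* Sleep for the full interval, resuming after each EINTR. */
static int
sleep_full(struct timespec req)
{
	struct timespec rem;

	while (nanosleep(&req, &rem) == -1) {
		if (errno != EINTR)
			return (-1);	/* EINVAL or another hard error */
		req = rem;		/* continue with the unslept portion */
	}
	return (0);
}
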
Example #2
/*
 * Cancellation behavior:
 *   If the thread is canceled, the joinee is not recycled.
 */
static int
join_common(pthread_t pthread, void **thread_return,
	const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	struct timespec ts, ts2, *tsp;
	void *tmp;
	long tid;
	int ret = 0;

	if (pthread == NULL)
		return (EINVAL);

	if (pthread == curthread)
		return (EDEADLK);

	if ((ret = _thr_find_thread(curthread, pthread, 1)) != 0)
		return (ESRCH);

	if ((pthread->flags & THR_FLAGS_DETACHED) != 0) {
		ret = EINVAL;
	} else if (pthread->joiner != NULL) {
		/* Multiple joiners are not supported. */
		ret = ENOTSUP;
	}
	if (ret) {
		THR_THREAD_UNLOCK(curthread, pthread);
		return (ret);
	}
	/* Set the running thread to be the joiner: */
	pthread->joiner = curthread;

	THR_THREAD_UNLOCK(curthread, pthread);

	THR_CLEANUP_PUSH(curthread, backout_join, pthread);
	_thr_cancel_enter(curthread);

	tid = pthread->tid;
	while (pthread->tid != TID_TERMINATED) {
		_thr_testcancel(curthread);
		if (abstime != NULL) {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			if (ts2.tv_sec < 0) {
				ret = ETIMEDOUT;
				break;
			}
			tsp = &ts2;
		} else
			tsp = NULL;
		ret = _thr_umtx_wait(&pthread->tid, tid, tsp);
		if (ret == ETIMEDOUT)
			break;
	}

	_thr_cancel_leave(curthread, 0);
	THR_CLEANUP_POP(curthread, 0);

	if (ret == ETIMEDOUT) {
		THR_THREAD_LOCK(curthread, pthread);
		pthread->joiner = NULL;
		THR_THREAD_UNLOCK(curthread, pthread);
	} else {
		ret = 0;
		tmp = pthread->ret;
		THR_THREAD_LOCK(curthread, pthread);
		pthread->flags |= THR_FLAGS_DETACHED;
		pthread->joiner = NULL;
		_thr_try_gc(curthread, pthread); /* thread lock released */

		if (thread_return != NULL)
			*thread_return = tmp;
	}
	return (ret);
}
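
The abstime parameter of join_common() is what a timed join builds on; on FreeBSD the public entry point for this path is pthread_timedjoin_np(3), declared in <pthread_np.h>. A usage sketch (the five-second deadline is arbitrary):

#include <pthread.h>
#include <pthread_np.h>
#include <time.h>

/* Wait at most five seconds for tid to exit. */
static int
join_with_timeout(pthread_t tid, void **resultp)
{
	struct timespec abst;

	clock_gettime(CLOCK_REALTIME, &abst);
	abst.tv_sec += 5;
	/* On ETIMEDOUT the joiner slot is cleared and tid stays joinable. */
	return (pthread_timedjoin_np(tid, resultp, &abst));
}
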
Example #3
int
_pthread_join(pthread_t pthread, void **thread_return)
{
	struct pthread *curthread = _get_curthread();
	void *tmp;
	kse_critical_t crit;
	int ret = 0;
 
	_thr_cancel_enter(curthread);

	/* Check if the caller has specified an invalid thread: */
	if (pthread == NULL || pthread->magic != THR_MAGIC) {
		/* Invalid thread: */
		_thr_cancel_leave(curthread, 1);
		return (EINVAL);
	}

	/* Check if the caller has specified itself: */
	if (pthread == curthread) {
		/* Avoid a deadlock condition: */
		_thr_cancel_leave(curthread, 1);
		return (EDEADLK);
	}

	/*
	 * Find the thread in the list of active threads or in the
	 * list of dead threads:
	 */
	if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/1)) != 0) {
		/* Return an error: */
		_thr_cancel_leave(curthread, 1);
		return (ESRCH);
	}

	THR_SCHED_LOCK(curthread, pthread);
	/* Check if this thread has been detached: */
	if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) {
		THR_SCHED_UNLOCK(curthread, pthread);
		/* Remove the reference and return an error: */
		_thr_ref_delete(curthread, pthread);
		ret = EINVAL;
	} else {
		/* Lock the target thread while checking its state. */
		if (pthread->state == PS_DEAD) {
			/* Return the thread's return value: */
			tmp = pthread->ret;

			/* Detach the thread. */
			pthread->attr.flags |= PTHREAD_DETACHED;

			/* Unlock the thread. */
			THR_SCHED_UNLOCK(curthread, pthread);

			/*
			 * Remove the thread from the list of active
			 * threads and add it to the GC list.
			 */
			crit = _kse_critical_enter();
			KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
			THR_LIST_REMOVE(pthread);
			THR_GCLIST_ADD(pthread);
			KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
			_kse_critical_leave(crit);

			/* Remove the reference. */
			_thr_ref_delete(curthread, pthread);
			if (thread_return != NULL)
				*thread_return = tmp;
		}
		else if (pthread->joiner != NULL) {
			/* Unlock the thread and remove the reference. */
			THR_SCHED_UNLOCK(curthread, pthread);
			_thr_ref_delete(curthread, pthread);

			/* Multiple joiners are not supported. */
			ret = ENOTSUP;
		}
		else {
			/* Set the running thread to be the joiner: */
			pthread->joiner = curthread;

			/* Keep track of which thread we're joining to: */
			curthread->join_status.thread = pthread;

			/* Unlock the thread and remove the reference. */
			THR_SCHED_UNLOCK(curthread, pthread);
			_thr_ref_delete(curthread, pthread);

			THR_SCHED_LOCK(curthread, curthread);
			while (curthread->join_status.thread == pthread) {
				THR_SET_STATE(curthread, PS_JOIN);
				THR_SCHED_UNLOCK(curthread, curthread);
				/* Schedule the next thread: */
				_thr_sched_switch(curthread);
				THR_SCHED_LOCK(curthread, curthread);
			}
			THR_SCHED_UNLOCK(curthread, curthread);

			if ((curthread->cancelflags & THR_CANCELLING) &&
			   !(curthread->cancelflags & PTHREAD_CANCEL_DISABLE)) {
				if (_thr_ref_add(curthread, pthread, 1) == 0) {
					THR_SCHED_LOCK(curthread, pthread);
					pthread->joiner = NULL;
					THR_SCHED_UNLOCK(curthread, pthread);
					_thr_ref_delete(curthread, pthread);
				}
				_pthread_exit(PTHREAD_CANCELED);
			}

			/*
			 * The thread return value and error are set by the
			 * thread we're joining to when it exits or detaches:
			 */
			ret = curthread->join_status.error;
			if ((ret == 0) && (thread_return != NULL))
				*thread_return = curthread->join_status.ret;
		}
	}
	_thr_cancel_leave(curthread, 1);

	/* Return the completion status: */
	return (ret);
}
Example #4
void
_pthread_cancel_leave(int maycancel)
{
	_thr_cancel_leave(_get_curthread(), maycancel);
}
Example #5
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
	       void *(*start_routine) (void *), void *arg)
{
	struct pthread *curthread, *new_thread;
	struct thr_param param;
	struct sched_param sched_param;
	struct rtprio rtp;
	sigset_t set, oset;
	cpuset_t *cpusetp;
	int i, cpusetsize, create_suspended, locked, old_stack_prot, ret;

	cpusetp = NULL;
	ret = cpusetsize = 0;
	_thr_check_init();

	/*
	 * Tell libc and others that they now need locks to protect their data.
	 */
	if (_thr_isthreaded() == 0) {
		_malloc_first_thread();
		if (_thr_setthreaded(1))
			return (EAGAIN);
	}

	curthread = _get_curthread();
	if ((new_thread = _thr_alloc(curthread)) == NULL)
		return (EAGAIN);

	memset(&param, 0, sizeof(param));

	if (attr == NULL || *attr == NULL)
		/* Use the default thread attributes: */
		new_thread->attr = _pthread_attr_default;
	else {
		new_thread->attr = *(*attr);
		cpusetp = new_thread->attr.cpuset;
		cpusetsize = new_thread->attr.cpusetsize;
		new_thread->attr.cpuset = NULL;
		new_thread->attr.cpusetsize = 0;
	}
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		/* inherit scheduling contention scope */
		if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
			new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
		else
			new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;

		new_thread->attr.prio = curthread->attr.prio;
		new_thread->attr.sched_policy = curthread->attr.sched_policy;
	}

	new_thread->tid = TID_TERMINATED;

	old_stack_prot = _rtld_get_stack_prot();
	if (create_stack(&new_thread->attr) != 0) {
		/* Insufficient memory to create a stack: */
		_thr_free(curthread, new_thread);
		return (EAGAIN);
	}
	/*
	 * Write a magic value to the thread structure
	 * to help identify valid ones:
	 */
	new_thread->magic = THR_MAGIC;
	new_thread->start_routine = start_routine;
	new_thread->arg = arg;
	new_thread->cancel_enable = 1;
	new_thread->cancel_async = 0;
	/* Initialize the mutex queue: */
	for (i = 0; i < TMQ_NITEMS; i++)
		TAILQ_INIT(&new_thread->mq[i]);

	/* Check if the thread is to be created suspended: */
	if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {
		new_thread->flags = THR_FLAGS_NEED_SUSPEND;
		create_suspended = 1;
	} else {
		create_suspended = 0;
	}

	new_thread->state = PS_RUNNING;

	if (new_thread->attr.flags & PTHREAD_CREATE_DETACHED)
		new_thread->flags |= THR_FLAGS_DETACHED;

	/* Add the new thread. */
	new_thread->refcount = 1;
	_thr_link(curthread, new_thread);

	/*
	 * Handle the race between __pthread_map_stacks_exec and
	 * thread linkage.
	 */
	if (old_stack_prot != _rtld_get_stack_prot())
		_thr_stack_fix_protection(new_thread);

	/* Return the thread pointer early so that the new thread can use it. */
	(*thread) = new_thread;
	if (SHOULD_REPORT_EVENT(curthread, TD_CREATE) || cpusetp != NULL) {
		THR_THREAD_LOCK(curthread, new_thread);
		locked = 1;
	} else
		locked = 0;
	param.start_func = (void (*)(void *)) thread_start;
	param.arg = new_thread;
	param.stack_base = new_thread->attr.stackaddr_attr;
	param.stack_size = new_thread->attr.stacksize_attr;
	param.tls_base = (char *)new_thread->tcb;
	param.tls_size = sizeof(struct tcb);
	param.child_tid = &new_thread->tid;
	param.parent_tid = &new_thread->tid;
	param.flags = 0;
	if (new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM)
		param.flags |= THR_SYSTEM_SCOPE;
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED)
		param.rtp = NULL;
	else {
		sched_param.sched_priority = new_thread->attr.prio;
		_schedparam_to_rtp(new_thread->attr.sched_policy,
			&sched_param, &rtp);
		param.rtp = &rtp;
	}

	/* Schedule the new thread. */
	if (create_suspended) {
		SIGFILLSET(set);
		SIGDELSET(set, SIGTRAP);
		__sys_sigprocmask(SIG_SETMASK, &set, &oset);
		new_thread->sigmask = oset;
		SIGDELSET(new_thread->sigmask, SIGCANCEL);
	}

	ret = thr_new(&param, sizeof(param));

	if (ret != 0) {
		ret = errno;
		/*
		 * Translate EPROCLIM into well-known POSIX code EAGAIN.
		 */
		if (ret == EPROCLIM)
			ret = EAGAIN;
	}

	if (create_suspended)
		__sys_sigprocmask(SIG_SETMASK, &oset, NULL);

	if (ret != 0) {
		if (!locked)
			THR_THREAD_LOCK(curthread, new_thread);
		new_thread->state = PS_DEAD;
		new_thread->tid = TID_TERMINATED;
		new_thread->flags |= THR_FLAGS_DETACHED;
		new_thread->refcount--;
		if (new_thread->flags & THR_FLAGS_NEED_SUSPEND) {
			new_thread->cycle++;
			_thr_umtx_wake(&new_thread->cycle, INT_MAX, 0);
		}
		_thr_try_gc(curthread, new_thread); /* thread lock released */
		atomic_add_int(&_thread_active_threads, -1);
	} else if (locked) {
		if (cpusetp != NULL) {
			if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
				TID(new_thread), cpusetsize, cpusetp)) {
				ret = errno;
				/* kill the new thread */
				new_thread->force_exit = 1;
				new_thread->flags |= THR_FLAGS_DETACHED;
				_thr_try_gc(curthread, new_thread);
				 /* thread lock released */
				goto out;
			}
		}

		_thr_report_creation(curthread, new_thread);
		THR_THREAD_UNLOCK(curthread, new_thread);
	}
out:
	if (ret)
		(*thread) = 0;
	return (ret);
}
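
The attribute handling above is driven from the public pthread_attr_t interface. A minimal sketch of a caller creating a detached thread (worker and spawn_detached are illustrative names, not library code):

#include <pthread.h>
#include <stdio.h>

static void *
worker(void *arg)
{
	printf("worker: %s\n", (const char *)arg);
	return (NULL);
}

static int
spawn_detached(void)
{
	pthread_attr_t attr;
	pthread_t tid;
	int err;

	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	err = pthread_create(&tid, &attr, worker, "hello");
	pthread_attr_destroy(&attr);
	return (err);	/* EAGAIN on resource exhaustion, as above */
}
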
Example #6
void
_pthread_exit(void *status)
{
	struct pthread *curthread = _get_curthread();

	/* Check if this thread is already in the process of exiting: */
	if (curthread->cancelling) {
		char msg[128];
		snprintf(msg, sizeof(msg), "Thread %p has called "
		    "pthread_exit() from a destructor. POSIX 1003.1 "
		    "1996 s16.2.5.2 does not allow this!", curthread);
		PANIC(msg);
	}

	/* Flag this thread as exiting. */
	curthread->cancelling = 1;
	
	_thr_exit_cleanup();

	/* Save the return value: */
	curthread->ret = status;
	while (curthread->cleanup != NULL) {
		_pthread_cleanup_pop(1);
	}

	/* Check if there is thread specific data: */
	if (curthread->specific != NULL) {
		/* Run the thread-specific data destructors: */
		_thread_cleanupspecific();
	}

	if (!_thr_isthreaded())
		exit(0);

	THREAD_LIST_LOCK(curthread);
	_thread_active_threads--;
	if (_thread_active_threads == 0) {
		THREAD_LIST_UNLOCK(curthread);
		exit(0);
		/* Never reach! */
	}
	THREAD_LIST_UNLOCK(curthread);

	/* Tell malloc that the thread is exiting. */
	_malloc_thread_cleanup();

	THREAD_LIST_LOCK(curthread);
	THR_LOCK(curthread);
	curthread->state = PS_DEAD;
	if (curthread->flags & THR_FLAGS_NEED_SUSPEND) {
		curthread->cycle++;
		_thr_umtx_wake(&curthread->cycle, INT_MAX, 0);
	}
	THR_UNLOCK(curthread);
	/*
	 * Thread was created with initial refcount 1, we drop the
	 * reference count to allow it to be garbage collected.
	 */
	curthread->refcount--;
	if (curthread->tlflags & TLFLAGS_DETACHED)
		THR_GCLIST_ADD(curthread);
	THREAD_LIST_UNLOCK(curthread);
	if (!curthread->force_exit && SHOULD_REPORT_EVENT(curthread, TD_DEATH))
		_thr_report_death(curthread);

	/*
	 * Kernel will do wakeup at the address, so joiner thread
	 * will be resumed if it is sleeping at the address.
	 */
	thr_exit(&curthread->tid);
#ifndef __AVM2__ // might exit if we're impersonating another thread!
	PANIC("thr_exit() returned");
#endif
	/* Never reach! */
}
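
The cleanup loop above pops every handler with _pthread_cleanup_pop(1) before the thread-specific destructors run. A sketch of the caller-visible behavior, assuming the thread holds a mutex it registered a handler for:

#include <pthread.h>

static void
unlock_on_exit(void *arg)
{
	pthread_mutex_unlock(arg);
}

static void *
worker(void *arg)
{
	pthread_mutex_t *mp = arg;

	pthread_mutex_lock(mp);
	pthread_cleanup_push(unlock_on_exit, mp);
	pthread_exit(NULL);	/* runs unlock_on_exit via the cleanup loop */
	pthread_cleanup_pop(1);	/* unreachable; pairs the push macro */
	return (NULL);
}
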
Example #7
void
_pthread_exit(void *status)
{
	struct pthread *curthread = _get_curthread();
	kse_critical_t crit;
	struct kse *curkse;

	/* Check if this thread is already in the process of exiting: */
	if ((curthread->flags & THR_FLAGS_EXITING) != 0) {
		char msg[128];
		snprintf(msg, sizeof(msg), "Thread %p has called "
		    "pthread_exit() from a destructor. POSIX 1003.1 "
		    "1996 s16.2.5.2 does not allow this!", curthread);
		PANIC(msg);
	}

	/*
	 * Flag this thread as exiting.  Threads should now be prevented
	 * from joining to this thread.
	 */
	THR_SCHED_LOCK(curthread, curthread);
	curthread->flags |= THR_FLAGS_EXITING;
	THR_SCHED_UNLOCK(curthread, curthread);
	
	/*
	 * To avoid the lost-signal problem, handle any signals that have
	 * already been delivered to us.  We have already set the EXITING
	 * flag, so no new signals should be delivered to us.
	 * XXX this is not enough if the signal was delivered just before
	 * the thread called sigprocmask and masked it!  In that case we
	 * might have to re-post the signal with kill() if it targets the
	 * process (not a specific thread).  The kernel has the same
	 * lost-signal problem: a signal may be delivered to a thread that
	 * is on its way to call sigprocmask() or thr_exit()!
	 */
	if (curthread->check_pending)
		_thr_sig_check_pending(curthread);
	/* Save the return value: */
	curthread->ret = status;
	while (curthread->cleanup != NULL) {
		_pthread_cleanup_pop(1);
	}
	if (curthread->attr.cleanup_attr != NULL) {
		curthread->attr.cleanup_attr(curthread->attr.arg_attr);
	}
	/* Check if there is thread specific data: */
	if (curthread->specific != NULL) {
		/* Run the thread-specific data destructors: */
		_thread_cleanupspecific();
	}
	if (!_kse_isthreaded())
		exit(0);
	crit = _kse_critical_enter();
	curkse = _get_curkse();
	KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
	/* Use thread_list_lock */
	_thread_active_threads--;
	if ((_thread_scope_system <= 0 && _thread_active_threads == 1) ||
	    (_thread_scope_system > 0 && _thread_active_threads == 0)) {
		KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
		_kse_critical_leave(crit);
		exit(0);
		/* Never reach! */
	}
	KSE_LOCK_RELEASE(curkse, &_thread_list_lock);

	/* This thread will never be re-scheduled. */
	KSE_LOCK(curkse);
	THR_SET_STATE(curthread, PS_DEAD);
	_thr_sched_switch_unlocked(curthread);
	/* Never reach! */

	/* This point should not be reached. */
	PANIC("Dead thread has resumed");
}
Example #8
int
connect(int fd, const struct sockaddr * name, socklen_t namelen)
{
	struct pthread	*curthread = _get_curthread();
	struct sockaddr tmpname;
	socklen_t	errnolen, tmpnamelen;
	int             ret;

	/* This is a cancellation point: */
	_thread_enter_cancellation_point();

	if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) {
		if ((ret = _thread_sys_connect(fd, name, namelen)) < 0) {
			if (!(_thread_fd_table[fd]->status_flags->flags & O_NONBLOCK) &&
			((errno == EWOULDBLOCK) || (errno == EINPROGRESS) ||
			 (errno == EALREADY) || (errno == EAGAIN))) {
				curthread->data.fd.fd = fd;

				/* Reset the interrupted operation flag: */
				curthread->interrupted = 0;
				curthread->closing_fd = 0;

				/* Set the timeout: */
				_thread_kern_set_timeout(NULL);
				_thread_kern_sched_state(PS_FDW_WAIT, __FILE__, __LINE__);

				/*
				 * Check if the operation was
				 * interrupted by a signal or
				 * a closing fd.
				 */
				if (curthread->interrupted) {
					errno = EINTR;
					ret = -1;
				} else if (curthread->closing_fd) {
					errno = EBADF;
					ret = -1;
				} else {
					tmpnamelen = sizeof(tmpname);
					/* Now let's see if it really worked. */
					if (((ret = _thread_sys_getpeername(fd, &tmpname, &tmpnamelen)) < 0) &&
					    (errno == ENOTCONN)) {

						/*
						 * Get the error; this function
						 * should not fail.
						 */
						errnolen = sizeof(errno);
						_thread_sys_getsockopt(fd, SOL_SOCKET, SO_ERROR, &errno, &errnolen);
					}
				}
			} else {
				ret = -1;
			}
		}
		_FD_UNLOCK(fd, FD_RDWR);
	}

	/* No longer in a cancellation point: */
	_thread_leave_cancellation_point();

	return (ret);
}
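
After the blocking wait, the code distinguishes a completed connect from a failed one via getpeername() and SO_ERROR. The same pattern applies at user level with a nonblocking socket; a sketch:

#include <sys/socket.h>
#include <errno.h>

/* After poll()/select() reports writability on a nonblocking connect. */
static int
connect_result(int fd)
{
	int err = 0;
	socklen_t len = sizeof(err);

	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) == -1)
		return (errno);
	return (err);	/* 0 on success, otherwise the connect(2) error */
}
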
Example #9
int
_pthread_mutex_trylock(pthread_mutex_t * mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*mutex)->m_queue);
			_MUTEX_INIT_LINK(*mutex);
			(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
		}

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:	
			/* Check if this mutex is not locked: */
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for the running thread: */
				(*mutex)->m_owner = curthread;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == curthread)
				ret = mutex_self_trylock(*mutex);
			else
				/* Return a busy error: */
				ret = EBUSY;
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for the running thread: */
				(*mutex)->m_owner = curthread;

				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on the attributes of the
				 * running thread when there are no waiters.
				 */
				(*mutex)->m_prio = curthread->active_priority;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == curthread)
				ret = mutex_self_trylock(*mutex);
			else
				/* Return a busy error: */
				ret = EBUSY;
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*mutex)->m_prio)
				ret = EINVAL;

			/* Check if this mutex is not locked: */
			else if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for the running thread: */
				(*mutex)->m_owner = curthread;

				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority.
				 */
				curthread->active_priority = (*mutex)->m_prio;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == curthread)
				ret = mutex_self_trylock(*mutex);
			else
				/* Return a busy error: */
				ret = EBUSY;
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
	}

	/* Return the completion status: */
	return (ret);
}
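
All three protocol cases return EBUSY when another thread owns the mutex, which makes the call suitable for opportunistic locking. A caller-side sketch:

#include <errno.h>
#include <pthread.h>

/* Take a fast path if the lock is free; otherwise fall back. */
static void
try_fast_path(pthread_mutex_t *mp)
{
	if (pthread_mutex_trylock(mp) == 0) {
		/* ... short critical section ... */
		pthread_mutex_unlock(mp);
	} else {
		/* EBUSY: someone else holds it; take the slow path */
	}
}
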
Example #10
int
_pthread_cond_signal(pthread_cond_t * cond)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread	*pthread;
	struct kse_mailbox *kmbx;
	int		rval = 0;

	THR_ASSERT(curthread->locklevel == 0,
	    "cv_timedwait: locklevel is not zero!");
	if (cond == NULL)
		rval = EINVAL;
       /*
        * If the condition variable is statically initialized, perform dynamic
        * initialization.
        */
	else if (*cond != NULL || (rval = _pthread_cond_init(cond, NULL)) == 0) {
		/* Lock the condition variable structure: */
		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			/* Increment the sequence number: */
			(*cond)->c_seqno++;

			/*
			 * Wakeups have to be done with the CV lock held;
			 * otherwise there is a race condition where the
			 * thread can timeout, run on another KSE, and enter
			 * another blocking state (including blocking on a CV).
			 */
			if ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
			    != NULL) {
				THR_SCHED_LOCK(curthread, pthread);
				cond_queue_remove(*cond, pthread);
				pthread->sigbackout = NULL;
				if ((pthread->kseg == curthread->kseg) &&
				    (pthread->active_priority >
				    curthread->active_priority))
					curthread->critical_yield = 1;
				kmbx = _thr_setrunnable_unlocked(pthread);
				THR_SCHED_UNLOCK(curthread, pthread);
				if (kmbx != NULL)
					kse_wakeup(kmbx);
			}
			/* Check for no more waiters: */
			if (TAILQ_EMPTY(&(*cond)->c_queue))
				(*cond)->c_mutex = NULL;
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		/* Unlock the condition variable structure: */
		THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
	}

	/* Return the completion status: */
	return (rval);
}
Example #11
int
_pthread_cond_broadcast(pthread_cond_t * cond)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread	*pthread;
	struct kse_mailbox *kmbx;
	int		rval = 0;

	THR_ASSERT(curthread->locklevel == 0,
	    "cv_timedwait: locklevel is not zero!");
	if (cond == NULL)
		rval = EINVAL;
       /*
        * If the condition variable is statically initialized, perform dynamic
        * initialization.
        */
	else if (*cond != NULL || (rval = _pthread_cond_init(cond, NULL)) == 0) {
		/* Lock the condition variable structure: */
		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			/* Increment the sequence number: */
			(*cond)->c_seqno++;

			/*
			 * Enter a loop to bring all threads off the
			 * condition queue:
			 */
			while ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
			    != NULL) {
				THR_SCHED_LOCK(curthread, pthread);
				cond_queue_remove(*cond, pthread);
				pthread->sigbackout = NULL;
				if ((pthread->kseg == curthread->kseg) &&
				    (pthread->active_priority >
				    curthread->active_priority))
					curthread->critical_yield = 1;
				kmbx = _thr_setrunnable_unlocked(pthread);
				THR_SCHED_UNLOCK(curthread, pthread);
				if (kmbx != NULL)
					kse_wakeup(kmbx);
			}

			/* There are no more waiting threads: */
			(*cond)->c_mutex = NULL;
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		/* Unlock the condition variable structure: */
		THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
	}

	/* Return the completion status: */
	return (rval);
}
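
Both wakeup paths bump c_seqno under the CV lock, so waiters can detect the change when they re-check. On the producer side, the usual pattern is to publish state under the mutex before waking; a sketch with hypothetical names m, cv, and ready:

#include <pthread.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int ready;

/* Publish the state change, then wake every waiter. */
static void
announce_ready(void)
{
	pthread_mutex_lock(&m);
	ready = 1;
	pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&m);
}
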
Example #12
int
_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
		       const struct timespec * abstime)
{
	struct pthread	*curthread = _get_curthread();
	int	rval = 0;
	int	done = 0;
	int	mutex_locked = 1;
	int	seqno;

	THR_ASSERT(curthread->locklevel == 0,
	    "cv_timedwait: locklevel is not zero!");

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);
	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	if (*cond == NULL && (rval = _pthread_cond_init(cond, NULL)) != 0)
		return (rval);

	if (!_kse_isthreaded())
		_kse_setthreaded(1);

	/*
	 * Enter a loop waiting for a condition signal or broadcast
	 * to wake up this thread.  A loop is needed in case the waiting
	 * thread is interrupted by a signal to execute a signal handler.
	 * It is not (currently) possible to remain in the waiting queue
	 * while running a handler.  Instead, the thread is interrupted
	 * and backed out of the waiting queue prior to executing the
	 * signal handler.
	 */

	/* Lock the condition variable structure: */
	THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
	seqno = (*cond)->c_seqno;
	do {
		/*
		 * If the condvar was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*cond)->c_queue);
			(*cond)->c_flags |= COND_FLAGS_INITED;
		}

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
			    ((*cond)->c_mutex != *mutex))) {
				/* Return invalid argument error: */
				rval = EINVAL;
			} else {
				/* Reset the timeout and interrupted flags: */
				curthread->timeout = 0;
				curthread->interrupted = 0;

				/*
				 * Queue the running thread for the condition
				 * variable:
				 */
				cond_queue_enq(*cond, curthread);

				/* Unlock the mutex: */
				if (mutex_locked &&
				   ((rval = _mutex_cv_unlock(mutex)) != 0)) {
					/*
					 * Cannot unlock the mutex; remove the
					 * running thread from the condition
					 * variable queue: 
					 */
					cond_queue_remove(*cond, curthread);
				} else {
					/* Remember the mutex: */
					(*cond)->c_mutex = *mutex;

					/*
					 * Don't unlock the mutex the next
					 * time through the loop (if the
					 * thread has to be requeued after
					 * handling a signal).
					 */
					mutex_locked = 0;

					/*
					 * This thread is active and is in a
					 * critical region (holding the cv
					 * lock); we should be able to safely
					 * set the state.
					 */
					THR_SCHED_LOCK(curthread, curthread);
					/* Set the wakeup time: */
					curthread->wakeup_time.tv_sec =
					    abstime->tv_sec;
					curthread->wakeup_time.tv_nsec =
					    abstime->tv_nsec;
					THR_SET_STATE(curthread, PS_COND_WAIT);

					/* Remember the CV: */
					curthread->data.cond = *cond;
					curthread->sigbackout = cond_wait_backout;
					THR_SCHED_UNLOCK(curthread, curthread);

					/* Unlock the CV structure: */
					THR_LOCK_RELEASE(curthread,
					    &(*cond)->c_lock);

					/* Schedule the next thread: */
					_thr_sched_switch(curthread);

					/*
					 * XXX - This really isn't a good check
					 * since there can be more than one
					 * thread waiting on the CV.  Signals
					 * sent to threads waiting on mutexes
					 * or CVs should really be deferred
					 * until the threads are no longer
					 * waiting, but POSIX says that signals
					 * should be sent "as soon as possible".
					 */
					done = (seqno != (*cond)->c_seqno);
					if (done && !THR_IN_CONDQ(curthread)) {
						/*
						 * The thread is dequeued, so
						 * it is safe to clear these.
						 */
						curthread->data.cond = NULL;
						curthread->sigbackout = NULL;
						check_continuation(curthread,
						    NULL, mutex);
						return (_mutex_cv_lock(mutex));
					}

					/* Relock the CV structure: */
					THR_LOCK_ACQUIRE(curthread,
					    &(*cond)->c_lock);

					/*
					 * Clear these after taking the lock to
					 * prevent a race condition where a
					 * signal can arrive before dequeueing
					 * the thread.
					 */
					curthread->data.cond = NULL;
					curthread->sigbackout = NULL;

					done = (seqno != (*cond)->c_seqno);

					if (THR_IN_CONDQ(curthread)) {
						cond_queue_remove(*cond,
						    curthread);

						/* Check for no more waiters: */
						if (TAILQ_EMPTY(&(*cond)->c_queue))
							(*cond)->c_mutex = NULL;
					}

					if (curthread->timeout != 0) {
						/* The wait timedout. */
						rval = ETIMEDOUT;
					}
				}
			}
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		check_continuation(curthread, *cond,
		    mutex_locked ? NULL : mutex);
	} while ((done == 0) && (rval == 0));

	/* Unlock the condition variable structure: */
	THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

	if (mutex_locked == 0)
		_mutex_cv_lock(mutex);

	/* Return the completion status: */
	return (rval);
}
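
Because a waiter can be backed out to run a signal handler and then requeued, the implementation loops on the sequence number; callers must likewise loop on their own predicate. The canonical wait loop, as a sketch:

#include <errno.h>
#include <pthread.h>
#include <time.h>

/* Re-check the predicate after every wakeup; stop on timeout. */
static int
wait_until_ready(pthread_mutex_t *mp, pthread_cond_t *cvp, int *readyp,
    const struct timespec *abstime)
{
	int err = 0;

	pthread_mutex_lock(mp);
	while (*readyp == 0 && err == 0)
		err = pthread_cond_timedwait(cvp, mp, abstime);
	pthread_mutex_unlock(mp);
	return (err);	/* 0, or ETIMEDOUT once the deadline passes */
}
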
Example #13
void
_pthread_exit(void *status)
{
	struct pthread	*curthread = _get_curthread();
	pthread_t pthread;

	/* Check if this thread is already in the process of exiting: */
	if ((curthread->flags & PTHREAD_EXITING) != 0) {
		char msg[128];
		snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. POSIX 1003.1 1996 s16.2.5.2 does not allow this!",curthread);
		PANIC(msg);
	}

	/* Flag this thread as exiting: */
	curthread->flags |= PTHREAD_EXITING;

	/* Save the return value: */
	curthread->ret = status;

	while (curthread->cleanup != NULL) {
		pthread_cleanup_pop(1);
	}
	if (curthread->attr.cleanup_attr != NULL) {
		curthread->attr.cleanup_attr(curthread->attr.arg_attr);
	}
	/* Check if there is thread specific data: */
	if (curthread->specific != NULL) {
		/* Run the thread-specific data destructors: */
		_thread_cleanupspecific();
	}

	/* Free thread-specific poll_data structure, if allocated: */
	if (curthread->poll_data.fds != NULL) {
		free(curthread->poll_data.fds);
		curthread->poll_data.fds = NULL;
	}

	/*
	 * Lock the garbage collector mutex to ensure that the garbage
	 * collector is not using the dead thread list.
	 */
	if (_pthread_mutex_lock(&_gc_mutex) != 0)
		PANIC("Cannot lock gc mutex");

	/* Add this thread to the list of dead threads. */
	TAILQ_INSERT_HEAD(&_dead_list, curthread, dle);

	/*
	 * Signal the garbage collector thread that there is something
	 * to clean up.
	 */
	if (_pthread_cond_signal(&_gc_cond) != 0)
		PANIC("Cannot signal gc cond");

	/*
	 * Avoid a race condition where a scheduling signal can occur
	 * causing the garbage collector thread to run.  If this happens,
	 * the current thread can be cleaned out from under us.
	 */
	_thread_kern_sig_defer();

	/* Unlock the garbage collector mutex: */
	if (_pthread_mutex_unlock(&_gc_mutex) != 0)
		PANIC("Cannot unlock gc mutex");

	/* Check if there is a thread joining this one: */
	if (curthread->joiner != NULL) {
		pthread = curthread->joiner;
		curthread->joiner = NULL;

		/* Make the joining thread runnable: */
		PTHREAD_NEW_STATE(pthread, PS_RUNNING);

		/* Set the return value for the joining thread: */
		pthread->join_status.ret = curthread->ret;
		pthread->join_status.error = 0;
		pthread->join_status.thread = NULL;

		/* Make this thread collectable by the garbage collector. */
		PTHREAD_ASSERT(((curthread->attr.flags & PTHREAD_DETACHED) ==
		    0), "Cannot join a detached thread");
		curthread->attr.flags |= PTHREAD_DETACHED;
	}

	/* Remove this thread from the thread list: */
	TAILQ_REMOVE(&_thread_list, curthread, tle);

	/* This thread will never be re-scheduled. */
	_thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__);

	/* This point should not be reached. */
	PANIC("Dead thread has resumed");
}
Example #14
int
_pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	       void *(*start_routine) (void *), void *arg)
{
	struct pthread	*curthread = _get_curthread();
	struct itimerval itimer;
	int		f_gc = 0;
	int             ret = 0;
	pthread_t       gc_thread;
	pthread_t       new_thread;
	pthread_attr_t	pattr;
	void           *stack;
#if !defined(__ia64__)
	u_long 		stackp;
#endif

	if (thread == NULL)
		return(EINVAL);

	/*
	 * Locking functions in libc are required when there are
	 * threads other than the initial thread.
	 */
	__isthreaded = 1;

	/* Allocate memory for the thread structure: */
	if ((new_thread = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
		/* Insufficient memory to create a thread: */
		ret = EAGAIN;
	} else {
		/* Check if default thread attributes are required: */
		if (attr == NULL || *attr == NULL) {
			/* Use the default thread attributes: */
			pattr = &_pthread_attr_default;
		} else {
			pattr = *attr;
		}
		/* Check if a stack was specified in the thread attributes: */
		if ((stack = pattr->stackaddr_attr) == NULL) {
			/* No stack was specified; allocate one: */
			stack = _thread_stack_alloc(pattr->stacksize_attr,
			    pattr->guardsize_attr);
			if (stack == NULL) {
				ret = EAGAIN;
				free(new_thread);
			}
		}

		/* Check for errors: */
		if (ret == 0) {
			/* Initialise the thread structure: */
			memset(new_thread, 0, sizeof(struct pthread));
			new_thread->slice_usec = -1;
			new_thread->stack = stack;
			new_thread->start_routine = start_routine;
			new_thread->arg = arg;

			new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
			    PTHREAD_CANCEL_DEFERRED;

			/*
			 * Write a magic value to the thread structure
			 * to help identify valid ones:
			 */
			new_thread->magic = PTHREAD_MAGIC;

			/* Initialise the thread for signals: */
			new_thread->sigmask = curthread->sigmask;
			new_thread->sigmask_seqno = 0;

			/* Initialize the signal frame: */
			new_thread->curframe = NULL;

			/* Initialise the jump buffer: */
			_setjmp(new_thread->ctx.jb);

			/*
			 * Set up new stack frame so that it looks like it
			 * returned from a longjmp() to the beginning of
			 * _thread_start().
			 */
			SET_RETURN_ADDR_JB(new_thread->ctx.jb, _thread_start);

#if !defined(__ia64__)
			stackp = (long)new_thread->stack + pattr->stacksize_attr - sizeof(double);
#if defined(__amd64__)
			stackp &= ~0xFUL;
#endif
			/* The stack starts high and builds down: */
			SET_STACK_JB(new_thread->ctx.jb, stackp);
#else
			SET_STACK_JB(new_thread->ctx.jb,
			    (long)new_thread->stack, pattr->stacksize_attr);
#endif

			/* Copy the thread attributes: */
			memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr));

			/*
			 * Check if this thread is to inherit the scheduling
			 * attributes from its parent:
			 */
			if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
				/* Copy the scheduling attributes: */
				new_thread->base_priority =
				    curthread->base_priority &
				    ~PTHREAD_SIGNAL_PRIORITY;
				new_thread->attr.prio =
				    curthread->base_priority &
				    ~PTHREAD_SIGNAL_PRIORITY;
				new_thread->attr.sched_policy =
				    curthread->attr.sched_policy;
			} else {
				/*
				 * Use just the thread priority, leaving the
				 * other scheduling attributes as their
				 * default values:
				 */
				new_thread->base_priority =
				    new_thread->attr.prio;
			}
			new_thread->active_priority = new_thread->base_priority;
			new_thread->inherited_priority = 0;

			/* Initialize joiner to NULL (no joiner): */
			new_thread->joiner = NULL;

			/* Initialize the mutex queue: */
			TAILQ_INIT(&new_thread->mutexq);

			/* Initialise hooks in the thread structure: */
			new_thread->specific = NULL;
			new_thread->cleanup = NULL;
			new_thread->flags = 0;
			new_thread->poll_data.nfds = 0;
			new_thread->poll_data.fds = NULL;
			new_thread->continuation = NULL;

			/*
			 * Defer signals to protect the scheduling queues
			 * from access by the signal handler:
			 */
			_thread_kern_sig_defer();

			/*
			 * Initialise the unique id which GDB uses to
			 * track threads.
			 */
			new_thread->uniqueid = next_uniqueid++;

			/*
			 * Check if the garbage collector thread
			 * needs to be started.
			 */
			f_gc = (TAILQ_FIRST(&_thread_list) == _thread_initial);

			/* Add the thread to the linked list of all threads: */
			TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle);

			if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
				new_thread->flags |= PTHREAD_FLAGS_SUSPENDED;
				new_thread->state = PS_SUSPENDED;
			} else {
				new_thread->state = PS_RUNNING;
				PTHREAD_PRIOQ_INSERT_TAIL(new_thread);
			}

			/*
			 * Undefer and handle pending signals, yielding
			 * if necessary.
			 */
			_thread_kern_sig_undefer();

			/* Return a pointer to the thread structure: */
			(*thread) = new_thread;

			if (f_gc != 0) {
				/* Install the scheduling timer: */
				itimer.it_interval.tv_sec = 0;
				itimer.it_interval.tv_usec = _clock_res_usec;
				itimer.it_value = itimer.it_interval;
				if (setitimer(_ITIMER_SCHED_TIMER, &itimer,
				    NULL) != 0)
					PANIC("Cannot set interval timer");
			}

			/* Schedule the new user thread: */
			_thread_kern_sched(NULL);

			/*
			 * Start a garbage collector thread
			 * if necessary.
			 */
			if (f_gc && _pthread_create(&gc_thread, NULL,
				    _thread_gc, NULL) != 0)
				PANIC("Can't create gc thread");

		}
	}

	/* Return the status: */
	return (ret);
}
Example #15
static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
               const struct timespec *abstime, int cancel)
{
    struct pthread	*curthread = _get_curthread();
    struct sleepqueue *sq;
    int	recurse;
    int	error;
    int	deferred;

    if (curthread->wchan != NULL)
        PANIC("thread was already on queue.");

    if (cancel)
        _thr_testcancel(curthread);

    _sleepq_lock(cvp);
    /*
     * Set __has_user_waiters before unlocking the mutex; this allows
     * us to check it without locking in pthread_cond_signal().
     */
    cvp->__has_user_waiters = 1;
    deferred = 0;
    (void)_mutex_cv_unlock(mp, &recurse, &deferred);
    curthread->mutex_obj = mp;
    _sleepq_add(cvp, curthread);
    for(;;) {
        _thr_clear_wake(curthread);
        _sleepq_unlock(cvp);
        if (deferred) {
            deferred = 0;
            if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
                (void)_umtx_op_err(&mp->m_lock, UMTX_OP_MUTEX_WAKE2,
                                   mp->m_lock.m_flags, 0, 0);
        }
        if (curthread->nwaiter_defer > 0) {
            _thr_wake_all(curthread->defer_waiters,
                          curthread->nwaiter_defer);
            curthread->nwaiter_defer = 0;
        }

        if (cancel) {
            _thr_cancel_enter2(curthread, 0);
            error = _thr_sleep(curthread, cvp->__clock_id, abstime);
            _thr_cancel_leave(curthread, 0);
        } else {
            error = _thr_sleep(curthread, cvp->__clock_id, abstime);
        }

        _sleepq_lock(cvp);
        if (curthread->wchan == NULL) {
            error = 0;
            break;
        } else if (cancel && SHOULD_CANCEL(curthread)) {
            sq = _sleepq_lookup(cvp);
            cvp->__has_user_waiters =
                _sleepq_remove(sq, curthread);
            _sleepq_unlock(cvp);
            curthread->mutex_obj = NULL;
            _mutex_cv_lock(mp, recurse);
            if (!THR_IN_CRITICAL(curthread))
                _pthread_exit(PTHREAD_CANCELED);
            else /* this should not happen */
                return (0);
        } else if (error == ETIMEDOUT) {
            sq = _sleepq_lookup(cvp);
            cvp->__has_user_waiters =
                _sleepq_remove(sq, curthread);
            break;
        }
    }
    _sleepq_unlock(cvp);
    curthread->mutex_obj = NULL;
    _mutex_cv_lock(mp, recurse);
    return (error);
}
Example #16
int
_pthread_mutex_lock(pthread_mutex_t * mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	ret = 0;

	if (_thread_initial == NULL)
		_thread_init();

	if (mutex == NULL)
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if ((*mutex == NULL) &&
	    ((ret = init_static(mutex)) != 0))
		return (ret);

	/* Reset the interrupted flag: */
	curthread->interrupted = 0;

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
	do {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*mutex)->m_queue);
			(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
			_MUTEX_INIT_LINK(*mutex);
		}

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*mutex)->m_owner = curthread;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*mutex), m_qe);

			} else if ((*mutex)->m_owner == curthread)
				ret = mutex_self_lock(*mutex);
			else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex: 
				 */
				mutex_queue_enq(*mutex, curthread);

				/*
				 * Keep a pointer to the mutex this thread
				 * is waiting on:
				 */
				curthread->data.mutex = *mutex;

				/*
				 * Unlock the mutex structure and schedule the
				 * next thread:
				 */
				_thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
				    &(*mutex)->lock, __FILE__, __LINE__);

				/* Lock the mutex structure again: */
				_SPINLOCK(&(*mutex)->lock);
			}
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*mutex)->m_owner = curthread;

				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 */
				(*mutex)->m_prio = curthread->active_priority;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*mutex), m_qe);

			} else if ((*mutex)->m_owner == curthread)
				ret = mutex_self_lock(*mutex);
			else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex: 
				 */
				mutex_queue_enq(*mutex, curthread);

				/*
				 * Keep a pointer to the mutex this thread
				 * is waiting on:
				 */
				curthread->data.mutex = *mutex;

				if (curthread->active_priority >
				    (*mutex)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(*mutex);

				/*
				 * Unlock the mutex structure and schedule the
				 * next thread:
				 */
				_thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
				    &(*mutex)->lock, __FILE__, __LINE__);

				/* Lock the mutex structure again: */
				_SPINLOCK(&(*mutex)->lock);
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*mutex)->m_prio)
				ret = EINVAL;

			/* Check if this mutex is not locked: */
			else if ((*mutex)->m_owner == NULL) {
				/*
				 * Lock the mutex for the running
				 * thread:
				 */
				(*mutex)->m_owner = curthread;

				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority:
				 */
				curthread->active_priority = (*mutex)->m_prio;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == curthread)
				ret = mutex_self_lock(*mutex);
			else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex: 
				 */
				mutex_queue_enq(*mutex, curthread);

				/*
				 * Keep a pointer to the mutex this thread
				 * is waiting on:
				 */
				curthread->data.mutex = *mutex;

				/* Clear any previous error: */
				errno = 0;

				/*
				 * Unlock the mutex structure and schedule the
				 * next thread:
				 */
				_thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
				    &(*mutex)->lock, __FILE__, __LINE__);

				/* Lock the mutex structure again: */
				_SPINLOCK(&(*mutex)->lock);

				/*
				 * The thread's priority may have changed while
				 * it waited for the mutex, causing a ceiling
				 * violation.
				 */
				ret = errno;
				errno = 0;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		/*
		 * Check to see if this thread was interrupted and
		 * is still in the mutex queue of waiting threads:
		 */
		if (curthread->interrupted != 0)
			mutex_queue_remove(*mutex, curthread);

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
	} while (((*mutex)->m_owner != curthread) && (ret == 0) &&
	    (curthread->interrupted == 0));

	if (curthread->interrupted != 0 &&
	    curthread->continuation != NULL)
		curthread->continuation((void *) curthread);

	/* Return the completion status: */
	return (ret);
}
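
The PTHREAD_PRIO_PROTECT branch returns EINVAL for a ceiling violation, which can only happen if the mutex was created with a ceiling below the caller's priority. A sketch of creating such a mutex with the standard attribute calls (error checking of the attribute calls omitted):

#include <pthread.h>

/* Create a priority-ceiling mutex (the PTHREAD_PRIO_PROTECT path above). */
static int
make_ceiling_mutex(pthread_mutex_t *mp, int ceiling)
{
	pthread_mutexattr_t ma;
	int err;

	pthread_mutexattr_init(&ma);
	pthread_mutexattr_setprotocol(&ma, PTHREAD_PRIO_PROTECT);
	pthread_mutexattr_setprioceiling(&ma, ceiling);
	err = pthread_mutex_init(mp, &ma);
	pthread_mutexattr_destroy(&ma);
	return (err);
}
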
Example #17
int
_nanosleep(const struct timespec *time_to_sleep,
    struct timespec *time_remaining)
{
	struct pthread	*curthread = _get_curthread();
	int             ret = 0;
	struct timespec ts, ts1;
	struct timespec remaining_time;
	struct timespec wakeup_time;

	/* Check if the time to sleep is legal: */
	if ((time_to_sleep == NULL) || (time_to_sleep->tv_sec < 0) ||
	    (time_to_sleep->tv_nsec < 0) ||
	    (time_to_sleep->tv_nsec >= 1000000000)) {
		/* Return an EINVAL error: */
		errno = EINVAL;
		ret = -1;
	} else {
		if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
			return (__sys_nanosleep(time_to_sleep, time_remaining));
			
		KSE_GET_TOD(curthread->kse, &ts);

		/* Calculate the time for the current thread to wake up: */
		TIMESPEC_ADD(&wakeup_time, &ts, time_to_sleep);

		THR_LOCK_SWITCH(curthread);
		curthread->interrupted = 0;
		curthread->wakeup_time = wakeup_time;
		THR_SET_STATE(curthread, PS_SLEEP_WAIT);

		/* Reschedule the current thread to sleep: */
		_thr_sched_switch_unlocked(curthread);

		/* Calculate the remaining time to sleep: */
		KSE_GET_TOD(curthread->kse, &ts1);
		remaining_time.tv_sec = time_to_sleep->tv_sec
		    + ts.tv_sec - ts1.tv_sec;
		remaining_time.tv_nsec = time_to_sleep->tv_nsec
		    + ts.tv_nsec - ts1.tv_nsec;

		/* Check if the nanosecond field has underflowed: */
		if (remaining_time.tv_nsec < 0) {
			/* Handle the underflow: */
			remaining_time.tv_sec -= 1;
			remaining_time.tv_nsec += 1000000000;
		}
		/* Check if the nanosecond field has overflowed: */
		else if (remaining_time.tv_nsec >= 1000000000) {
			/* Handle the overflow: */
			remaining_time.tv_sec += 1;
			remaining_time.tv_nsec -= 1000000000;
		}

		/* Check if the sleep was longer than the required time: */
		if (remaining_time.tv_sec < 0) {
			/* Reset the time left: */
			remaining_time.tv_sec = 0;
			remaining_time.tv_nsec = 0;
		}

		/* Check if the time remaining is to be returned: */
		if (time_remaining != NULL) {
			/* Return the actual time slept: */
			time_remaining->tv_sec = remaining_time.tv_sec;
			time_remaining->tv_nsec = remaining_time.tv_nsec;
		}

		/* Check if the sleep was interrupted: */
		if (curthread->interrupted) {
			/* Return an EINTR error: */
			errno = EINTR;
			ret = -1;
		}
	}
	return (ret);
}
Example #18
static inline int
mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
{
	struct pthread	*curthread = _get_curthread();
	int	ret = 0;

	if (mutex == NULL || *mutex == NULL) {
		ret = EINVAL;
	} else {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is a
				 * recursive mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of
				 * threads waiting on the mutex: 
				 */
				if (((*mutex)->m_owner =
			  	    mutex_queue_deq(*mutex)) != NULL) {
					/* Make the new owner runnable: */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					/*
					 * Add the mutex to the thread's list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
				}
			}
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is a
				 * recursive mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
					(*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of threads
				 * waiting on the mutex: 
				 */
				if (((*mutex)->m_owner = 
				    mutex_queue_deq(*mutex)) == NULL)
					/* This mutex has no priority. */
					(*mutex)->m_prio = 0;
				else {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the thread's list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Set the priority of the mutex.  Since
					 * our waiting threads are in descending
					 * priority order, the priority of the
					 * mutex becomes the active priority of
					 * the thread we just dequeued.
					 */
					(*mutex)->m_prio =
					    (*mutex)->m_owner->active_priority;

					/*
					 * Save the owning thread's inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread's inherited
					 * priority now becomes its active
					 * priority (the priority of the mutex).
					 */
					(*mutex)->m_owner->inherited_priority =
						(*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
				}
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is a
				 * recursive mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
					(*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Enter a loop to find a waiting thread whose
				 * active priority will not cause a ceiling
				 * violation:
				 */
				while ((((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) &&
				    ((*mutex)->m_owner->active_priority >
				     (*mutex)->m_prio)) {
					/*
					 * Either the mutex ceiling priority has
					 * been lowered and/or this thread's
					 * priority has been raised subsequent
					 * to this thread being queued on the
					 * waiting list.
					 */
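					/*
					 * Switch to the waiter's TCB so the
					 * EINVAL below is stored in that
					 * thread's errno, then switch back:
					 */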
					tls_set_tcb((*mutex)->m_owner->tcb);
					errno = EINVAL;
					tls_set_tcb(curthread->tcb);
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
					/*
					 * The thread is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
				}

				/* Check for a new owner: */
				if ((*mutex)->m_owner != NULL) {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the thread's list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Save the owning thread's inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread inherits the
					 * ceiling priority of the mutex and
					 * executes at that priority:
					 */
					(*mutex)->m_owner->inherited_priority =
					    (*mutex)->m_prio;
					(*mutex)->m_owner->active_priority =
					    (*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
				}
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0)) {
			/* Increment the reference count: */
			(*mutex)->m_refcount++;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
	}

	/* Return the completion status: */
	return (ret);
}
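
/*
 * The unlock paths above implement the POSIX priority protocols
 * (priority inheritance and priority ceiling).  A minimal usage
 * sketch, not part of the library source, of how an application
 * selects the priority-ceiling protocol through the portable
 * attribute API; the ceiling value 20 is illustrative only.
 */
#include <pthread.h>

static pthread_mutex_t ceiling_mutex;

static int
init_ceiling_mutex(void)
{
	pthread_mutexattr_t attr;
	int ret;

	if ((ret = pthread_mutexattr_init(&attr)) != 0)
		return (ret);
	/* Request the priority-ceiling (PRIO_PROTECT) protocol: */
	if ((ret = pthread_mutexattr_setprotocol(&attr,
	    PTHREAD_PRIO_PROTECT)) == 0 &&
	    (ret = pthread_mutexattr_setprioceiling(&attr, 20)) == 0)
		ret = pthread_mutex_init(&ceiling_mutex, &attr);
	(void)pthread_mutexattr_destroy(&attr);
	return (ret);
}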
Example #19
int
_pthread_cancel(pthread_t pthread)
{
    struct pthread *curthread = _get_curthread();
    struct pthread *joinee = NULL;
    struct kse_mailbox *kmbx = NULL;
    int ret;

    if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0)) == 0) {
        /*
         * Take the thread's lock while we change the cancel flags.
         */
        THR_THREAD_LOCK(curthread, pthread);
        THR_SCHED_LOCK(curthread, pthread);
        if (pthread->flags & THR_FLAGS_EXITING) {
            THR_SCHED_UNLOCK(curthread, pthread);
            THR_THREAD_UNLOCK(curthread, pthread);
            _thr_ref_delete(curthread, pthread);
            return (ESRCH);
        }
        if (((pthread->cancelflags & PTHREAD_CANCEL_DISABLE) != 0) ||
                (((pthread->cancelflags & THR_AT_CANCEL_POINT) == 0) &&
                 ((pthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) == 0)))
            /* Just mark it for cancellation: */
            pthread->cancelflags |= THR_CANCELLING;
        else {
            /*
             * Check if we need to kick it back into the
             * run queue:
             */
            switch (pthread->state) {
            case PS_RUNNING:
                /* No need to resume: */
                pthread->cancelflags |= THR_CANCELLING;
                break;

            case PS_LOCKWAIT:
                /*
                 * These can't be removed from the queue.
                 * Just mark it as cancelling and tell it
                 * to yield once it leaves the critical
                 * region.
                 */
                pthread->cancelflags |= THR_CANCELLING;
                pthread->critical_yield = 1;
                break;

            case PS_SLEEP_WAIT:
            case PS_SIGSUSPEND:
            case PS_SIGWAIT:
                /* Interrupt and resume: */
                pthread->interrupted = 1;
                pthread->cancelflags |= THR_CANCELLING;
                kmbx = _thr_setrunnable_unlocked(pthread);
                break;

            case PS_JOIN:
                /* Disconnect the thread from the joinee: */
                joinee = pthread->join_status.thread;
                pthread->join_status.thread = NULL;
                pthread->cancelflags |= THR_CANCELLING;
                kmbx = _thr_setrunnable_unlocked(pthread);
                if ((joinee != NULL) &&
                        (pthread->kseg == joinee->kseg)) {
                    /* Remove the joiner from the joinee. */
                    joinee->joiner = NULL;
                    joinee = NULL;
                }
                break;

            case PS_SUSPENDED:
            case PS_MUTEX_WAIT:
            case PS_COND_WAIT:
                /*
                 * Threads in these states may be in queues.
                 * In order to preserve queue integrity, the
                 * cancelled thread must remove itself from the
                 * queue.  Mark the thread as interrupted and
                 * needing cancellation, and set the state to
                 * running.  When the thread resumes, it will
                 * remove itself from the queue and call the
                 * cancellation completion routine.
                 */
                pthread->interrupted = 1;
                pthread->cancelflags |= THR_CANCEL_NEEDED;
                kmbx = _thr_setrunnable_unlocked(pthread);
                pthread->continuation =
                    _thr_finish_cancellation;
                break;

            case PS_DEAD:
            case PS_DEADLOCK:
            case PS_STATE_MAX:
                /* Ignore - only here to silence -Wall: */
                break;
            }
            if ((pthread->cancelflags & THR_AT_CANCEL_POINT) &&
                    (pthread->blocked != 0 ||
                     pthread->attr.flags & PTHREAD_SCOPE_SYSTEM))
                kse_thr_interrupt(&pthread->tcb->tcb_tmbx,
                                  KSE_INTR_INTERRUPT, 0);
        }

        /*
         * Release the thread's lock and remove the
         * reference:
         */
        THR_SCHED_UNLOCK(curthread, pthread);
        THR_THREAD_UNLOCK(curthread, pthread);
        _thr_ref_delete(curthread, pthread);
        if (kmbx != NULL)
            kse_wakeup(kmbx);

        if ((joinee != NULL) &&
                (_thr_ref_add(curthread, joinee, /* include dead */1) == 0)) {
            /* Remove the joiner from the joinee. */
            THR_SCHED_LOCK(curthread, joinee);
            joinee->joiner = NULL;
            THR_SCHED_UNLOCK(curthread, joinee);
            _thr_ref_delete(curthread, joinee);
        }
    }
    return (ret);
}
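
/*
 * A usage sketch (assumed application code, not library source):
 * cancel a thread blocked in a cancellation point.  sleep() is a
 * cancellation point, so the worker unwinds and pthread_join()
 * reports PTHREAD_CANCELED.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *
worker(void *arg)
{
    (void)arg;
    for (;;)
        sleep(1);       /* cancellation point */
}

int
main(void)
{
    pthread_t td;
    void *res;

    if (pthread_create(&td, NULL, worker, NULL) != 0)
        return (1);
    (void)pthread_cancel(td);
    (void)pthread_join(td, &res);
    printf("worker %s\n",
        res == PTHREAD_CANCELED ? "was cancelled" : "exited");
    return (0);
}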
Example #20
int
_pthread_setschedparam(pthread_t pthread, int policy, 
	const struct sched_param *param)
{
	struct pthread *curthread = _get_curthread();
	int	in_syncq;
	int	in_readyq = 0;
	int	old_prio;
	int	ret = 0;

	if ((param == NULL) || (policy < SCHED_FIFO) || (policy > SCHED_RR)) {
		/* Return an invalid argument error: */
		ret = EINVAL;
	} else if ((param->sched_priority < THR_MIN_PRIORITY) ||
	    (param->sched_priority > THR_MAX_PRIORITY)) {
		/* Return an unsupported value error. */
		ret = ENOTSUP;

	/* Find the thread in the list of active threads: */
	} else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
	    == 0) {
		/*
		 * Lock the thread's scheduling queue while we change
		 * its priority:
		 */
		THR_SCHED_LOCK(curthread, pthread);
		if ((pthread->state == PS_DEAD) ||
		    (pthread->state == PS_DEADLOCK) ||
		    ((pthread->flags & THR_FLAGS_EXITING) != 0)) {
			THR_SCHED_UNLOCK(curthread, pthread);
			_thr_ref_delete(curthread, pthread);
			return (ESRCH);
		}
		in_syncq = pthread->sflags & THR_FLAGS_IN_SYNCQ;

		/* Set the scheduling policy: */
		pthread->attr.sched_policy = policy;

		if (param->sched_priority ==
		    THR_BASE_PRIORITY(pthread->base_priority))
			/*
			 * There is nothing to do; unlock the thread's
			 * scheduling queue.
			 */
			THR_SCHED_UNLOCK(curthread, pthread);
		else {
			/*
			 * Remove the thread from its current priority
			 * queue before any adjustments are made to its
			 * active priority:
			 */
			old_prio = pthread->active_priority;
			if ((pthread->flags & THR_FLAGS_IN_RUNQ) != 0) {
				in_readyq = 1;
				THR_RUNQ_REMOVE(pthread);
			}

			/* Set the thread base priority: */
			pthread->base_priority &=
			    (THR_SIGNAL_PRIORITY | THR_RT_PRIORITY);
			pthread->base_priority |= param->sched_priority;

			/* Recalculate the active priority: */
			pthread->active_priority = MAX(pthread->base_priority,
			    pthread->inherited_priority);

			if (in_readyq) {
				if ((pthread->priority_mutex_count > 0) &&
				    (old_prio > pthread->active_priority)) {
					/*
					 * POSIX states that if the priority is
					 * being lowered, the thread must be
					 * inserted at the head of the queue for
					 * its priority if it owns any priority
					 * protection or inheritance mutexes.
					 */
					THR_RUNQ_INSERT_HEAD(pthread);
				}
				else
					THR_RUNQ_INSERT_TAIL(pthread);
			}

			/* Unlock the thread's scheduling queue: */
			THR_SCHED_UNLOCK(curthread, pthread);

			/*
			 * Check for any mutex priority adjustments.  This
			 * includes checking for a priority mutex on which
			 * this thread is waiting.
			 */
			_mutex_notify_priochange(curthread, pthread, in_syncq);
		}
		_thr_ref_delete(curthread, pthread);
	}
	return (ret);
}
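
/*
 * A usage sketch (assumed application code): move a thread to
 * SCHED_RR at a mid-range priority.  Portable code derives the
 * priority from sched_get_priority_min()/sched_get_priority_max()
 * rather than hard-coding a value.
 */
#include <pthread.h>
#include <sched.h>

static int
make_round_robin(pthread_t td)
{
	struct sched_param param;

	param.sched_priority = (sched_get_priority_min(SCHED_RR) +
	    sched_get_priority_max(SCHED_RR)) / 2;
	return (pthread_setschedparam(td, SCHED_RR, &param));
}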
Example #21
void
_pthread_cancel_enter(int maycancel)
{
	_thr_cancel_enter2(_get_curthread(), maycancel);
}
Example #22
int
_pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	       void *(*start_routine) (void *), void *arg)
{
	struct pthread	*curthread = _get_curthread();
	struct itimerval itimer;
	int		f_gc = 0;
	int             ret = 0;
	pthread_t       gc_thread;
	pthread_t       new_thread;
	pthread_attr_t	pattr;
	void           *stack;

	if (thread == NULL)
		return (EINVAL);

	/*
	 * Locking functions in libc are required when there are
	 * threads other than the initial thread.
	 */
	__isthreaded = 1;

	/* Allocate memory for the thread structure: */
	if ((new_thread = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
		/* Insufficient memory to create a thread: */
		ret = EAGAIN;
	} else {
		/* Check if default thread attributes are required: */
		if (attr == NULL || *attr == NULL) {
			/* Use the default thread attributes: */
			pattr = &pthread_attr_default;
		} else {
			pattr = *attr;
		}
		/* Check if a stack was specified in the thread attributes: */
		if ((stack = pattr->stackaddr_attr) != NULL) {
		}
		/* Allocate memory for a default-size stack: */
		else if (pattr->stacksize_attr == PTHREAD_STACK_DEFAULT) {
			struct stack	*spare_stack;
			
			/* Allocate or re-use a default-size stack. */
			
			/*
			 * Use the garbage collector mutex for synchronization
			 * of the spare stack list.
			 */
			if (pthread_mutex_lock(&_gc_mutex) != 0)
				PANIC("Cannot lock gc mutex");
			
			if ((spare_stack = SLIST_FIRST(&_stackq)) != NULL) {
				/* Use the spare stack. */
				SLIST_REMOVE_HEAD(&_stackq, qe);
				
				/* Unlock the garbage collector mutex. */
				if (pthread_mutex_unlock(&_gc_mutex) != 0)
					PANIC("Cannot unlock gc mutex");
				
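				/*
				 * The struct stack header is kept in the
				 * top bytes of a cached stack, so step back
				 * a full PTHREAD_STACK_DEFAULT from just
				 * past the header to recover the usable
				 * stack base:
				 */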
				stack = sizeof(struct stack)
				    + (void *) spare_stack
				    - PTHREAD_STACK_DEFAULT;
			} else {
				/* Allocate a new stack. */
				stack = _next_stack + PTHREAD_STACK_GUARD;

				/*
				 * Even if stack allocation fails, we don't want
				 * to try to use this location again, so
				 * unconditionally decrement _next_stack.  Under
				 * normal operating conditions, the most likely
				 * reason for an mmap() error is a stack
				 * overflow of the adjacent thread stack.
				 */
				_next_stack -= (PTHREAD_STACK_DEFAULT
				    + PTHREAD_STACK_GUARD);

				/* Unlock the garbage collector mutex. */
				if (pthread_mutex_unlock(&_gc_mutex) != 0)
					PANIC("Cannot unlock gc mutex");

				/* Stack: */
				if (mmap(stack, PTHREAD_STACK_DEFAULT,
				    PROT_READ | PROT_WRITE, MAP_STACK,
				    -1, 0) == MAP_FAILED) {
					ret = EAGAIN;
					free(new_thread);
				}
			}
		}
		/*
		 * The user wants a stack of a particular size.  Let's hope they
		 * really know what they want, and simply malloc the stack.
		 */
		else if ((stack = (void *) malloc(pattr->stacksize_attr))
		    == NULL) {
			/* Insufficient memory to create a thread: */
			ret = EAGAIN;
			free(new_thread);
		}

		/* Check for errors: */
		if (ret != 0) {
		} else {
			/* Initialise the thread structure: */
			memset(new_thread, 0, sizeof(struct pthread));
			new_thread->tcb = _libc_allocate_tls();
			if (new_thread->tcb == NULL)
				PANIC("Cannot allocate TLS and TCB");
			new_thread->slice_usec = -1;
			new_thread->stack = stack;
			new_thread->start_routine = start_routine;
			new_thread->arg = arg;

			new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
			    PTHREAD_CANCEL_DEFERRED;

			/*
			 * Write a magic value to the thread structure
			 * to help identify valid ones:
			 */
			new_thread->magic = PTHREAD_MAGIC;

			/* Initialise the thread for signals: */
			new_thread->sigmask = curthread->sigmask;
			new_thread->sigmask_seqno = 0;

			/* Initialize the signal frame: */
			new_thread->curframe = NULL;

			/* Initialise the jump buffer: */
			_setjmp(new_thread->ctx.jb);

			/*
			 * Set up new stack frame so that it looks like it
			 * returned from a longjmp() to the beginning of
			 * _thread_start().
			 */
			SET_RETURN_ADDR_JB(new_thread->ctx.jb, _thread_start);

			/* The stack starts high and builds down: */
			SET_STACK_JB(new_thread->ctx.jb,
			    (long)new_thread->stack + pattr->stacksize_attr
			    - sizeof(double));

			/* Copy the thread attributes: */
			memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr));

			/*
			 * Check if this thread is to inherit the scheduling
			 * attributes from its parent:
			 */
			if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
				/* Copy the scheduling attributes: */
				new_thread->base_priority =
				    curthread->base_priority &
				    ~PTHREAD_SIGNAL_PRIORITY;
				new_thread->attr.prio =
				    curthread->base_priority &
				    ~PTHREAD_SIGNAL_PRIORITY;
				new_thread->attr.sched_policy =
				    curthread->attr.sched_policy;
			} else {
				/*
				 * Use just the thread priority, leaving the
				 * other scheduling attributes as their
				 * default values:
				 */
				new_thread->base_priority =
				    new_thread->attr.prio;
			}
			new_thread->active_priority = new_thread->base_priority;
			new_thread->inherited_priority = 0;

			/* Initialize joiner to NULL (no joiner): */
			new_thread->joiner = NULL;

			/* Initialize the mutex queue: */
			TAILQ_INIT(&new_thread->mutexq);

			/* Initialise hooks in the thread structure: */
			new_thread->specific_data = NULL;
			new_thread->cleanup = NULL;
			new_thread->flags = 0;
			new_thread->poll_data.nfds = 0;
			new_thread->poll_data.fds = NULL;
			new_thread->continuation = NULL;

			/*
			 * Defer signals to protect the scheduling queues
			 * from access by the signal handler:
			 */
			_thread_kern_sig_defer();

			/*
			 * Initialise the unique id which GDB uses to
			 * track threads.
			 */
			new_thread->uniqueid = next_uniqueid++;

			/*
			 * Check if the garbage collector thread
			 * needs to be started.
			 */
			f_gc = (TAILQ_FIRST(&_thread_list) == _thread_initial);

			/* Add the thread to the linked list of all threads: */
			TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle);

			if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
				new_thread->flags |= PTHREAD_FLAGS_SUSPENDED;
				new_thread->state = PS_SUSPENDED;
			} else {
				new_thread->state = PS_RUNNING;
				PTHREAD_PRIOQ_INSERT_TAIL(new_thread);
			}

			/*
			 * Undefer and handle pending signals, yielding
			 * if necessary.
			 */
			_thread_kern_sig_undefer();

			/* Return a pointer to the thread structure: */
			(*thread) = new_thread;

			if (f_gc != 0) {
				/* Install the scheduling timer: */
				itimer.it_interval.tv_sec = 0;
				itimer.it_interval.tv_usec = _clock_res_usec;
				itimer.it_value = itimer.it_interval;
				if (setitimer(_ITIMER_SCHED_TIMER, &itimer,
				    NULL) != 0)
					PANIC("Cannot set interval timer");
			}

			/* Schedule the new user thread: */
			_thread_kern_sched(NULL);

			/*
			 * Start a garbage collector thread
			 * if necessary.
			 */
			if (f_gc && pthread_create(&gc_thread,NULL,
				    _thread_gc,NULL) != 0)
				PANIC("Can't create gc thread");

		}
	}

	/* Return the status: */
	return (ret);
}
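
/*
 * A sketch of the caller-supplied-stack path handled above, from the
 * application side (assumed example code; STACK_SIZE is illustrative):
 * supply our own stack via the portable pthread_attr_setstack()
 * interface.
 */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

#define	STACK_SIZE	(256 * 1024)

static int
create_with_stack(pthread_t *td, void *(*fn)(void *), void *arg)
{
	pthread_attr_t attr;
	void *stk;
	int ret;

	if ((stk = malloc(STACK_SIZE)) == NULL)
		return (EAGAIN);
	if ((ret = pthread_attr_init(&attr)) != 0) {
		free(stk);
		return (ret);
	}
	if ((ret = pthread_attr_setstack(&attr, stk, STACK_SIZE)) == 0)
		ret = pthread_create(td, &attr, fn, arg);
	(void)pthread_attr_destroy(&attr);
	if (ret != 0)
		free(stk);
	return (ret);
}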
Example #23
int
accept(int fd, struct sockaddr * name, socklen_t *namelen)
{
	struct pthread	*curthread = _get_curthread();
	int             ret;
	int		newfd;
	enum fd_entry_mode init_mode;

	/* This is a cancellation point: */
	_thread_enter_cancellation_point();

	/* Lock the file descriptor: */
	if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) {
		/* Enter a loop to wait for a connection request: */
		while ((ret = _thread_sys_accept(fd, name, namelen)) < 0) {
			/* Check if the socket is to block: */
			if ((_thread_fd_table[fd]->status_flags->flags & O_NONBLOCK) == 0 &&
			    (errno == EWOULDBLOCK || errno == EAGAIN)) {
				/* Save the socket file descriptor: */
				curthread->data.fd.fd = fd;
				curthread->data.fd.fname = __FILE__;
				curthread->data.fd.branch = __LINE__;

				/* Set the timeout: */
				_thread_kern_set_timeout(NULL);
				curthread->interrupted = 0;
				curthread->closing_fd = 0;

				/* Schedule the next thread: */
				_thread_kern_sched_state(PS_FDR_WAIT, __FILE__,
							 __LINE__);

				/* Check if the wait was interrupted: */
				if (curthread->interrupted) {
					/* Return an error status: */
					errno = EINTR;
					ret = -1;
					break;
				} else if (curthread->closing_fd) {
					/* Return an error status: */
					errno = EBADF;
					ret = -1;
					break;
				}
			} else {
				/*
				 * Another error has occurred, so exit the
				 * loop here: 
				 */
				break;
			}
		}

		/*
		 * If no error occurred, initialize the file descriptor
		 * table for the new socket. If the client's view of the
		 * status_flags for fd is blocking, then force newfd
		 * to be viewed as blocking too.
		 */
		if (ret != -1) {
			newfd = ret;

			if ((_thread_fd_table[fd]->status_flags->flags & O_NONBLOCK) == 0)
				init_mode = FD_INIT_BLOCKING;
			else
				init_mode = FD_INIT_NEW;
			if ((ret = _thread_fd_table_init(newfd, init_mode, NULL)) != -1)
				ret = newfd;
			else {
				/* Quietly close the new fd: */
				_thread_sys_close(newfd);
			}
		}

		/* Unlock the file descriptor: */
		_FD_UNLOCK(fd, FD_RDWR);
	}

	/* No longer in a cancellation point: */
	_thread_leave_cancellation_point();

	/* Return the socket file descriptor or -1 on error: */
	return (ret);
}
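
/*
 * The wrapper above emulates a blocking accept() over a socket the
 * library keeps non-blocking.  A user-level sketch of the same
 * pattern using poll() (assumed example code, not the library's
 * internals):
 */
#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

static int
accept_blocking(int fd, struct sockaddr *name, socklen_t *namelen)
{
	struct pollfd pfd;
	int ret;

	for (;;) {
		if ((ret = accept(fd, name, namelen)) >= 0)
			return (ret);
		if (errno != EWOULDBLOCK && errno != EAGAIN)
			return (-1);
		/* Wait for a connection request to arrive: */
		pfd.fd = fd;
		pfd.events = POLLIN;
		if (poll(&pfd, 1, -1) < 0 && errno != EINTR)
			return (-1);
	}
}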
Example #24
ssize_t
readv(int fd, const struct iovec * iov, int iovcnt)
{
	struct pthread	*curthread = _get_curthread();
	ssize_t	ret;
	int	type;

	/* This is a cancellation point: */
	_thread_enter_cancellation_point();

	/* Lock the file descriptor for read: */
	if ((ret = _FD_LOCK(fd, FD_READ, NULL)) == 0) {
		/* Get the read/write mode type: */
		type = _thread_fd_table[fd]->status_flags->flags & O_ACCMODE;

		/* Check if the file is not open for read: */
		if (type != O_RDONLY && type != O_RDWR) {
			/* File is not open for read: */
			errno = EBADF;
			_FD_UNLOCK(fd, FD_READ);
			_thread_leave_cancellation_point();
			return (-1);
		}

		/* Perform a non-blocking readv syscall: */
		while ((ret = _thread_sys_readv(fd, iov, iovcnt)) < 0) {
			if ((_thread_fd_table[fd]->status_flags->flags & O_NONBLOCK) == 0 &&
			    (errno == EWOULDBLOCK || errno == EAGAIN)) {
				curthread->data.fd.fd = fd;
				_thread_kern_set_timeout(_FD_RCVTIMEO(fd));

				/* Reset the interrupted operation flag: */
				curthread->interrupted = 0;
				curthread->closing_fd = 0;
				curthread->timeout = 0;

				_thread_kern_sched_state(PS_FDR_WAIT,
				    __FILE__, __LINE__);

				/*
				 * Check if the operation was
				 * interrupted by a signal,
				 * a closing fd or timed out.
				 */
				if (curthread->interrupted) {
					errno = EINTR;
					ret = -1;
					break;
				} else if (curthread->closing_fd) {
					errno = EBADF;
					ret = -1;
					break;
				} else if (curthread->timeout) {
					errno = EWOULDBLOCK;
					ret = -1;
					break;
				}
			} else {
				break;
			}
		}
		_FD_UNLOCK(fd, FD_READ);
	}

	/* No longer in a cancellation point: */
	_thread_leave_cancellation_point();

	return (ret);
}
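
/*
 * A usage sketch for readv() (assumed example code): scatter a
 * single read across a header buffer and a payload buffer.
 */
#include <sys/uio.h>
#include <unistd.h>

static ssize_t
read_header_and_payload(int fd, void *hdr, size_t hdrlen,
    void *payload, size_t paylen)
{
	struct iovec iov[2];

	iov[0].iov_base = hdr;
	iov[0].iov_len = hdrlen;
	iov[1].iov_base = payload;
	iov[1].iov_len = paylen;
	return (readv(fd, iov, 2));
}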
Example #25
void 
_thread_cleanupspecific(void)
{
	struct pthread	*curthread = _get_curthread();
	void		(*destructor)( void *);
	const void	*data = NULL;
	int		key;
	int		i;

	if (curthread->specific == NULL)
		return;

	/* Lock the key table: */
	THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
	for (i = 0; (i < PTHREAD_DESTRUCTOR_ITERATIONS) &&
	    (curthread->specific_data_count > 0); i++) {
		for (key = 0; (key < PTHREAD_KEYS_MAX) &&
		    (curthread->specific_data_count > 0); key++) {
			destructor = NULL;

			if (_thread_keytable[key].allocated &&
			    (curthread->specific[key].data != NULL)) {
				if (curthread->specific[key].seqno ==
				    _thread_keytable[key].seqno) {
					data = curthread->specific[key].data;
					destructor = _thread_keytable[key].destructor;
				}
				curthread->specific[key].data = NULL;
				curthread->specific_data_count--;
			}
			else if (curthread->specific[key].data != NULL) {
				/* 
				 * This can happen if the key is deleted via
				 * pthread_key_delete without first setting the value
				 * to NULL in all threads.  POSIX says that the
				 * destructor is not invoked in this case.
				 */
				curthread->specific[key].data = NULL;
				curthread->specific_data_count--;
			}

			/*
			 * If there is a destructor, call it
			 * with the key table entry unlocked:
			 */
			if (destructor != NULL) {
				/*
				 * Don't hold the lock while calling the
				 * destructor:
				 */
				THR_LOCK_RELEASE(curthread, &_keytable_lock);
				destructor(__DECONST(void *, data));
				THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
			}
		}
	}
	THR_LOCK_RELEASE(curthread, &_keytable_lock);
	free(curthread->specific);
	curthread->specific = NULL;
	if (curthread->specific_data_count > 0)
		stderr_debug("Thread %p has exited with leftover "
		    "thread-specific data after %d destructor iterations\n",
		    curthread, PTHREAD_DESTRUCTOR_ITERATIONS);
}
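
/*
 * A usage sketch of the destructor machinery above (assumed example
 * code): a key whose destructor frees each thread's private buffer
 * at thread exit.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t buf_key;
static pthread_once_t buf_once = PTHREAD_ONCE_INIT;

static void
buf_free(void *value)
{
	/* Invoked at thread exit for each non-NULL value: */
	free(value);
}

static void
buf_key_init(void)
{
	(void)pthread_key_create(&buf_key, buf_free);
}

static void *
buf_get(void)
{
	void *p;

	(void)pthread_once(&buf_once, buf_key_init);
	if ((p = pthread_getspecific(buf_key)) == NULL &&
	    (p = malloc(4096)) != NULL)
		(void)pthread_setspecific(buf_key, p);
	return (p);
}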
Example #26
int
_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
		       const struct timespec * abstime)
{
	struct pthread	*curthread = _get_curthread();
	int	rval = 0;
	int	done = 0;
	int	interrupted = 0;
	int	seqno;

	_thread_enter_cancellation_point();
	
	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000) {
		/* Leave the cancellation point before returning: */
		_thread_leave_cancellation_point();
		return (EINVAL);
	}
	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0) {
		_thread_leave_cancellation_point();
		return (rval);
	}

	/*
	 * Enter a loop waiting for a condition signal or broadcast
	 * to wake up this thread.  A loop is needed in case the waiting
	 * thread is interrupted by a signal to execute a signal handler.
	 * It is not (currently) possible to remain in the waiting queue
	 * while running a handler.  Instead, the thread is interrupted
	 * and backed out of the waiting queue prior to executing the
	 * signal handler.
	 */
	do {
		/* Lock the condition variable structure: */
		_SPINLOCK(&(*cond)->lock);

		/*
		 * If the condvar was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*cond)->c_queue);
			(*cond)->c_flags |= COND_FLAGS_INITED;
		}

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
			    ((*cond)->c_mutex != *mutex))) {
				/* Return invalid argument error: */
				rval = EINVAL;

				/* Unlock the condition variable structure: */
				_SPINUNLOCK(&(*cond)->lock);
			} else {
				/* Set the wakeup time: */
				curthread->wakeup_time.tv_sec =
				    abstime->tv_sec;
				curthread->wakeup_time.tv_nsec =
				    abstime->tv_nsec;

				/* Reset the timeout and interrupted flags: */
				curthread->timeout = 0;
				curthread->interrupted = 0;

				/*
				 * Queue the running thread for the condition
				 * variable:
				 */
				cond_queue_enq(*cond, curthread);

				/* Remember the mutex and sequence number: */
				(*cond)->c_mutex = *mutex;
				seqno = (*cond)->c_seqno;

				/* Unlock the mutex: */
				if ((rval = _mutex_cv_unlock(mutex)) != 0) {
					/*
					 * Cannot unlock the mutex, so remove
					 * the running thread from the condition
					 * variable queue: 
					 */
					cond_queue_remove(*cond, curthread);

					/* Check for no more waiters: */
					if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
						(*cond)->c_mutex = NULL;

					/* Unlock the condition variable structure: */
					_SPINUNLOCK(&(*cond)->lock);
				} else {
					/*
					 * Schedule the next thread and unlock
					 * the condition variable structure:
					 */
					_thread_kern_sched_state_unlock(PS_COND_WAIT,
				  	     &(*cond)->lock, __FILE__, __LINE__);

					done = (seqno != (*cond)->c_seqno);

					interrupted = curthread->interrupted;

					/*
					 * Check if the wait was interrupted
					 * (canceled) or needs to be resumed
					 * after handling a signal.
					 */
					if (interrupted != 0) {
						/*
						 * Lock the mutex and ignore any
						 * errors.  Note that even
						 * though this thread may have
						 * been canceled, POSIX requires
						 * that the mutex be reacquired
						 * prior to cancellation.
						 */
						(void)_mutex_cv_lock(mutex);
					} else {
						/*
						 * Lock the condition variable
						 * while removing the thread.
						 */
						_SPINLOCK(&(*cond)->lock);

						cond_queue_remove(*cond,
						    curthread);

						/* Check for no more waiters: */
						if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
							(*cond)->c_mutex = NULL;

						_SPINUNLOCK(&(*cond)->lock);

						/* Lock the mutex: */
						rval = _mutex_cv_lock(mutex);

						/*
						 * Return ETIMEDOUT if the wait
						 * timed out and there wasn't an
						 * error locking the mutex:
						 */
						if ((curthread->timeout != 0)
						    && rval == 0)
							rval = ETIMEDOUT;
					}
				}
			}
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Unlock the condition variable structure: */
			_SPINUNLOCK(&(*cond)->lock);

			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		if ((interrupted != 0) && (curthread->continuation != NULL))
			curthread->continuation((void *) curthread);
	} while ((done == 0) && (rval == 0));

	_thread_leave_cancellation_point();

	/* Return the completion status: */
	return (rval);
}
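
/*
 * A usage sketch (assumed example code): build the absolute timeout
 * the function above expects from CLOCK_REALTIME and wait for a
 * predicate in the standard loop.
 */
#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int ready;

static int
wait_ready(time_t seconds)
{
	struct timespec abstime;
	int ret = 0;

	(void)clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += seconds;

	(void)pthread_mutex_lock(&lock);
	while (!ready && ret == 0)
		ret = pthread_cond_timedwait(&cv, &lock, &abstime);
	(void)pthread_mutex_unlock(&lock);

	/* Returns 0 if signalled in time, ETIMEDOUT otherwise. */
	return (ret);
}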
Example #27
ssize_t
_writev(int fd, const struct iovec * iov, int iovcnt)
{
	struct pthread	*curthread = _get_curthread();
	int	blocking;
	int	idx = 0;
	int	type;
	ssize_t cnt;
	ssize_t n;
	ssize_t num = 0;
	ssize_t	ret;
	struct iovec liov[20];
	struct iovec *p_iov = liov;

	/* Check if the array size exceeds the compiled-in size: */
	if (iovcnt > (int)(sizeof(liov) / sizeof(struct iovec))) {
		/* Allocate memory for the local array: */
		if ((p_iov = (struct iovec *)
		    malloc(iovcnt * sizeof(struct iovec))) == NULL) {
			/* Insufficient memory: */
			errno = ENOMEM;
			return (-1);
		}
	}

	/* Copy the caller's array so that it can be modified locally: */
	memcpy(p_iov,iov,iovcnt * sizeof(struct iovec));

	/* Lock the file descriptor for write: */
	if ((ret = _FD_LOCK(fd, FD_WRITE, NULL)) == 0) {
		/* Get the read/write mode type: */
		type = _thread_fd_getflags(fd) & O_ACCMODE;

		/* Check if the file is not open for write: */
		if (type != O_WRONLY && type != O_RDWR) {
			/* File is not open for write: */
			errno = EBADF;
			_FD_UNLOCK(fd, FD_WRITE);
			/* Free any locally allocated copy of the array: */
			if (p_iov != liov)
				free(p_iov);
			return (-1);
		}

		/* Check if file operations are to block */
		blocking = ((_thread_fd_getflags(fd) & O_NONBLOCK) == 0);

		/*
		 * Loop while no error occurs and until the expected number
		 * of bytes are written if performing a blocking write:
		 */
		while (ret == 0) {
			/* Perform a non-blocking write syscall: */
			n = __sys_extpwritev(fd, &p_iov[idx], iovcnt - idx, O_FNONBLOCKING, -1);

			/* Check if one or more bytes were written: */
			if (n > 0) {
				/*
				 * Keep a count of the number of bytes
				 * written:
				 */
				num += n;

				/*
				 * Enter a loop to check if a short write
				 * occurred and move the index to the
				 * array entry where the short write
				 * ended:
				 */
				cnt = n;
				while (cnt > 0 && idx < iovcnt) {
					/*
					 * If the residual count equals or
					 * exceeds the size of this vector,
					 * then it was completely written:
					 */
					if (cnt >= p_iov[idx].iov_len)
						/*
						 * Decrement the residual
						 * count and increment the
						 * index to the next array
						 * entry:
						 */
						cnt -= p_iov[idx++].iov_len;
					else {
						/*
						 * This entry was only
						 * partially written, so
						 * adjust its length
						 * and base pointer ready
						 * for the next write:
						 */
						p_iov[idx].iov_len -= cnt;
						p_iov[idx].iov_base += cnt;
						cnt = 0;
					}
				}
			} else if (n == 0) {
				/*
				 * Avoid an infinite loop if the last iov_len is
				 * 0.
				 */
				while (idx < iovcnt && p_iov[idx].iov_len == 0)
					idx++;

				if (idx == iovcnt) {
					ret = num;
					break;
				}
			}
                       
			/*
			 * If performing a blocking write, check if the
			 * write would have blocked or if some bytes
			 * were written but there are still more to
			 * write:
			 */
			if (blocking && ((n < 0 && (errno == EWOULDBLOCK ||
			    errno == EAGAIN)) || (n >= 0 && idx < iovcnt))) {
				curthread->data.fd.fd = fd;
				_thread_kern_set_timeout(NULL);

				/* Reset the interrupted operation flag: */
				curthread->interrupted = 0;

				_thread_kern_sched_state(PS_FDW_WAIT,
				    __FILE__, __LINE__);

				/*
				 * Check if the operation was
				 * interrupted by a signal
				 */
				if (curthread->interrupted) {
					if (num > 0) {
						/* Return partial success: */
						ret = num;
					} else {
						/* Return an error: */
						errno = EINTR;
						ret = -1;
					}
				}

			/*
			 * If performing a non-blocking write,
			 * just return whatever the write syscall did:
			 */
			} else if (!blocking) {
				/* A non-blocking call might return zero: */
				ret = n;
				break;

			/*
			 * If there was an error, return partial success
			 * (if any bytes were written) or else the error:
			 */
			} else if (n < 0) {
				if (num > 0)
					ret = num;
				else
					ret = n;

			/* Check if the write has completed: */
			} else if (idx == iovcnt)
				/* Return the number of bytes written: */
				ret = num;
		}
		_FD_UNLOCK(fd, FD_WRITE);
	}

	/* If memory was allocated for the array, free it: */
	if (p_iov != liov)
		free(p_iov);

	return (ret);
}
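
/*
 * The index/length adjustment above is the standard way to resume a
 * short writev().  A standalone sketch of the same loop (assumed
 * example code) over a caller-owned iovec array, which it modifies:
 */
#include <sys/uio.h>
#include <unistd.h>

static ssize_t
writev_all(int fd, struct iovec *iov, int iovcnt)
{
	ssize_t n, total = 0;
	int idx = 0;

	while (idx < iovcnt) {
		if ((n = writev(fd, &iov[idx], iovcnt - idx)) < 0)
			return (total > 0 ? total : -1);
		total += n;
		/* Skip the vectors that were written completely... */
		while (idx < iovcnt && (size_t)n >= iov[idx].iov_len) {
			n -= iov[idx].iov_len;
			idx++;
		}
		/* ...and adjust a partially written one: */
		if (idx < iovcnt && n > 0) {
			iov[idx].iov_base = (char *)iov[idx].iov_base + n;
			iov[idx].iov_len -= n;
		}
	}
	return (total);
}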
Example #28
pthread_t
pthread_self(void)
{
	/* Return the running thread pointer: */
	return (_get_curthread());
}
Example #29
 *
 *   4) Any thread (first found/easiest to deliver) that has the
 *      signal unmasked.
 */

#ifndef SYSTEM_SCOPE_ONLY

static void *
sig_daemon(void *arg __unused)
{
	int i;
	kse_critical_t crit;
	struct timespec ts;
	sigset_t set;
	struct kse *curkse;
	struct pthread *curthread = _get_curthread();

	DBG_MSG("signal daemon started(%p)\n", curthread);
	
	curthread->name = strdup("signal thread");
	crit = _kse_critical_enter();
	curkse = _get_curkse();

	/*
	 * The daemon thread is a bound thread and must be created
	 * with all signals masked.
	 */
#if 0	
	SIGFILLSET(set);
	__sys_sigprocmask(SIG_SETMASK, &set, NULL);
#endif	
Example #30
/*
 * Some notes on new thread creation and first-time initialization
 * to enable multi-threading.
 *
 * There are basically two things that need to be done.
 *
 *   1) The internal library variables must be initialized.
 *   2) Upcalls need to be enabled to allow multiple threads
 *      to be run.
 *
 * The first may be done as a result of other pthread functions
 * being called.  When _thr_initial is null, _libpthread_init is
 * called to initialize the internal variables; this also creates
 * or sets the initial thread.  It'd be nice to automatically
 * have _libpthread_init called on program execution so we don't
 * have to have checks throughout the library.
 *
 * The second part is only triggered by the creation of the first
 * thread (other than the initial/main thread).  If the thread
 * being created is a scope system thread, then a new KSE/KSEG
 * pair needs to be allocated.  Also, if upcalls haven't been
 * enabled on the initial thread's KSE, they must be now that
 * there is more than one thread; this could be delayed until
 * the initial KSEG has more than one thread.
 */
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
	       void *(*start_routine) (void *), void *arg)
{
	struct pthread *curthread, *new_thread;
	struct kse *kse = NULL;
	struct kse_group *kseg = NULL;
	kse_critical_t crit;
	int ret = 0;

	if (_thr_initial == NULL)
		_libpthread_init(NULL);

	/*
	 * Turn on threaded mode; if that fails, there is no point
	 * in doing further work.
	 */
	if (_kse_isthreaded() == 0 && _kse_setthreaded(1)) {
		return (EAGAIN);
	}
	curthread = _get_curthread();

	/*
	 * Allocate memory for the thread structure.
	 * Some functions use malloc, so don't put it
	 * in a critical region.
	 */
	if ((new_thread = _thr_alloc(curthread)) == NULL) {
		/* Insufficient memory to create a thread: */
		ret = EAGAIN;
	} else {
		/* Check if default thread attributes are required: */
		if (attr == NULL || *attr == NULL)
			/* Use the default thread attributes: */
			new_thread->attr = _pthread_attr_default;
		else {
			new_thread->attr = *(*attr);
			if ((*attr)->sched_inherit == PTHREAD_INHERIT_SCHED) {
				/* Inherit the scheduling contention scope: */
				if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
					new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
				else
					new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
				/*
				 * The scheduling policy and parameters will be
				 * inherited by the code that follows.
				 */
			}
		}
		if (_thread_scope_system > 0)
			new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
		else if ((_thread_scope_system < 0)
		    && (thread != &_thr_sig_daemon))
			new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
		if (create_stack(&new_thread->attr) != 0) {
			/* Insufficient memory to create a stack: */
			ret = EAGAIN;
			_thr_free(curthread, new_thread);
		}
		else if (((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) &&
		    (((kse = _kse_alloc(curthread, 1)) == NULL)
		    || ((kseg = _kseg_alloc(curthread)) == NULL))) {
			/* Insufficient memory to create a new KSE/KSEG: */
			ret = EAGAIN;
			if (kse != NULL) {
				kse->k_kcb->kcb_kmbx.km_flags |= KMF_DONE;
				_kse_free(curthread, kse);
			}
			free_stack(&new_thread->attr);
			_thr_free(curthread, new_thread);
		}
		else {
			if (kseg != NULL) {
				/* Add the KSE to the KSEG's list of KSEs. */
				TAILQ_INSERT_HEAD(&kseg->kg_kseq, kse, k_kgqe);
				kseg->kg_ksecount = 1;
				kse->k_kseg = kseg;
				kse->k_schedq = &kseg->kg_schedq;
			}
			/*
			 * Write a magic value to the thread structure
			 * to help identify valid ones:
			 */
			new_thread->magic = THR_MAGIC;

			new_thread->slice_usec = -1;
			new_thread->start_routine = start_routine;
			new_thread->arg = arg;
			new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
			    PTHREAD_CANCEL_DEFERRED;

			/* No thread is wanting to join to this one: */
			new_thread->joiner = NULL;

			/*
			 * Initialize the machine context.
			 * Enter a critical region to get consistent context.
			 */
			crit = _kse_critical_enter();
			THR_GETCONTEXT(&new_thread->tcb->tcb_tmbx.tm_context);
			/* Initialize the thread for signals: */
			new_thread->sigmask = curthread->sigmask;
			_kse_critical_leave(crit);

			new_thread->tcb->tcb_tmbx.tm_udata = new_thread;
			new_thread->tcb->tcb_tmbx.tm_context.uc_sigmask =
			    new_thread->sigmask;
			new_thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_size =
			    new_thread->attr.stacksize_attr;
			new_thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_sp =
			    new_thread->attr.stackaddr_attr;
			makecontext(&new_thread->tcb->tcb_tmbx.tm_context,
			    (void (*)(void))thread_start, 3, new_thread,
			    start_routine, arg);
			/*
			 * Check if this thread is to inherit the scheduling
			 * attributes from its parent:
			 */
			if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
				/*
				 * Copy the scheduling attributes.
				 * Lock the scheduling lock to get consistent
				 * scheduling parameters.
				 */
				THR_SCHED_LOCK(curthread, curthread);
				new_thread->base_priority =
				    curthread->base_priority &
				    ~THR_SIGNAL_PRIORITY;
				new_thread->attr.prio =
				    curthread->base_priority &
				    ~THR_SIGNAL_PRIORITY;
				new_thread->attr.sched_policy =
				    curthread->attr.sched_policy;
				THR_SCHED_UNLOCK(curthread, curthread);
			} else {
				/*
				 * Use just the thread priority, leaving the
				 * other scheduling attributes as their
				 * default values:
				 */
				new_thread->base_priority =
				    new_thread->attr.prio;
			}
			new_thread->active_priority = new_thread->base_priority;
			new_thread->inherited_priority = 0;

			/* Initialize the mutex queue: */
			TAILQ_INIT(&new_thread->mutexq);

			/* Initialise hooks in the thread structure: */
			new_thread->specific = NULL;
			new_thread->specific_data_count = 0;
			new_thread->cleanup = NULL;
			new_thread->flags = 0;
			new_thread->tlflags = 0;
			new_thread->sigbackout = NULL;
			new_thread->continuation = NULL;
			new_thread->wakeup_time.tv_sec = -1;
			new_thread->lock_switch = 0;
			sigemptyset(&new_thread->sigpend);
			new_thread->check_pending = 0;
			new_thread->locklevel = 0;
			new_thread->rdlock_count = 0;
			new_thread->sigstk.ss_sp = 0;
			new_thread->sigstk.ss_size = 0;
			new_thread->sigstk.ss_flags = SS_DISABLE;
			new_thread->oldsigmask = NULL;

			if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {
				new_thread->state = PS_SUSPENDED;
				new_thread->flags = THR_FLAGS_SUSPENDED;
			}
			else
				new_thread->state = PS_RUNNING;

			/*
			 * System scope threads have their own kse and
			 * kseg.  Process scope threads are all hung
			 * off the main process kseg.
			 */
			if ((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) == 0) {
				new_thread->kseg = _kse_initial->k_kseg;
				new_thread->kse = _kse_initial;
			}
			else {
				kse->k_curthread = NULL;
				kse->k_kseg->kg_flags |= KGF_SINGLE_THREAD;
				new_thread->kse = kse;
				new_thread->kseg = kse->k_kseg;
				kse->k_kcb->kcb_kmbx.km_udata = kse;
				kse->k_kcb->kcb_kmbx.km_curthread = NULL;
			}

			/*
			 * Schedule the new thread starting a new KSEG/KSE
			 * pair if necessary.
			 */
			ret = _thr_schedule_add(curthread, new_thread);
			if (ret != 0)
				free_thread(curthread, new_thread);
			else {
				/* Return a pointer to the thread structure: */
				(*thread) = new_thread;
			}
		}
	}

	/* Return the status: */
	return (ret);
}
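
/*
 * A sketch of how an application requests the system-scope path
 * above, which gives the thread its own KSE/KSEG (assumed example
 * code; process scope is the default):
 */
#include <pthread.h>

static int
create_system_scope(pthread_t *td, void *(*fn)(void *), void *arg)
{
	pthread_attr_t attr;
	int ret;

	if ((ret = pthread_attr_init(&attr)) != 0)
		return (ret);
	/* A 1:1 thread, scheduled by the kernel rather than the UTS: */
	if ((ret = pthread_attr_setscope(&attr,
	    PTHREAD_SCOPE_SYSTEM)) == 0)
		ret = pthread_create(td, &attr, fn, arg);
	(void)pthread_attr_destroy(&attr);
	return (ret);
}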