Example No. 1
int
_sem_post(sem_t *sem)
{
	int	retval;

	_SEM_CHECK_VALIDITY(sem);

	/*
	 * sem_post() is required to be safe to call from within signal
	 * handlers.  Thus, we must defer signals.
	 */
	_thread_kern_sig_defer();

	pthread_mutex_lock(&(*sem)->lock);

	(*sem)->count++;
	if ((*sem)->nwaiters > 0)
		pthread_cond_signal(&(*sem)->gtzero);

	pthread_mutex_unlock(&(*sem)->lock);

	_thread_kern_sig_undefer();
	retval = 0;
	/* _SEM_CHECK_VALIDITY() jumps here when the semaphore is invalid: */
RETURN:
	return retval;
}
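A minimal caller sketch, assuming only the standard POSIX semaphore and signal interfaces (handle_sigusr1 and worker are illustrative names): because _sem_post() defers signals as shown above, sem_post() may safely be called from a handler.

#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <unistd.h>

static sem_t	work_ready;		/* shared with the signal handler */

/* sem_post() is async-signal-safe, so calling it here is legal. */
static void
handle_sigusr1(int sig)
{
	(void)sig;
	sem_post(&work_ready);
}

static void *
worker(void *arg)
{
	sem_wait(&work_ready);		/* blocks until the handler posts */
	write(STDOUT_FILENO, "signal consumed\n", 16);
	return (arg);
}

int
main(void)
{
	pthread_t tid;

	sem_init(&work_ready, 0, 0);
	signal(SIGUSR1, handle_sigusr1);
	pthread_create(&tid, NULL, worker, NULL);
	raise(SIGUSR1);			/* handler runs and posts the semaphore */
	pthread_join(tid, NULL);
	return (0);
}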
Example No. 2
/*
 * Validate the signal number and thread.  If valid, process the signal.
 */
int
pthread_kill(pthread_t pthread, int sig)
{
	int ret;

	if (sig >= 0 && sig < NSIG) {
		ret = _find_thread(pthread);
		if (ret == 0 && sig != 0) {
			if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) {
				_thread_kern_sig_defer();
				if (pthread->state == PS_SIGWAIT &&
				    sigismember(pthread->data.sigwait, sig)) {
					PTHREAD_NEW_STATE(pthread,PS_RUNNING);
					pthread->signo = sig;
				} else {
					_thread_kill_siginfo(sig);
					_thread_signal(pthread,sig);
				}
				_thread_kern_sig_undefer();
			}
		}
	} else
		ret = EINVAL;

	return ret;
}
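A usage sketch against the public API (noop, idle, and the sleep()-based synchronization are illustrative shortcuts, not a robust handshake): signal number 0 exercises only the validation path above and delivers nothing.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void
noop(int sig)
{
	(void)sig;
}

static void *
idle(void *arg)
{
	pause();			/* sleep until a signal arrives */
	return (arg);
}

int
main(void)
{
	pthread_t tid;

	signal(SIGUSR1, noop);		/* avoid the default (fatal) action */
	pthread_create(&tid, NULL, idle, NULL);
	sleep(1);			/* crude: let the thread reach pause() */
	printf("exists: %d\n", pthread_kill(tid, 0) == 0);	/* sig 0: validate only */
	pthread_kill(tid, SIGUSR1);	/* interrupts pause() in the target */
	pthread_join(tid, NULL);
	return (0);
}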
Example No. 3
void
_mutex_lock_backout(pthread_t pthread)
{
	struct pthread_mutex	*mutex;

	/*
	 * Defer signals to protect the scheduling queues from
	 * access by the signal handler:
	 */
	_thread_kern_sig_defer();
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
		mutex = pthread->data.mutex;

		/* Lock the mutex structure: */
		_SPINLOCK(&mutex->lock);

		mutex_queue_remove(mutex, pthread);

		/* This thread is no longer waiting for the mutex: */
		pthread->data.mutex = NULL;

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&mutex->lock);

	}
	/*
	 * Undefer and handle pending signals, yielding if
	 * necessary:
	 */
	_thread_kern_sig_undefer();
}
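The library brackets its queue manipulation with _thread_kern_sig_defer()/_thread_kern_sig_undefer(). An application-level analogue of the same pattern, sketched with pthread_sigmask() (the internal calls achieve this without a system call):

#include <pthread.h>
#include <signal.h>

static int	shared_state;

static void
update_shared_state(void)
{
	sigset_t all, old;

	sigfillset(&all);
	pthread_sigmask(SIG_BLOCK, &all, &old);		/* defer signals */

	shared_state++;		/* a handler can never see this half-done */

	pthread_sigmask(SIG_SETMASK, &old, NULL);	/* undefer; pending signals fire */
}

int
main(void)
{
	update_shared_state();
	return (0);
}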
Example No. 4
int
_pthread_detach(pthread_t pthread)
{
	int             rval = 0;

	/* Check for invalid calling parameters: */
	if (pthread == NULL || pthread->magic != PTHREAD_MAGIC)
		/* Return an invalid argument error: */
		rval = EINVAL;

	/* Check if the thread has not been detached: */
	else if ((pthread->attr.flags & PTHREAD_DETACHED) == 0) {
		/* Flag the thread as detached: */
		pthread->attr.flags |= PTHREAD_DETACHED;

		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Check if there is a joiner: */
		if (pthread->joiner != NULL) {
			struct pthread	*joiner = pthread->joiner;

			/* Make the thread runnable: */
			PTHREAD_NEW_STATE(joiner, PS_RUNNING);

			/* Set the return value for the woken thread: */
			joiner->join_status.error = ESRCH;
			joiner->join_status.ret = NULL;
			joiner->join_status.thread = NULL;

			/*
			 * Disconnect the joiner from the thread being detached:
			 */
			pthread->joiner = NULL;
		}

		/*
		 * Undefer and handle pending signals, yielding if a
		 * scheduling signal occurred while in the critical region.
		 */
		_thread_kern_sig_undefer();
	} else
		/* Return an error: */
		rval = EINVAL;

	/* Return the completion status: */
	return (rval);
}
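A caller sketch using only the public API (background is an illustrative name): a second pthread_detach() on the same thread takes the final else branch above and returns EINVAL. POSIX leaves repeated detaches undefined in general; the EINVAL is specific to this implementation.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *
background(void *arg)
{
	sleep(1);		/* stand-in for work nobody joins for */
	return (arg);
}

int
main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, background, NULL);
	pthread_detach(tid);	/* storage reclaimed when the thread exits */
	/* The second detach hits the "already detached" branch above: */
	printf("second detach: %d\n", pthread_detach(tid));	/* EINVAL */
	sleep(2);		/* crude: let the thread finish before exiting */
	return (0);
}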
Example No. 5
/*
 * Note: a thread calling wait4 may have its state changed to waiting
 * until awakened by a signal.  Also note that system(3), for example,
 * blocks SIGCHLD and calls waitpid (which calls wait4).  If the process
 * started by system(3) doesn't finish before this function is called, the
 * function will never awaken -- system(3) also ignores SIGINT and SIGQUIT.
 *
 * Thus always unmask SIGCHLD here.
 */
pid_t
wait4(pid_t pid, int *istat, int options, struct rusage * rusage)
{
	struct pthread	*curthread = _get_curthread();
	pid_t		ret;
	sigset_t	mask, omask;

	/* This is a cancellation point: */
	_thread_enter_cancellation_point();

	_thread_kern_sig_defer();

	sigemptyset(&mask);
	sigaddset(&mask, SIGCHLD);
	sigprocmask(SIG_UNBLOCK, &mask, &omask);

	/* Perform a non-blocking wait4 syscall: */
	while ((ret = _thread_sys_wait4(pid, istat, options | WNOHANG,
	    rusage)) == 0 && (options & WNOHANG) == 0) {
		/* Reset the interrupted operation flag: */
		curthread->interrupted = 0;

		/* Schedule the next thread while this one waits: */
		_thread_kern_sched_state(PS_WAIT_WAIT, __FILE__, __LINE__);

		/* Check if this call was interrupted by a signal: */
		if (curthread->interrupted) {
			errno = EINTR;
			ret = -1;
			break;
		}
	}

	sigprocmask(SIG_SETMASK, &omask, NULL);

	_thread_kern_sig_undefer();

	/* No longer in a cancellation point: */
	_thread_leave_cancellation_point();

	return (ret);
}
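A sketch of the usual consumer of this wrapper (waitpid() is implemented in terms of wait4()): the blocking reap below is what drives the WNOHANG polling loop above.

#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0)
		_exit(7);	/* child */

	/* Blocking reap; libc_r loops on the non-blocking wait4 above. */
	if (waitpid(pid, &status, 0) == pid && WIFEXITED(status))
		printf("child exited with %d\n", WEXITSTATUS(status));
	return (0);
}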
Example No. 6
void
_cond_wait_backout(pthread_t pthread)
{
	pthread_cond_t	cond;

	cond = pthread->data.cond;
	if (cond != NULL) {
		/*
		 * Defer signals to protect the scheduling queues
		 * from access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the condition variable structure: */
		_SPINLOCK(&cond->lock);

		/* Process according to condition variable type: */
		switch (cond->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			cond_queue_remove(cond, pthread);

			/* Check for no more waiters: */
			if (TAILQ_FIRST(&cond->c_queue) == NULL)
				cond->c_mutex = NULL;
			break;

		default:
			break;
		}

		/* Unlock the condition variable structure: */
		_SPINUNLOCK(&cond->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
	}
}
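The backout above is why waiters must treat wakeups as advisory. A standard predicate-loop sketch (consume and produce are illustrative names): a waiter backed out of the condition queue for a signal handler, or woken spuriously, re-checks the predicate and re-queues itself.

#include <pthread.h>

static pthread_mutex_t	lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	ready = PTHREAD_COND_INITIALIZER;
static int		available;	/* the predicate */

void
consume(void)
{
	pthread_mutex_lock(&lock);
	while (available == 0)
		pthread_cond_wait(&ready, &lock);
	available--;
	pthread_mutex_unlock(&lock);
}

void
produce(void)
{
	pthread_mutex_lock(&lock);
	available++;
	pthread_cond_signal(&ready);
	pthread_mutex_unlock(&lock);
}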
Example No. 7
int
getsockopt(int fd, int level, int optname, void *optval, socklen_t *optlen)
{
	int             ret;
	struct fd_table_entry *entry;

	ret = _thread_fd_table_init(fd, FD_INIT_UNKNOWN, NULL);
	if (ret == 0) {
		entry = _thread_fd_table[fd];

		_thread_kern_sig_defer();
		if (entry->state == FD_ENTRY_OPEN) {
			ret = _thread_sys_getsockopt(fd, level, optname, optval, optlen);
		} else {
			ret = -1;
			errno = EBADF;
		}
		_thread_kern_sig_undefer();
	}

	return ret;
}
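A minimal caller sketch using only standard socket calls: the wrapper above merely validates the descriptor's fd-table entry, since getsockopt() itself cannot block.

#include <sys/types.h>
#include <sys/socket.h>
#include <stdio.h>

int
main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int rcvbuf;
	socklen_t len = sizeof(rcvbuf);

	if (fd >= 0 &&
	    getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, &len) == 0)
		printf("receive buffer: %d bytes\n", rcvbuf);
	return (0);
}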
Example No. 8
int
_pthread_cond_broadcast(pthread_cond_t * cond)
{
	int             rval = 0;
	pthread_t       pthread;

	if (cond == NULL)
		rval = EINVAL;
	/*
	 * If the condition variable is statically initialized, perform
	 * dynamic initialization.
	 */
	else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
		/*
		 * Defer signals to protect the scheduling queues
		 * from access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the condition variable structure: */
		_SPINLOCK(&(*cond)->lock);

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			/* Increment the sequence number: */
			(*cond)->c_seqno++;

			/*
			 * Enter a loop to bring all threads off the
			 * condition queue:
			 */
			while ((pthread = cond_queue_deq(*cond)) != NULL) {
				/*
				 * Wake up the signaled thread:
				 */
				PTHREAD_NEW_STATE(pthread, PS_RUNNING);
			}

			/* There are no more waiting threads: */
			(*cond)->c_mutex = NULL;
			break;
	
		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		/* Unlock the condition variable structure: */
		_SPINUNLOCK(&(*cond)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
	}

	/* Return the completion status: */
	return (rval);
}
Example No. 9
int
_pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	       void *(*start_routine) (void *), void *arg)
{
	struct pthread	*curthread = _get_curthread();
	struct itimerval itimer;
	int		f_gc = 0;
	int             ret = 0;
	pthread_t       gc_thread;
	pthread_t       new_thread;
	pthread_attr_t	pattr;
	void           *stack;

	if (thread == NULL)
		return (EINVAL);

	/*
	 * Locking functions in libc are required when there are
	 * threads other than the initial thread.
	 */
	__isthreaded = 1;

	/* Allocate memory for the thread structure: */
	if ((new_thread = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
		/* Insufficient memory to create a thread: */
		ret = EAGAIN;
	} else {
		/* Check if default thread attributes are required: */
		if (attr == NULL || *attr == NULL) {
			/* Use the default thread attributes: */
			pattr = &pthread_attr_default;
		} else {
			pattr = *attr;
		}
		/* Check if a stack was specified in the thread attributes: */
		if ((stack = pattr->stackaddr_attr) != NULL) {
		}
		/* Allocate memory for a default-size stack: */
		else if (pattr->stacksize_attr == PTHREAD_STACK_DEFAULT) {
			struct stack	*spare_stack;
			
			/* Allocate or re-use a default-size stack. */
			
			/*
			 * Use the garbage collector mutex for synchronization
			 * of the spare stack list.
			 */
			if (pthread_mutex_lock(&_gc_mutex) != 0)
				PANIC("Cannot lock gc mutex");
			
			if ((spare_stack = SLIST_FIRST(&_stackq)) != NULL) {
				/* Use the spare stack. */
				SLIST_REMOVE_HEAD(&_stackq, qe);
				
				/* Unlock the garbage collector mutex. */
				if (pthread_mutex_unlock(&_gc_mutex) != 0)
					PANIC("Cannot unlock gc mutex");
				
				/*
				 * The bookkeeping record sits at the top of
				 * the cached stack region; step back down to
				 * recover the usable base:
				 */
				stack = sizeof(struct stack)
				    + (void *) spare_stack
				    - PTHREAD_STACK_DEFAULT;
			} else {
				/* Allocate a new stack. */
				stack = _next_stack + PTHREAD_STACK_GUARD;

				/*
				 * Even if stack allocation fails, we don't want
				 * to try to use this location again, so
				 * unconditionally decrement _next_stack.  Under
				 * normal operating conditions, the most likely
				 * reason for an mmap() error is a stack
				 * overflow of the adjacent thread stack.
				 */
				_next_stack -= (PTHREAD_STACK_DEFAULT
				    + PTHREAD_STACK_GUARD);

				/* Unlock the garbage collector mutex. */
				if (pthread_mutex_unlock(&_gc_mutex) != 0)
					PANIC("Cannot unlock gc mutex");

				/* Stack: */
				if (mmap(stack, PTHREAD_STACK_DEFAULT,
				    PROT_READ | PROT_WRITE, MAP_STACK,
				    -1, 0) == MAP_FAILED) {
					ret = EAGAIN;
					free(new_thread);
				}
			}
		}
		/*
		 * The user wants a stack of a particular size.  Let's hope they
		 * really know what they want, and simply malloc the stack.
		 */
		else if ((stack = (void *) malloc(pattr->stacksize_attr))
		    == NULL) {
			/* Insufficient memory to create a thread: */
			ret = EAGAIN;
			free(new_thread);
		}

		/* Check for errors: */
		if (ret != 0) {
		} else {
			/* Initialise the thread structure: */
			memset(new_thread, 0, sizeof(struct pthread));
			new_thread->tcb = _libc_allocate_tls();
			if (new_thread->tcb == NULL)
				PANIC("Cannot allocate TLS and TCB");
			new_thread->slice_usec = -1;
			new_thread->stack = stack;
			new_thread->start_routine = start_routine;
			new_thread->arg = arg;

			new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
			    PTHREAD_CANCEL_DEFERRED;

			/*
			 * Write a magic value to the thread structure
			 * to help identify valid ones:
			 */
			new_thread->magic = PTHREAD_MAGIC;

			/* Initialise the thread for signals: */
			new_thread->sigmask = curthread->sigmask;
			new_thread->sigmask_seqno = 0;

			/* Initialize the signal frame: */
			new_thread->curframe = NULL;

			/* Initialise the jump buffer: */
			_setjmp(new_thread->ctx.jb);

			/*
			 * Set up new stack frame so that it looks like it
			 * returned from a longjmp() to the beginning of
			 * _thread_start().
			 */
			SET_RETURN_ADDR_JB(new_thread->ctx.jb, _thread_start);

			/* The stack starts high and builds down: */
			SET_STACK_JB(new_thread->ctx.jb,
			    (long)new_thread->stack + pattr->stacksize_attr
			    - sizeof(double));

			/* Copy the thread attributes: */
			memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr));

			/*
			 * Check if this thread is to inherit the scheduling
			 * attributes from its parent:
			 */
			if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
				/* Copy the scheduling attributes: */
				new_thread->base_priority =
				    curthread->base_priority &
				    ~PTHREAD_SIGNAL_PRIORITY;
				new_thread->attr.prio =
				    curthread->base_priority &
				    ~PTHREAD_SIGNAL_PRIORITY;
				new_thread->attr.sched_policy =
				    curthread->attr.sched_policy;
			} else {
				/*
				 * Use just the thread priority, leaving the
				 * other scheduling attributes as their
				 * default values:
				 */
				new_thread->base_priority =
				    new_thread->attr.prio;
			}
			new_thread->active_priority = new_thread->base_priority;
			new_thread->inherited_priority = 0;

			/* Initialize joiner to NULL (no joiner): */
			new_thread->joiner = NULL;

			/* Initialize the mutex queue: */
			TAILQ_INIT(&new_thread->mutexq);

			/* Initialise hooks in the thread structure: */
			new_thread->specific_data = NULL;
			new_thread->cleanup = NULL;
			new_thread->flags = 0;
			new_thread->poll_data.nfds = 0;
			new_thread->poll_data.fds = NULL;
			new_thread->continuation = NULL;

			/*
			 * Defer signals to protect the scheduling queues
			 * from access by the signal handler:
			 */
			_thread_kern_sig_defer();

			/*
			 * Initialise the unique id which GDB uses to
			 * track threads.
			 */
			new_thread->uniqueid = next_uniqueid++;

			/*
			 * Check if the garbage collector thread
			 * needs to be started.
			 */
			f_gc = (TAILQ_FIRST(&_thread_list) == _thread_initial);

			/* Add the thread to the linked list of all threads: */
			TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle);

			if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
				new_thread->flags |= PTHREAD_FLAGS_SUSPENDED;
				new_thread->state = PS_SUSPENDED;
			} else {
				new_thread->state = PS_RUNNING;
				PTHREAD_PRIOQ_INSERT_TAIL(new_thread);
			}

			/*
			 * Undefer and handle pending signals, yielding
			 * if necessary.
			 */
			_thread_kern_sig_undefer();

			/* Return a pointer to the thread structure: */
			(*thread) = new_thread;

			if (f_gc != 0) {
				/* Install the scheduling timer: */
				itimer.it_interval.tv_sec = 0;
				itimer.it_interval.tv_usec = _clock_res_usec;
				itimer.it_value = itimer.it_interval;
				if (setitimer(_ITIMER_SCHED_TIMER, &itimer,
				    NULL) != 0)
					PANIC("Cannot set interval timer");
			}

			/* Schedule the new user thread: */
			_thread_kern_sched(NULL);

			/*
			 * Start a garbage collector thread
			 * if necessary.
			 */
			if (f_gc && pthread_create(&gc_thread,NULL,
				    _thread_gc,NULL) != 0)
				PANIC("Can't create gc thread");

		}
	}

	/* Return the status: */
	return (ret);
}
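A minimal caller sketch, assuming only the public API (square is an illustrative name): allocation failures in the code above surface to the caller as EAGAIN.

#include <pthread.h>
#include <stdio.h>

static void *
square(void *arg)
{
	long n = (long)arg;

	return ((void *)(n * n));
}

int
main(void)
{
	pthread_t tid;
	void *res;

	/* EAGAIN here maps to the allocation failures above. */
	if (pthread_create(&tid, NULL, square, (void *)7L) != 0)
		return (1);
	pthread_join(tid, &res);
	printf("7 squared is %ld\n", (long)res);
	return (0);
}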
Example No. 10
static inline int
mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
{
	struct pthread	*curthread = _get_curthread();
	int	ret = 0;

	if (mutex == NULL || *mutex == NULL) {
		ret = EINVAL;
	} else {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is a
				 * recursive mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of
				 * threads waiting on the mutex: 
				 */
				if (((*mutex)->m_owner =
			  	    mutex_queue_deq(*mutex)) != NULL) {
					/* Make the new owner runnable: */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					/*
				 * Add the mutex to the thread's list of
					 * owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
				}
			}
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is a
				 * recursive mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
					(*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of threads
				 * waiting on the mutex: 
				 */
				if (((*mutex)->m_owner = 
				    mutex_queue_deq(*mutex)) == NULL)
					/* This mutex has no priority. */
					(*mutex)->m_prio = 0;
				else {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the thread's list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Set the priority of the mutex.  Since
					 * our waiting threads are in descending
					 * priority order, the priority of the
					 * mutex becomes the active priority of
					 * the thread we just dequeued.
					 */
					(*mutex)->m_prio =
					    (*mutex)->m_owner->active_priority;

					/*
					 * Save the owning thread's inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread's inherited priority
					 * now becomes its active priority (the
					 * priority of the mutex).
					 */
					(*mutex)->m_owner->inherited_priority =
						(*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
				}
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is a
				 * recursive mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
					(*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Enter a loop to find a waiting thread whose
				 * active priority will not cause a ceiling
				 * violation:
				 */
				while ((((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) &&
				    ((*mutex)->m_owner->active_priority >
				     (*mutex)->m_prio)) {
					/*
					 * Either the mutex ceiling priority has
					 * been lowered and/or this thread's
					 * priority has been raised subsequent
					 * to this thread being queued on the
					 * waiting list.
					 */
					tls_set_tcb((*mutex)->m_owner->tcb);
					errno = EINVAL;
					tls_set_tcb(curthread->tcb);
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
					/*
					 * The thread is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
				}

				/* Check for a new owner: */
				if ((*mutex)->m_owner != NULL) {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the thread's list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Save the owning thread's inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread inherits the
					 * ceiling priority of the mutex and
					 * executes at that priority:
					 */
					(*mutex)->m_owner->inherited_priority =
					    (*mutex)->m_prio;
					(*mutex)->m_owner->active_priority =
					    (*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
				}
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0)) {
			/* Increment the reference count: */
			(*mutex)->m_refcount++;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
	}

	/* Return the completion status: */
	return (ret);
}
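A hedged sketch of configuring the PTHREAD_PRIO_PROTECT case handled above, using the standard mutex-attribute calls (make_ceiling_mutex is an illustrative helper, and the ceiling value must lie inside the scheduling policy's priority range; error checking is omitted):

#include <pthread.h>

int
make_ceiling_mutex(pthread_mutex_t *m, int ceiling)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
	pthread_mutexattr_setprioceiling(&attr, ceiling);
	return (pthread_mutex_init(m, &attr));
}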
Example No. 11
int
_pthread_mutex_lock(pthread_mutex_t * mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	ret = 0;

	if (_thread_initial == NULL)
		_thread_init();

	if (mutex == NULL)
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if ((*mutex == NULL) &&
	    ((ret = init_static(mutex)) != 0))
		return (ret);

	/* Reset the interrupted flag: */
	curthread->interrupted = 0;

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
	do {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*mutex)->m_queue);
			(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
			_MUTEX_INIT_LINK(*mutex);
		}

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*mutex)->m_owner = curthread;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*mutex), m_qe);

			} else if ((*mutex)->m_owner == curthread)
				ret = mutex_self_lock(*mutex);
			else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex: 
				 */
				mutex_queue_enq(*mutex, curthread);

				/*
				 * Keep a pointer to the mutex this thread
				 * is waiting on:
				 */
				curthread->data.mutex = *mutex;

				/*
				 * Unlock the mutex structure and schedule the
				 * next thread:
				 */
				_thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
				    &(*mutex)->lock, __FILE__, __LINE__);

				/* Lock the mutex structure again: */
				_SPINLOCK(&(*mutex)->lock);
			}
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*mutex)->m_owner = curthread;

				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 */
				(*mutex)->m_prio = curthread->active_priority;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*mutex), m_qe);

			} else if ((*mutex)->m_owner == curthread)
				ret = mutex_self_lock(*mutex);
			else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex: 
				 */
				mutex_queue_enq(*mutex, curthread);

				/*
				 * Keep a pointer to the mutex this thread
				 * is waiting on:
				 */
				curthread->data.mutex = *mutex;

				if (curthread->active_priority >
				    (*mutex)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(*mutex);

				/*
				 * Unlock the mutex structure and schedule the
				 * next thread:
				 */
				_thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
				    &(*mutex)->lock, __FILE__, __LINE__);

				/* Lock the mutex structure again: */
				_SPINLOCK(&(*mutex)->lock);
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*mutex)->m_prio)
				ret = EINVAL;

			/* Check if this mutex is not locked: */
			else if ((*mutex)->m_owner == NULL) {
				/*
				 * Lock the mutex for the running
				 * thread:
				 */
				(*mutex)->m_owner = curthread;

				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority:
				 */
				curthread->active_priority = (*mutex)->m_prio;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == curthread)
				ret = mutex_self_lock(*mutex);
			else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex: 
				 */
				mutex_queue_enq(*mutex, curthread);

				/*
				 * Keep a pointer to the mutex this thread
				 * is waiting on:
				 */
				curthread->data.mutex = *mutex;

				/* Clear any previous error: */
				errno = 0;

				/*
				 * Unlock the mutex structure and schedule the
				 * next thread:
				 */
				_thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
				    &(*mutex)->lock, __FILE__, __LINE__);

				/* Lock the mutex structure again: */
				_SPINLOCK(&(*mutex)->lock);

				/*
				 * The thread's priority may have changed while
				 * waiting for the mutex, causing a ceiling
				 * violation.
				 */
				ret = errno;
				errno = 0;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		/*
		 * Check to see if this thread was interrupted and
		 * is still in the mutex queue of waiting threads:
		 */
		if (curthread->interrupted != 0)
			mutex_queue_remove(*mutex, curthread);

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
	} while (((*mutex)->m_owner != curthread) && (ret == 0) &&
	    (curthread->interrupted == 0));

	if (curthread->interrupted != 0 &&
	    curthread->continuation != NULL)
		curthread->continuation((void *) curthread);

	/* Return the completion status: */
	return (ret);
}
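A sketch of the static-initialization path (counter_lock and increment are illustrative names): in this library PTHREAD_MUTEX_INITIALIZER leaves the pthread_mutex_t pointer NULL, so the first lock call runs init_static() above.

#include <pthread.h>

static pthread_mutex_t	counter_lock = PTHREAD_MUTEX_INITIALIZER;
static int		counter;

void
increment(void)
{
	pthread_mutex_lock(&counter_lock);	/* first call initializes */
	counter++;
	pthread_mutex_unlock(&counter_lock);
}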
Example No. 12
int
_pthread_mutex_trylock(pthread_mutex_t * mutex)
{
	struct pthread	*curthread = _get_curthread();
	int	ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*mutex)->m_queue);
			_MUTEX_INIT_LINK(*mutex);
			(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
		}

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:	
			/* Check if this mutex is not locked: */
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for the running thread: */
				(*mutex)->m_owner = curthread;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == curthread)
				ret = mutex_self_trylock(*mutex);
			else
				/* Return a busy error: */
				ret = EBUSY;
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for the running thread: */
				(*mutex)->m_owner = curthread;

				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on the attributes of the
				 * running thread when there are no waiters.
				 */
				(*mutex)->m_prio = curthread->active_priority;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == curthread)
				ret = mutex_self_trylock(*mutex);
			else
				/* Return a busy error: */
				ret = EBUSY;
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*mutex)->m_prio)
				ret = EINVAL;

			/* Check if this mutex is not locked: */
			else if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for the running thread: */
				(*mutex)->m_owner = curthread;

				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority.
				 */
				curthread->active_priority = (*mutex)->m_prio;
				(*mutex)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority =
				    (*mutex)->m_prio;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == curthread)
				ret = mutex_self_trylock(*mutex);
			else
				/* Return a busy error: */
				ret = EBUSY;
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
	}

	/* Return the completion status: */
	return (ret);
}
Example No. 13
int
_pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	       void *(*start_routine) (void *), void *arg)
{
	struct pthread	*curthread = _get_curthread();
	struct itimerval itimer;
	int		f_gc = 0;
	int             ret = 0;
	pthread_t       gc_thread;
	pthread_t       new_thread;
	pthread_attr_t	pattr;
	void           *stack;
#if !defined(__ia64__)
	u_long 		stackp;
#endif

	if (thread == NULL)
		return(EINVAL);

	/*
	 * Locking functions in libc are required when there are
	 * threads other than the initial thread.
	 */
	__isthreaded = 1;

	/* Allocate memory for the thread structure: */
	if ((new_thread = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
		/* Insufficient memory to create a thread: */
		ret = EAGAIN;
	} else {
		/* Check if default thread attributes are required: */
		if (attr == NULL || *attr == NULL) {
			/* Use the default thread attributes: */
			pattr = &_pthread_attr_default;
		} else {
			pattr = *attr;
		}
		/* Check if a stack was specified in the thread attributes: */
		if ((stack = pattr->stackaddr_attr) != NULL) {
		}
		/* Allocate a stack: */
		else {
			stack = _thread_stack_alloc(pattr->stacksize_attr,
			    pattr->guardsize_attr);
			if (stack == NULL) {
				ret = EAGAIN;
				free(new_thread);
			}
		}

		/* Check for errors: */
		if (ret != 0) {
		} else {
			/* Initialise the thread structure: */
			memset(new_thread, 0, sizeof(struct pthread));
			new_thread->slice_usec = -1;
			new_thread->stack = stack;
			new_thread->start_routine = start_routine;
			new_thread->arg = arg;

			new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
			    PTHREAD_CANCEL_DEFERRED;

			/*
			 * Write a magic value to the thread structure
			 * to help identify valid ones:
			 */
			new_thread->magic = PTHREAD_MAGIC;

			/* Initialise the thread for signals: */
			new_thread->sigmask = curthread->sigmask;
			new_thread->sigmask_seqno = 0;

			/* Initialize the signal frame: */
			new_thread->curframe = NULL;

			/* Initialise the jump buffer: */
			_setjmp(new_thread->ctx.jb);

			/*
			 * Set up new stack frame so that it looks like it
			 * returned from a longjmp() to the beginning of
			 * _thread_start().
			 */
			SET_RETURN_ADDR_JB(new_thread->ctx.jb, _thread_start);

#if !defined(__ia64__)
			stackp = (long)new_thread->stack +
			    pattr->stacksize_attr - sizeof(double);
#if defined(__amd64__)
			stackp &= ~0xFUL;
#endif
			/* The stack starts high and builds down: */
			SET_STACK_JB(new_thread->ctx.jb, stackp);
#else
			SET_STACK_JB(new_thread->ctx.jb,
			    (long)new_thread->stack, pattr->stacksize_attr);
#endif

			/* Copy the thread attributes: */
			memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr));

			/*
			 * Check if this thread is to inherit the scheduling
			 * attributes from its parent:
			 */
			if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
				/* Copy the scheduling attributes: */
				new_thread->base_priority =
				    curthread->base_priority &
				    ~PTHREAD_SIGNAL_PRIORITY;
				new_thread->attr.prio =
				    curthread->base_priority &
				    ~PTHREAD_SIGNAL_PRIORITY;
				new_thread->attr.sched_policy =
				    curthread->attr.sched_policy;
			} else {
				/*
				 * Use just the thread priority, leaving the
				 * other scheduling attributes as their
				 * default values:
				 */
				new_thread->base_priority =
				    new_thread->attr.prio;
			}
			new_thread->active_priority = new_thread->base_priority;
			new_thread->inherited_priority = 0;

			/* Initialize joiner to NULL (no joiner): */
			new_thread->joiner = NULL;

			/* Initialize the mutex queue: */
			TAILQ_INIT(&new_thread->mutexq);

			/* Initialise hooks in the thread structure: */
			new_thread->specific = NULL;
			new_thread->cleanup = NULL;
			new_thread->flags = 0;
			new_thread->poll_data.nfds = 0;
			new_thread->poll_data.fds = NULL;
			new_thread->continuation = NULL;

			/*
			 * Defer signals to protect the scheduling queues
			 * from access by the signal handler:
			 */
			_thread_kern_sig_defer();

			/*
			 * Initialise the unique id which GDB uses to
			 * track threads.
			 */
			new_thread->uniqueid = next_uniqueid++;

			/*
			 * Check if the garbage collector thread
			 * needs to be started.
			 */
			f_gc = (TAILQ_FIRST(&_thread_list) == _thread_initial);

			/* Add the thread to the linked list of all threads: */
			TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle);

			if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
				new_thread->flags |= PTHREAD_FLAGS_SUSPENDED;
				new_thread->state = PS_SUSPENDED;
			} else {
				new_thread->state = PS_RUNNING;
				PTHREAD_PRIOQ_INSERT_TAIL(new_thread);
			}

			/*
			 * Undefer and handle pending signals, yielding
			 * if necessary.
			 */
			_thread_kern_sig_undefer();

			/* Return a pointer to the thread structure: */
			(*thread) = new_thread;

			if (f_gc != 0) {
				/* Install the scheduling timer: */
				itimer.it_interval.tv_sec = 0;
				itimer.it_interval.tv_usec = _clock_res_usec;
				itimer.it_value = itimer.it_interval;
				if (setitimer(_ITIMER_SCHED_TIMER, &itimer,
				    NULL) != 0)
					PANIC("Cannot set interval timer");
			}

			/* Schedule the new user thread: */
			_thread_kern_sched(NULL);

			/*
			 * Start a garbage collector thread
			 * if necessary.
			 */
			if (f_gc && _pthread_create(&gc_thread, NULL,
				    _thread_gc, NULL) != 0)
				PANIC("Can't create gc thread");

		}
	}

	/* Return the status: */
	return (ret);
}
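A caller sketch exercising the attribute path (task is an illustrative name; the 256 KB figure is an arbitrary example): a caller-chosen stack size is satisfied by _thread_stack_alloc() in the code above.

#include <pthread.h>

static void *
task(void *arg)
{
	return (arg);
}

int
main(void)
{
	pthread_attr_t attr;
	pthread_t tid;

	pthread_attr_init(&attr);
	/* Satisfied by _thread_stack_alloc() above: */
	pthread_attr_setstacksize(&attr, 256 * 1024);
	if (pthread_create(&tid, &attr, task, NULL) != 0)
		return (1);
	pthread_join(tid, NULL);
	pthread_attr_destroy(&attr);
	return (0);
}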
Example No. 14
void 
funlockfile(FILE * fp)
{
	int	idx = file_idx(fp);
	struct	file_lock	*p;

	/*
	 * Defer signals to protect the scheduling queues from
	 * access by the signal handler:
	 */
	_thread_kern_sig_defer();

	/* Lock the hash table: */
	_SPINLOCK(&hash_lock);

	/*
	 * Get a pointer to the lock for the file and check that
	 * the running thread is the one with the lock:
	 */
	if ((p = find_lock(idx, fp)) != NULL &&
	    p->owner == _thread_run) {
		/*
		 * Check if this thread has locked the FILE
		 * more than once:
		 */
		if (p->count > 1)
			/*
			 * Decrement the count of the number of
			 * times the running thread has locked this
			 * file:
			 */
			p->count--;
		else {
			/*
			 * The running thread will release the
			 * lock now:
			 */
			p->count = 0;

			/* Get the new owner of the lock: */
			if ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) {
				/* Pop the thread off the queue: */
				TAILQ_REMOVE(&p->l_head,p->owner,qe);

				/*
				 * This is the first lock for the new
				 * owner:
				 */
				p->count = 1;

				/* Allow the new owner to run: */
				PTHREAD_NEW_STATE(p->owner,PS_RUNNING);
			}
		}
	}

	/* Unlock the hash table: */
	_SPINUNLOCK(&hash_lock);

	/*
	 * Undefer and handle pending signals, yielding if
	 * necessary:
	 */
	_thread_kern_sig_undefer();
}
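A sketch of the usual flockfile()/funlockfile() pairing (log_pair is an illustrative name): the stream is held across a multi-part write so output from other threads cannot interleave, and nested flockfile() calls by the owner are what the p->count recursion above tracks.

#include <stdio.h>

void
log_pair(const char *key, const char *val)
{
	flockfile(stdout);
	fputs(key, stdout);
	fputs(": ", stdout);
	fputs(val, stdout);
	fputc('\n', stdout);
	funlockfile(stdout);
}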