Example No. 1
int
close(int fd)
{
	int		ret;

	/* This is a cancellation point: */
	_thread_enter_cancellation_point();

	if ((fd < 0) || (fd >= _thread_max_fdtsize) ||
	    (fd == _thread_kern_pipe[0]) || (fd == _thread_kern_pipe[1])) {
		errno = EBADF;
		ret = -1;
	} else if ((ret = _FD_LOCK(fd, FD_RDWR_CLOSE, NULL)) != -1) {
		/*
		 * We need to hold the entry spinlock till after
		 * _thread_sys_close() to stop races caused by the
		 * fd state transition.
		 */
		_SPINLOCK(&_thread_fd_table[fd]->lock);

		_thread_fd_entry_close(fd);

		/* Close the file descriptor: */
		ret = _thread_sys_close(fd);

		_SPINUNLOCK(&_thread_fd_table[fd]->lock);

		_FD_UNLOCK(fd, FD_RDWR_CLOSE);
	}

	/* No longer in a cancellation point: */
	_thread_leave_cancellation_point();

	return (ret);
}
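
Every wrapper in this list follows the bracketing pattern shown here: enter the cancellation point, do the work, leave the cancellation point, and only then return. A minimal caller-side sketch, not taken from the library, of what this enables; the cancellation request is acted on while the worker is blocked inside such a point:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative demo: a thread parked in sleep(), a cancellation
 * point, is cancelled from the main thread. */
static void *
worker(void *arg)
{
	(void)arg;
	sleep(60);		/* the cancellation request lands here */
	return (NULL);		/* not reached when cancelled */
}

int
main(void)
{
	pthread_t t;
	void *res;

	pthread_create(&t, NULL, worker, NULL);
	sleep(1);		/* let the worker reach the cancellation point */
	pthread_cancel(t);
	pthread_join(t, &res);
	printf("worker %s\n",
	    res == PTHREAD_CANCELED ? "was cancelled" : "exited normally");
	return (0);
}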
Example No. 2
int
_sem_wait(sem_t *sem)
{
	int	retval;

	_thread_enter_cancellation_point();
	
	_SEM_CHECK_VALIDITY(sem);

	pthread_mutex_lock(&(*sem)->lock);

	while ((*sem)->count == 0) {
		(*sem)->nwaiters++;
		pthread_cond_wait(&(*sem)->gtzero, &(*sem)->lock);
		(*sem)->nwaiters--;
	}
	(*sem)->count--;

	pthread_mutex_unlock(&(*sem)->lock);

	retval = 0;
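	/* _SEM_CHECK_VALIDITY() above bails out to this label with retval set: */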
  RETURN:
	_thread_leave_cancellation_point();
	return retval;
}
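
Beyond the cancellation bracketing, _sem_wait is the textbook counting semaphore built from a mutex and a condition variable: the while loop re-tests the count because condition waits may wake spuriously. A self-contained sketch of the same construction, with hypothetical names and no validity checking:

#include <pthread.h>

struct csem {
	pthread_mutex_t	lock;
	pthread_cond_t	gtzero;
	unsigned int	count;
};

static void
csem_wait(struct csem *s)
{
	pthread_mutex_lock(&s->lock);
	while (s->count == 0)	/* re-test: wakeups may be spurious */
		pthread_cond_wait(&s->gtzero, &s->lock);
	s->count--;
	pthread_mutex_unlock(&s->lock);
}

static void
csem_post(struct csem *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->count++ == 0)	/* signal only the 0 -> 1 transition */
		pthread_cond_signal(&s->gtzero);
	pthread_mutex_unlock(&s->lock);
}

Posting signals only when the count leaves zero, which mirrors the gtzero condition the library waits on above.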
Example No. 3
unsigned int
sleep(unsigned int seconds)
{
	unsigned int	ret;

	_thread_enter_cancellation_point();
	ret = __sleep(seconds);
	_thread_leave_cancellation_point();
	
	return ret;
}
Example No. 4
int
_pause(void)
{
	int	ret;

	_thread_enter_cancellation_point();
	ret = __pause();
	_thread_leave_cancellation_point();
	
	return ret;
}
Example No. 5
int
__sigsuspend(const sigset_t * set)
{
	int	ret;

	_thread_enter_cancellation_point();
	ret = _sigsuspend(set);
	_thread_leave_cancellation_point();

	return ret;
}
Example No. 6
ssize_t
writev(int fd, const struct iovec *iov, int iovcnt)
{
	ssize_t ret;

	_thread_enter_cancellation_point();
	ret = _writev(fd, iov, iovcnt);
	_thread_leave_cancellation_point();

	return ret;
}
Example No. 7
int
tcdrain(int fd)
{
	int	ret;
	
	_thread_enter_cancellation_point();
	ret = __tcdrain(fd);
	_thread_leave_cancellation_point();

	return ret;
}
Example No. 8
int
poll(struct pollfd *fds, unsigned int nfds, int timeout)
{
	int ret;

	_thread_enter_cancellation_point();
	ret = _poll(fds, nfds, timeout);
	_thread_leave_cancellation_point();

	return ret;
}
Example No. 9
pid_t
_waitpid(pid_t wpid, int *status, int options)
{
	pid_t	ret;

	_thread_enter_cancellation_point();
	ret = __waitpid(wpid, status, options);
	_thread_leave_cancellation_point();
	
	return ret;
}
Example No. 10
ssize_t
__sendmsg(int fd, const struct msghdr *msg, int flags)
{
	ssize_t ret;

	_thread_enter_cancellation_point();
	ret = _sendmsg(fd, msg, flags);
	_thread_leave_cancellation_point();

	return (ret);
}
Example No. 11
ssize_t
recvfrom(int fd, void *buf, size_t len, int flags, struct sockaddr *from,
    socklen_t *from_len)
{
	struct pthread	*curthread = _get_curthread();
	ssize_t		ret;

	/* This is a cancellation point: */
	_thread_enter_cancellation_point();

	if ((ret = _FD_LOCK(fd, FD_READ, NULL)) == 0) {
		while ((ret = _thread_sys_recvfrom(fd, buf, len, flags, from, from_len)) < 0) {
			if (!(_thread_fd_table[fd]->status_flags->flags & O_NONBLOCK) &&
			    !(flags & MSG_DONTWAIT) &&
			    ((errno == EWOULDBLOCK) || (errno == EAGAIN))) {
				curthread->data.fd.fd = fd;

				/* Set the timeout: */
				_thread_kern_set_timeout(_FD_RCVTIMEO(fd));
				curthread->interrupted = 0;
				curthread->closing_fd = 0;
				curthread->timeout = 0;
				_thread_kern_sched_state(PS_FDR_WAIT, __FILE__, __LINE__);

				/* Check if the wait was interrupted: */
				if (curthread->interrupted) {
					/* Return an error status: */
					errno = EINTR;
					ret = -1;
					break;
				} else if (curthread->closing_fd) {
					/* Return an error status: */
					errno = EBADF;
					ret = -1;
					break;
				} else if (curthread->timeout) {
					/* Return an error status: */
					errno = EWOULDBLOCK;
					ret = -1;
					break;
				}
			} else {
				ret = -1;
				break;
			}
		}
		_FD_UNLOCK(fd, FD_READ);
	}

	/* No longer in a cancellation point: */
	_thread_leave_cancellation_point();

	return (ret);
}
Example No. 12
int
nanosleep(const struct timespec *time_to_sleep,
    struct timespec *time_remaining)
{
	int	ret;

	_thread_enter_cancellation_point();
	ret = _nanosleep(time_to_sleep, time_remaining);
	_thread_leave_cancellation_point();

	return ret;
}
Example No. 13
int
_aio_suspend(const struct aiocb * const iocbs[], int niocb,
    const struct timespec *timeout)
{
	int	ret;

	_thread_enter_cancellation_point();
	ret = __sys_aio_suspend(iocbs, niocb, timeout);
	_thread_leave_cancellation_point();

	return ret;
}
Example No. 14
ssize_t
preadv(int fd, const struct iovec * iov, int iovcnt, off_t offset)
{
	ssize_t	ret;

	/* This is a cancellation point: */
	_thread_enter_cancellation_point();

	ret = _thread_sys_preadv(fd, iov, iovcnt, offset);

	/* No longer in a cancellation point: */
	_thread_leave_cancellation_point();

	return (ret);
}
Example No. 15
int
__msync(void *addr, size_t len, int flags)
{
	int	ret;

	/*
	 * XXX This is quite pointless unless we know how to get the
	 * file descriptor associated with the memory, and lock it for
	 * write. The only real use of this wrapper is to guarantee
	 * a cancellation point, as per the standard. sigh.
	 */
	_thread_enter_cancellation_point();
	ret = _msync(addr, len, flags);
	_thread_leave_cancellation_point();

	return ret;
}
Example No. 16
/*
 * Note: a thread calling wait4 may have its state changed to waiting
 * until awakened by a signal.  Also note that system(3), for example,
 * blocks SIGCHLD and calls waitpid (which calls wait4).  If the process
 * started by system(3) doesn't finish before this function is called the
 * function will never awaken -- system(3) also ignores SIGINT and SIGQUIT.
 *
 * Thus always unmask SIGCHLD here.
 */
pid_t
wait4(pid_t pid, int *istat, int options, struct rusage * rusage)
{
    struct pthread	*curthread = _get_curthread();
    pid_t           ret;
    sigset_t	mask, omask;

    /* This is a cancellation point: */
    _thread_enter_cancellation_point();

    _thread_kern_sig_defer();

    sigemptyset(&mask);
    sigaddset(&mask, SIGCHLD);
    sigprocmask(SIG_UNBLOCK, &mask, &omask);

    /* Perform a non-blocking wait4 syscall: */
    while ((ret = _thread_sys_wait4(pid, istat, options | WNOHANG,
        rusage)) == 0 && (options & WNOHANG) == 0) {
        /* Reset the interrupted operation flag: */
        curthread->interrupted = 0;

        /* Schedule the next thread while this one waits: */
        _thread_kern_sched_state(PS_WAIT_WAIT, __FILE__, __LINE__);

        /* Check if this call was interrupted by a signal: */
        if (curthread->interrupted) {
            errno = EINTR;
            ret = -1;
            break;
        }
    }

    sigprocmask(SIG_SETMASK, &omask, NULL);

    _thread_kern_sig_undefer();

    /* No longer in a cancellation point: */
    _thread_leave_cancellation_point();

    return (ret);
}
Example No. 17
int
_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
		       const struct timespec * abstime)
{
	struct pthread	*curthread = _get_curthread();
	int	rval = 0;
	int	done = 0;
	int	interrupted = 0;
	int	seqno;

	_thread_enter_cancellation_point();

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000) {
		/* Leave the cancellation point before the early return: */
		_thread_leave_cancellation_point();
		return (EINVAL);
	}
	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0) {
		_thread_leave_cancellation_point();
		return (rval);
	}

	/*
	 * Enter a loop waiting for a condition signal or broadcast
	 * to wake up this thread.  A loop is needed in case the waiting
	 * thread is interrupted by a signal to execute a signal handler.
	 * It is not (currently) possible to remain in the waiting queue
	 * while running a handler.  Instead, the thread is interrupted
	 * and backed out of the waiting queue prior to executing the
	 * signal handler.
	 */
	do {
		/* Lock the condition variable structure: */
		_SPINLOCK(&(*cond)->lock);

		/*
		 * If the condvar was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*cond)->c_queue);
			(*cond)->c_flags |= COND_FLAGS_INITED;
		}

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
			    ((*cond)->c_mutex != *mutex))) {
				/* Return invalid argument error: */
				rval = EINVAL;

				/* Unlock the condition variable structure: */
				_SPINUNLOCK(&(*cond)->lock);
			} else {
				/* Set the wakeup time: */
				curthread->wakeup_time.tv_sec =
				    abstime->tv_sec;
				curthread->wakeup_time.tv_nsec =
				    abstime->tv_nsec;

				/* Reset the timeout and interrupted flags: */
				curthread->timeout = 0;
				curthread->interrupted = 0;

				/*
				 * Queue the running thread for the condition
				 * variable:
				 */
				cond_queue_enq(*cond, curthread);

				/* Remember the mutex and sequence number: */
				(*cond)->c_mutex = *mutex;
				seqno = (*cond)->c_seqno;

				/* Unlock the mutex: */
				if ((rval = _mutex_cv_unlock(mutex)) != 0) {
					/*
					 * Cannot unlock the mutex, so remove
					 * the running thread from the condition
					 * variable queue: 
					 */
					cond_queue_remove(*cond, curthread);

					/* Check for no more waiters: */
					if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
						(*cond)->c_mutex = NULL;

					/* Unlock the condition variable structure: */
					_SPINUNLOCK(&(*cond)->lock);
				} else {
					/*
					 * Schedule the next thread and unlock
					 * the condition variable structure:
					 */
					_thread_kern_sched_state_unlock(PS_COND_WAIT,
					    &(*cond)->lock, __FILE__, __LINE__);

					done = (seqno != (*cond)->c_seqno);

					interrupted = curthread->interrupted;

					/*
					 * Check if the wait was interrupted
					 * (canceled) or needs to be resumed
					 * after handling a signal.
					 */
					if (interrupted != 0) {
						/*
						 * Lock the mutex and ignore any
						 * errors.  Note that even
						 * though this thread may have
						 * been canceled, POSIX requires
						 * that the mutex be reacquired
						 * prior to cancellation.
						 */
						(void)_mutex_cv_lock(mutex);
					} else {
						/*
						 * Lock the condition variable
						 * while removing the thread.
						 */
						_SPINLOCK(&(*cond)->lock);

						cond_queue_remove(*cond,
						    curthread);

						/* Check for no more waiters: */
						if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
							(*cond)->c_mutex = NULL;

						_SPINUNLOCK(&(*cond)->lock);

						/* Lock the mutex: */
						rval = _mutex_cv_lock(mutex);

						/*
						 * Return ETIMEDOUT if the wait
						 * timed out and there wasn't an
						 * error locking the mutex:
						 */
						if ((curthread->timeout != 0)
						    && rval == 0)
							rval = ETIMEDOUT;
					}
				}
			}
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Unlock the condition variable structure: */
			_SPINUNLOCK(&(*cond)->lock);

			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		if ((interrupted != 0) && (curthread->continuation != NULL))
			curthread->continuation((void *) curthread);
	} while ((done == 0) && (rval == 0));

	_thread_leave_cancellation_point();

	/* Return the completion status: */
	return (rval);
}
Example No. 18
ssize_t
read(int fd, void *buf, size_t nbytes)
{
    struct pthread	*curthread = _get_curthread();
    ssize_t	ret;
    int	type;

    /* This is a cancellation point: */
    _thread_enter_cancellation_point();

    /* POSIX says to do just this: */
    if (nbytes == 0)
        ret = 0;

    /* Lock the file descriptor for read: */
    else if ((ret = _FD_LOCK(fd, FD_READ, NULL)) == 0) {
        /* Get the read/write mode type: */
        type = _thread_fd_table[fd]->status_flags->flags & O_ACCMODE;

        /* Check if the file is not open for read: */
        if (type != O_RDONLY && type != O_RDWR) {
            /* File is not open for read: */
            errno = EBADF;
            ret = -1;
        }

        /* Perform a non-blocking read syscall: */
    else while ((ret = _thread_sys_read(fd, buf, nbytes)) < 0) {
        if ((_thread_fd_table[fd]->status_flags->flags & O_NONBLOCK) == 0 &&
            (errno == EWOULDBLOCK || errno == EAGAIN)) {
            curthread->data.fd.fd = fd;
            _thread_kern_set_timeout(NULL);

            /* Reset the interrupted operation flag: */
            curthread->interrupted = 0;
            curthread->closing_fd = 0;

            _thread_kern_sched_state(PS_FDR_WAIT, __FILE__, __LINE__);

            /*
             * Check if the operation was interrupted by a signal
             * or a closing fd.
             */
            if (curthread->interrupted) {
                errno = EINTR;
                ret = -1;
                break;
            } else if (curthread->closing_fd) {
                errno = EBADF;
                ret = -1;
                break;
            }
        } else {
            break;
        }
    }
        _FD_UNLOCK(fd, FD_READ);
    }

    /* No longer in a cancellation point: */
    _thread_leave_cancellation_point();

    return (ret);
}
Example No. 19
int
closefrom(int fd)
{
	int ret = 0;
	int safe_fd;
	int lock_fd;
	int *flags;

	_thread_enter_cancellation_point();
	
	if (fd < 0 || fd >= _thread_max_fdtsize) {
		errno = EBADF;
		ret = -1;
	} else {
		safe_fd = _thread_kern_pipe[0] > _thread_kern_pipe[1] ?
			_thread_kern_pipe[0] : _thread_kern_pipe[1];

		/*
		 * Close individual files until we get past the pipe
		 * fds.  Attempting to close a pipe fd is a no-op.
		 */
		for (safe_fd++; fd < safe_fd; fd++)
			close(fd);

		flags = calloc((size_t)_thread_max_fdtsize, sizeof *flags);
		if (flags == NULL) {
			/* use calloc errno */
			ret = -1;
		} else {
			/* Lock and record all fd entries */
			for (lock_fd = fd; lock_fd < _thread_max_fdtsize; lock_fd++) {
				if (_thread_fd_table[lock_fd] != NULL &&
				    _thread_fd_table[lock_fd]->state != FD_ENTRY_CLOSED) {
					ret = _FD_LOCK(lock_fd, FD_RDWR_CLOSE, NULL);
					if (ret != -1)
						flags[lock_fd] = 1;
					else
						break;
				}
			}

			if (ret != -1) {
				/*
				 * Close the entries and reset the non-blocking
				 * flag when needed.
				 */
				for (lock_fd = fd; lock_fd < _thread_max_fdtsize; lock_fd++) {
					if (flags[lock_fd] != 0) {
						_thread_fd_entry_close(lock_fd);
					}
				}
				/*
				 * Now let the system do its thing. It is not practical
				 * to try to prevent races with other threads that can
				 * create new file descriptors. We just have to assume
				 * the application is well behaved when using closefrom.
				 */
				ret = _thread_sys_closefrom(fd);
			}

			/*
			 * Unlock any locked entries.
			 */
			for (lock_fd = fd; lock_fd < _thread_max_fdtsize; lock_fd++) {
				if (flags[lock_fd] != 0) {
					_FD_UNLOCK(lock_fd, FD_RDWR_CLOSE);
				}
			}
			free(flags);
		}
	}

	_thread_leave_cancellation_point();

	return (ret);
}
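
As the comment in the middle of the function concedes, the locking only protects the thread library's own bookkeeping; races with threads that create new descriptors are the application's problem. The typical well-behaved use is dropping every descriptor above the standard three before exec, as in this illustrative sketch (closefrom() is assumed to be declared in <unistd.h>, as on OpenBSD):

#include <unistd.h>

static void
exec_clean(char *const argv[])
{
	closefrom(3);		/* keep only fds 0, 1 and 2 */
	execvp(argv[0], argv);
	_exit(127);		/* only reached if exec failed */
}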
Example No. 20
ssize_t
writev(int fd, const struct iovec * iov, int iovcnt)
{
	struct pthread	*curthread = _get_curthread();
	int	blocking;
	int	idx = 0;
	int	type;
	ssize_t num = 0;
	size_t cnt;
	ssize_t n;
	ssize_t	ret;
	struct iovec liov[20];
	struct iovec *p_iov = liov;

	/* This is a cancellation point: */
	_thread_enter_cancellation_point();

	/* Check if the array size exceeds the compiled-in size: */
	if (iovcnt > (int) (sizeof(liov) / sizeof(struct iovec))) {
		/* Allocate memory for the local array: */
		if ((p_iov = (struct iovec *)
		    malloc((size_t)iovcnt * sizeof(struct iovec))) == NULL) {
			/* Insufficient memory: */
			errno = ENOMEM;
			_thread_leave_cancellation_point();
			return (-1);
		}
	} else if (iovcnt <= 0) {
		errno = EINVAL;
		_thread_leave_cancellation_point();
		return (-1);
	}

	/* Copy the caller's array so that it can be modified locally: */
	memcpy(p_iov, iov, (size_t)iovcnt * sizeof(struct iovec));

	/* Lock the file descriptor for write: */
	if ((ret = _FD_LOCK(fd, FD_WRITE, NULL)) == 0) {
		/* Get the read/write mode type: */
		type = _thread_fd_table[fd]->status_flags->flags & O_ACCMODE;

		/* Check if the file is not open for write: */
		if (type != O_WRONLY && type != O_RDWR) {
			/* File is not open for write: */
			errno = EBADF;
			_FD_UNLOCK(fd, FD_WRITE);
			if (p_iov != liov)
				free(p_iov);
			_thread_leave_cancellation_point();
			return (-1);
		}

		/* Check if file operations are to block */
		blocking = ((_thread_fd_table[fd]->status_flags->flags & O_NONBLOCK) == 0);

		/*
		 * Loop while no error occurs and until the expected number
		 * of bytes are written if performing a blocking write:
		 */
		while (ret == 0) {
			/* Perform a non-blocking write syscall: */
			n = _thread_sys_writev(fd, &p_iov[idx], iovcnt - idx);

			/* Check if one or more bytes were written: */
			if (n > 0) {
				/*
				 * Keep a count of the number of bytes
				 * written:
				 */
				num += n;

				/*
				 * Enter a loop to check if a short write
				 * occurred and move the index to the
				 * array entry where the short write
				 * ended:
				 */
				cnt = (size_t)n;
				while (cnt > 0 && idx < iovcnt) {
					/*
					 * If the residual count exceeds
					 * the size of this vector, then
					 * it was completely written:
					 */
					if (cnt >= p_iov[idx].iov_len)
						/*
						 * Decrement the residual
						 * count and increment the
						 * index to the next array
						 * entry:
						 */
						cnt -= p_iov[idx++].iov_len;
					else {
						/*
						 * This entry was only
						 * partially written, so
						 * adjust its length
						 * and base pointer ready
						 * for the next write:
						 */
						p_iov[idx].iov_len -= cnt;
						p_iov[idx].iov_base =
						    (char *)p_iov[idx].iov_base
						    + (ptrdiff_t)cnt;
						cnt = 0;
					}
				}
			} else if (n == 0) {
				/*
				 * Avoid an infinite loop if the last iov_len is
				 * 0.
				 */
				while (idx < iovcnt && p_iov[idx].iov_len == 0)
					idx++;

				if (idx == iovcnt) {
					ret = num;
					break;
				}
			}

			/*
			 * If performing a blocking write, check if the
			 * write would have blocked or if some bytes
			 * were written but there are still more to
			 * write:
			 */
			if (blocking && ((n < 0 && (errno == EWOULDBLOCK ||
			    errno == EAGAIN)) || (n >= 0 && idx < iovcnt))) {
				curthread->data.fd.fd = fd;
				_thread_kern_set_timeout(NULL);

				/* Reset the interrupted operation flag: */
				curthread->interrupted = 0;
				curthread->closing_fd = 0;

				_thread_kern_sched_state(PS_FDW_WAIT,
				    __FILE__, __LINE__);

				/*
				 * Check if the operation was
				 * interrupted by a signal
				 */
				if (curthread->interrupted || curthread->closing_fd) {
					if (num > 0) {
						/* Return partial success: */
						ret = num;
					} else {
						/* Return an error: */
						if (curthread->closing_fd)
							errno = EBADF;
						else
							errno = EINTR;
						ret = -1;
					}
				}

			/*
			 * If performing a non-blocking write,
			 * just return whatever the write syscall did:
			 */
			} else if (!blocking) {
				/* A non-blocking call might return zero: */
				ret = n;
				break;

			/*
			 * If there was an error, return partial success
			 * (if any bytes were written) or else the error:
			 */
			} else if (n < 0) {
				if (num > 0)
					ret = num;
				else
					ret = n;

			/* Check if the write has completed: */
			} else if (idx == iovcnt)
				/* Return the number of bytes written: */
				ret = num;
		}
		_FD_UNLOCK(fd, FD_WRITE);
	}

	/* If memory was allocated for the array, free it: */
	if (p_iov != liov)
		free(p_iov);

	/* No longer in a cancellation point: */
	_thread_leave_cancellation_point();

	return (ret);
}
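
The heart of this writev is the short-write bookkeeping: after a partial write, fully written iovec entries are skipped and the first partially written entry has its base advanced and its length trimmed. The same logic isolated as a stand-alone helper with a hypothetical name:

#include <stddef.h>
#include <sys/uio.h>

/* Consume n bytes from iov[*idx..iovcnt-1], advancing *idx past the
 * fully written entries and trimming the first partial one. */
static void
iov_advance(struct iovec *iov, int iovcnt, int *idx, size_t n)
{
	while (n > 0 && *idx < iovcnt) {
		if (n >= iov[*idx].iov_len) {
			/* This entry was completely written: */
			n -= iov[(*idx)++].iov_len;
		} else {
			/* Partial write inside this entry: */
			iov[*idx].iov_len -= n;
			iov[*idx].iov_base = (char *)iov[*idx].iov_base + n;
			n = 0;
		}
	}
}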
Example No. 21
int
connect(int fd, const struct sockaddr * name, socklen_t namelen)
{
	struct pthread	*curthread = _get_curthread();
	struct sockaddr tmpname;
	socklen_t	errnolen, tmpnamelen;
	int             ret;

	/* This is a cancellation point: */
	_thread_enter_cancellation_point();

	if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) {
		if ((ret = _thread_sys_connect(fd, name, namelen)) < 0) {
			if (!(_thread_fd_table[fd]->status_flags->flags & O_NONBLOCK) &&
			((errno == EWOULDBLOCK) || (errno == EINPROGRESS) ||
			 (errno == EALREADY) || (errno == EAGAIN))) {
				curthread->data.fd.fd = fd;

				/* Reset the interrupted operation flag: */
				curthread->interrupted = 0;
				curthread->closing_fd = 0;

				/* Set the timeout: */
				_thread_kern_set_timeout(NULL);
				_thread_kern_sched_state(PS_FDW_WAIT, __FILE__, __LINE__);

				/*
				 * Check if the operation was
				 * interrupted by a signal or
				 * a closing fd.
				 */
				if (curthread->interrupted) {
					errno = EINTR;
					ret = -1;
				} else if (curthread->closing_fd) {
					errno = EBADF;
					ret = -1;
				} else {
					tmpnamelen = sizeof(tmpname);
					/* ret is 0; now let's see if the connect really worked: */
					if (((ret = _thread_sys_getpeername(fd, &tmpname, &tmpnamelen)) < 0) &&
					    (errno == ENOTCONN)) {

						/*
						 * Get the error, this function
						 * should not fail 
						 */
						errnolen = sizeof(errno);
						_thread_sys_getsockopt(fd, SOL_SOCKET, SO_ERROR, &errno, &errnolen);
					}
				}
			} else {
				ret = -1;
			}
		}
		_FD_UNLOCK(fd, FD_RDWR);
	}

	/* No longer in a cancellation point: */
	_thread_leave_cancellation_point();

	return (ret);
}
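
The getpeername()/SO_ERROR sequence above is the standard way to learn the outcome of a connect that completed asynchronously. An application driving a non-blocking socket itself does much the same once the descriptor becomes writable; a sketch with a hypothetical helper name:

#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

/* After connect() returned EINPROGRESS on a non-blocking socket,
 * wait for writability and fetch the result with SO_ERROR. */
static int
finish_connect(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	int err = 0;
	socklen_t len = sizeof(err);

	if (poll(&pfd, 1, -1) < 0)
		return (-1);
	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
		return (-1);
	if (err != 0) {
		errno = err;
		return (-1);
	}
	return (0);
}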
Example No. 22
int
accept(int fd, struct sockaddr * name, socklen_t *namelen)
{
	struct pthread	*curthread = _get_curthread();
	int             ret;
	int		newfd;
	enum fd_entry_mode init_mode;

	/* This is a cancellation point: */
	_thread_enter_cancellation_point();

	/* Lock the file descriptor: */
	if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) {
		/* Enter a loop to wait for a connection request: */
		while ((ret = _thread_sys_accept(fd, name, namelen)) < 0) {
			/* Check if the socket is to block: */
			if ((_thread_fd_table[fd]->status_flags->flags & O_NONBLOCK) == 0 &&
			    (errno == EWOULDBLOCK || errno == EAGAIN)) {
				/* Save the socket file descriptor: */
				curthread->data.fd.fd = fd;
				curthread->data.fd.fname = __FILE__;
				curthread->data.fd.branch = __LINE__;

				/* Set the timeout: */
				_thread_kern_set_timeout(NULL);
				curthread->interrupted = 0;
				curthread->closing_fd = 0;

				/* Schedule the next thread: */
				_thread_kern_sched_state(PS_FDR_WAIT, __FILE__,
							 __LINE__);

				/* Check if the wait was interrupted: */
				if (curthread->interrupted) {
					/* Return an error status: */
					errno = EINTR;
					ret = -1;
					break;
				} else if (curthread->closing_fd) {
					/* Return an error status: */
					errno = EBADF;
					ret = -1;
					break;
				}
			} else {
				/*
				 * Another error has occurred, so exit the
				 * loop here: 
				 */
				break;
			}
		}

		/*
		 * If no errors initialize the file descriptor table
		 * for the new socket. If the client's view of the
		 * status_flags for fd is blocking, then force newfd
		 * to be viewed as blocking too.
		 */
		if (ret != -1) {
			newfd = ret;

			if ((_thread_fd_table[fd]->status_flags->flags & O_NONBLOCK) == 0)
				init_mode = FD_INIT_BLOCKING;
			else
				init_mode = FD_INIT_NEW;
			if ((ret = _thread_fd_table_init(newfd, init_mode, NULL)) != -1)
				ret = newfd;
			else {
				/* Quietly close the new fd: */
				_thread_sys_close(newfd);
			}
		}

		/* Unlock the file descriptor: */
		_FD_UNLOCK(fd, FD_RDWR);
	}

	/* No longer in a cancellation point: */
	_thread_leave_cancellation_point();

	/* Return the socket file descriptor or -1 on error: */
	return (ret);
}
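
Seen from the application, the net effect of this machinery is that accept() can fail with EINTR or EBADF and that the new descriptor inherits the listening socket's blocking behaviour. A minimal caller-side retry loop (illustrative):

#include <errno.h>
#include <sys/socket.h>

static int
accept_retry(int lfd)
{
	int nfd;

	for (;;) {
		nfd = accept(lfd, NULL, NULL);
		if (nfd >= 0 || errno != EINTR)
			return (nfd);
		/* Interrupted by a signal: try again. */
	}
}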