/*
 * Lock a location for the running thread. Yield to allow other
 * threads to run if this thread is blocked because the lock is
 * not available. Note that this function does not sleep. It
 * assumes that the lock will be available very soon.
 *
 * This function checks if the running thread has already locked the
 * location, warns if this occurs and creates a thread dump before
 * returning.
 */
void
_spinlock_debug(spinlock_t *lck, char *fname, int lineno)
{
	struct pthread	*curthread = _get_curthread();
	int cnt = 0;

	/*
	 * Try to grab the lock and loop if another thread grabs
	 * it before we do.
	 */
	while (_atomic_lock(&lck->access_lock)) {
		cnt++;
		if (cnt > 100) {
			char str[256];
			snprintf(str, sizeof(str),
			    "%s - Warning: Thread %p attempted to lock %p "
			    "from %s (%d) which was left locked from %s (%d)\n",
			    _getprogname(), curthread, lck, fname, lineno,
			    lck->fname, lck->lineno);
			__sys_extpwrite(2, str, strlen(str), O_FBLOCKING, -1);
			__sleep(1);
			cnt = 0;
		}

		/* Block the thread until the lock becomes available. */
		curthread->data.spinlock = lck;
		_thread_kern_sched_state(PS_SPINBLOCK, fname, lineno);
	}

	/* The running thread now owns the lock: */
	lck->lock_owner = (long) curthread;
	lck->fname = fname;
	lck->lineno = lineno;
}
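/*
 * A minimal sketch for context: callers normally reach the debug
 * variant through the _SPINLOCK macro rather than directly.  The
 * header wiring below is an assumption about how spinlock.h typically
 * selects it, not verbatim source; with _LOCK_DEBUG defined the macro
 * records the call site so the warning above can report where a stuck
 * lock was last taken.
 */
#ifdef _LOCK_DEBUG
#define _SPINLOCK(_lck)		_spinlock_debug(_lck, __FILE__, __LINE__)
#else
#define _SPINLOCK(_lck)		_spinlock(_lck)
#endif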
ssize_t
_sendmsg(int fd, const struct msghdr *msg, int flags)
{
	struct pthread	*curthread = _get_curthread();
	int             ret;

	if ((ret = _FD_LOCK(fd, FD_WRITE, NULL)) == 0) {
		while ((ret = __sys_sendmsg(fd, msg, flags)) < 0) {
			if (!(_thread_fd_getflags(fd) & O_NONBLOCK)
			    && ((errno == EWOULDBLOCK) || (errno == EAGAIN))) {
				curthread->data.fd.fd = fd;

				/* Set the timeout: */
				_thread_kern_set_timeout(NULL);
				curthread->interrupted = 0;
				_thread_kern_sched_state(PS_FDW_WAIT, __FILE__, __LINE__);

				/* Check if the operation was interrupted: */
				if (curthread->interrupted) {
					errno = EINTR;
					ret = -1;
					break;
				}
			} else {
				ret = -1;
				break;
			}
		}
		_FD_UNLOCK(fd, FD_WRITE);
	}
	return (ret);
}
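/*
 * A minimal caller-side sketch (assumed usage, not from this source):
 * the wrapper above keeps the standard sendmsg(2) contract, so a
 * caller gathers its buffers into a msghdr as usual.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t
send_one_buffer(int sock, const void *buf, size_t len)
{
	struct iovec	iov;
	struct msghdr	msg;

	iov.iov_base = (void *)buf;	/* cast away const for the iovec */
	iov.iov_len = len;
	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	/* Blocks (yielding to other threads) exactly like _sendmsg(). */
	return (sendmsg(sock, &msg, 0));
}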
void
flockfile(FILE * fp)
{
	int	idx = file_idx(fp);
	struct	file_lock	*p;

	/* Lock the hash table: */
	_SPINLOCK(&hash_lock);

	/* Check if the static array has not been initialised: */
	if (!init_done) {
		/* Initialise the global array: */
		memset(flh, 0, sizeof(flh));

		/* Flag the initialisation as complete: */
		init_done = 1;
	}

	/* Get a pointer to any existing lock for the file: */
	if ((p = find_lock(idx, fp)) == NULL) {
		/*
		 * The file is not locked, so this thread can
		 * grab the lock:
		 */
		p = do_lock(idx, fp);

		/* Unlock the hash table: */
		_SPINUNLOCK(&hash_lock);

	/*
	 * The file is already locked, so check if the
	 * running thread is the owner:
	 */
	} else if (p->owner == _thread_run) {
		/*
		 * The running thread is already the
		 * owner, so increment the count of
		 * the number of times it has locked
		 * the file:
		 */
		p->count++;

		/* Unlock the hash table: */
		_SPINUNLOCK(&hash_lock);
	} else {
		/*
		 * The file is locked for another thread.
		 * Append this thread to the queue of
		 * threads waiting on the lock.
		 */
		TAILQ_INSERT_TAIL(&p->l_head, _thread_run, qe);

		/* Unlock the hash table: */
		_SPINUNLOCK(&hash_lock);

		/* Wait on the FILE lock: */
		_thread_kern_sched_state(PS_FILE_WAIT, "", 0);
	}
}
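/*
 * A minimal application-side sketch (assumed usage): flockfile() and
 * funlockfile() bracket a group of stdio calls so no other thread can
 * interleave output on the same FILE.
 */
#include <stdio.h>

static void
put_record(FILE *fp, const char *tag, int value)
{
	flockfile(fp);
	fprintf(fp, "%s=", tag);
	fprintf(fp, "%d\n", value);	/* no interleaving between the two */
	funlockfile(fp);
}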
int 
_poll(struct pollfd *fds, unsigned int nfds, int timeout)
{
	struct pthread	*curthread = _get_curthread();
	struct timespec	ts;
	int		numfds = nfds;
	int             i, ret = 0;
	struct pthread_poll_data data;

	if (numfds > _thread_dtablesize) {
		numfds = _thread_dtablesize;
	}
	/* Check if a timeout was specified: */
	if (timeout == INFTIM) {
		/* Wait forever: */
		_thread_kern_set_timeout(NULL);
	} else if (timeout > 0) {
		/* Convert the timeout in msec to a timespec: */
		ts.tv_sec = timeout / 1000;
		ts.tv_nsec = (timeout % 1000) * 1000000;

		/* Set the wake up time: */
		_thread_kern_set_timeout(&ts);
	} else if (timeout < 0) {
		/* A timeout less than zero that is not INFTIM is invalid: */
		errno = EINVAL;
		return (-1);
	}

	if (((ret = __sys_poll(fds, numfds, 0)) == 0) && (timeout != 0)) {
		data.nfds = numfds;
		data.fds = fds;

		/*
		 * Clear revents in case of a timeout which leaves fds
		 * unchanged:
		 */
		for (i = 0; i < numfds; i++) {
			fds[i].revents = 0;
		}

		curthread->data.poll_data = &data;
		curthread->interrupted = 0;
		_thread_kern_sched_state(PS_POLL_WAIT, __FILE__, __LINE__);
		if (curthread->interrupted) {
			errno = EINTR;
			ret = -1;
		} else {
			ret = data.nfds;
		}
	}

	return (ret);
}
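/*
 * A minimal caller-side sketch (assumed usage): waiting for
 * readability on a single descriptor through the wrapper above.
 */
#include <poll.h>

static int
wait_readable(int fd)
{
	struct pollfd	pfd;

	pfd.fd = fd;
	pfd.events = POLLIN;
	pfd.revents = 0;

	/* 1 if readable, 0 on a two second timeout, -1 on error. */
	return (poll(&pfd, 1, 2000));
}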
ssize_t
recvfrom(int fd, void *buf, size_t len, int flags, struct sockaddr * from, socklen_t *from_len)
{
	struct pthread	*curthread = _get_curthread();
	ssize_t		ret;

	/* This is a cancellation point: */
	_thread_enter_cancellation_point();

	if ((ret = _FD_LOCK(fd, FD_READ, NULL)) == 0) {
		while ((ret = _thread_sys_recvfrom(fd, buf, len, flags, from, from_len)) < 0) {
			if (!(_thread_fd_table[fd]->status_flags->flags & O_NONBLOCK) &&
			    !(flags & MSG_DONTWAIT) &&
			    ((errno == EWOULDBLOCK) || (errno == EAGAIN))) {
				curthread->data.fd.fd = fd;

				/* Set the timeout: */
				_thread_kern_set_timeout(_FD_RCVTIMEO(fd));
				curthread->interrupted = 0;
				curthread->closing_fd = 0;
				curthread->timeout = 0;
				_thread_kern_sched_state(PS_FDR_WAIT, __FILE__, __LINE__);

				/* Check if the wait was interrupted: */
				if (curthread->interrupted) {
					/* Return an error status: */
					errno = EINTR;
					ret = -1;
					break;
				} else if (curthread->closing_fd) {
					/* Return an error status: */
					errno = EBADF;
					ret = -1;
					break;
				} else if (curthread->timeout) {
					/* Return an error status: */
					errno = EWOULDBLOCK;
					ret = -1;
					break;
				}
			} else {
				ret = -1;
				break;
			}
		}
		_FD_UNLOCK(fd, FD_READ);
	}

	/* No longer in a cancellation point: */
	_thread_leave_cancellation_point();

	return (ret);
}
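/*
 * A minimal caller-side sketch (assumed usage): one datagram receive
 * that also records the sender's address.
 */
#include <sys/socket.h>

static ssize_t
recv_datagram(int sock, void *buf, size_t len,
    struct sockaddr_storage *from)
{
	socklen_t	fromlen = sizeof(*from);

	return (recvfrom(sock, buf, len, 0,
	    (struct sockaddr *)from, &fromlen));
}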
int
_sigsuspend(const sigset_t * set)
{
	struct pthread	*curthread = _get_curthread();
	int             ret = -1;
	sigset_t        oset, sigset;

	/* Check if a new signal set was provided by the caller: */
	if (set != NULL) {
		/* Save the current signal mask: */
		oset = curthread->sigmask;

		/* Change the caller's mask: */
		curthread->sigmask = *set;

		/*
		 * Check if there are pending signals for the running
		 * thread or process that aren't blocked:
		 */
		sigset = curthread->sigpend;
		SIGSETOR(sigset, _process_sigpending);
		SIGSETNAND(sigset, curthread->sigmask);
		if (SIGNOTEMPTY(sigset)) {
			/*
			 * Call the kernel scheduler which will safely
			 * install a signal frame for the running thread:
			 */
			_thread_kern_sched_sig();
		} else {
			/* Wait for a signal: */
			_thread_kern_sched_state(PS_SIGSUSPEND,
			    __FILE__, __LINE__);
		}

		/* Always return an interrupted error: */
		errno = EINTR;

		/* Restore the signal mask: */
		curthread->sigmask = oset;
	} else {
		/* Return an invalid argument error: */
		errno = EINVAL;
	}

	/* Return the completion status: */
	return (ret);
}
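/*
 * A minimal caller-side sketch (assumed usage; the flag name is
 * hypothetical): the classic pattern _sigsuspend() enables, atomically
 * unblocking a signal while waiting for it so no wakeup can be lost.
 */
#include <signal.h>

static volatile sig_atomic_t got_sigusr1;	/* set by a handler */

static void
wait_for_sigusr1(void)
{
	sigset_t	mask, waitmask;

	sigemptyset(&mask);
	sigaddset(&mask, SIGUSR1);
	sigprocmask(SIG_BLOCK, &mask, &waitmask);

	/* Wait with the caller's old mask, minus SIGUSR1: */
	sigdelset(&waitmask, SIGUSR1);
	while (!got_sigusr1)
		sigsuspend(&waitmask);	/* always returns -1 with EINTR */

	sigprocmask(SIG_UNBLOCK, &mask, NULL);
}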
/*
 * Lock a location for the running thread. Yield to allow other
 * threads to run if this thread is blocked because the lock is
 * not available. Note that this function does not sleep. It
 * assumes that the lock will be available very soon.
 */
void
_spinlock(spinlock_t *lck)
{
	struct pthread	*curthread = _get_curthread();

	/*
	 * Try to grab the lock and loop if another thread grabs
	 * it before we do.
	 */
	while (_atomic_lock(&lck->access_lock)) {
		/* Block the thread until the lock becomes available. */
		curthread->data.spinlock = lck;
		_thread_kern_sched_state(PS_SPINBLOCK, __FILE__, __LINE__);
	}

	/* The running thread now owns the lock: */
	lck->lock_owner = (long) curthread;
}
/*
 * Note: a thread calling wait4 may have its state changed to waiting
 * until awakened by a signal.  Also note that system(3), for example,
 * blocks SIGCHLD and calls waitpid (which calls wait4).  If the process
 * started by system(3) doesn't finish before this function is called the
 * function will never awaken -- system(3) also ignores SIGINT and SIGQUIT.
 *
 * Thus always unmask SIGCHLD here.
 */
pid_t
wait4(pid_t pid, int *istat, int options, struct rusage * rusage)
{
    struct pthread	*curthread = _get_curthread();
    pid_t           ret;
    sigset_t	mask, omask;

    /* This is a cancellation point: */
    _thread_enter_cancellation_point();

    _thread_kern_sig_defer();

    sigemptyset(&mask);
    sigaddset(&mask, SIGCHLD);
    sigprocmask(SIG_UNBLOCK, &mask, &omask);

    /* Perform a non-blocking wait4 syscall: */
    while ((ret = _thread_sys_wait4(pid, istat, options | WNOHANG,
        rusage)) == 0 && (options & WNOHANG) == 0) {
        /* Reset the interrupted operation flag: */
        curthread->interrupted = 0;

        /* Schedule the next thread while this one waits: */
        _thread_kern_sched_state(PS_WAIT_WAIT, __FILE__, __LINE__);

        /* Check if this call was interrupted by a signal: */
        if (curthread->interrupted) {
            errno = EINTR;
            ret = -1;
            break;
        }
    }

    sigprocmask(SIG_SETMASK, &omask, NULL);

    _thread_kern_sig_undefer();

    /* No longer in a cancellation point: */
    _thread_leave_cancellation_point();

    return (ret);
}
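/*
 * A sketch of the conventional layering, for reference: waitpid(2) is
 * typically wait4() with no resource-usage reporting.  This is an
 * assumed illustration, not this tree's actual waitpid().
 */
#include <sys/types.h>
#include <sys/wait.h>

static pid_t
my_waitpid(pid_t pid, int *istat, int options)
{
	return (wait4(pid, istat, options, NULL));
}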
ssize_t
read(int fd, void *buf, size_t nbytes)
{
    struct pthread	*curthread = _get_curthread();
    ssize_t	ret;
    int	type;

    /* This is a cancellation point: */
    _thread_enter_cancellation_point();

    /* POSIX says to do just this: */
    if (nbytes == 0)
        ret = 0;

    /* Lock the file descriptor for read: */
    else if ((ret = _FD_LOCK(fd, FD_READ, NULL)) == 0) {
        /* Get the read/write mode type: */
        type = _thread_fd_table[fd]->status_flags->flags & O_ACCMODE;

        /* Check if the file is not open for read: */
        if (type != O_RDONLY && type != O_RDWR) {
            /* File is not open for read: */
            errno = EBADF;
            ret = -1;
        }

        /* Perform a non-blocking read syscall: */
        else while ((ret = _thread_sys_read(fd, buf, nbytes)) < 0) {
                if ((_thread_fd_table[fd]->status_flags->flags & O_NONBLOCK) == 0 &&
                        (errno == EWOULDBLOCK || errno == EAGAIN)) {
                    curthread->data.fd.fd = fd;
                    _thread_kern_set_timeout(NULL);

                    /* Reset the interrupted operation flag: */
                    curthread->interrupted = 0;
                    curthread->closing_fd = 0;

                    _thread_kern_sched_state(PS_FDR_WAIT,
                                             __FILE__, __LINE__);

                    /*
                     * Check if the operation was
                     * interrupted by a signal or
                     * a closing fd.
                     */
                    if (curthread->interrupted) {
                        errno = EINTR;
                        ret = -1;
                        break;
                    } else if (curthread->closing_fd) {
                        errno = EBADF;
                        ret = -1;
                        break;
                    }
                } else {
                    break;
                }
            }
        _FD_UNLOCK(fd, FD_READ);
    }

    /* No longer in a cancellation point: */
    _thread_leave_cancellation_point();

    return (ret);
}
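/*
 * A minimal caller-side sketch (assumed usage): even with the wrapper
 * above, read() may return short counts, so callers that need exactly
 * n bytes loop, retrying after EINTR.
 */
#include <errno.h>
#include <unistd.h>

static ssize_t
read_full(int fd, void *buf, size_t n)
{
	char	*p = buf;
	size_t	left = n;
	ssize_t	r;

	while (left > 0) {
		if ((r = read(fd, p, left)) < 0) {
			if (errno == EINTR)
				continue;	/* retry after a signal */
			return (-1);
		}
		if (r == 0)
			break;			/* EOF */
		p += r;
		left -= r;
	}
	return ((ssize_t)(n - left));
}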
void
pthread_exit(void *status)
{
	struct pthread	*curthread = _get_curthread();
	pthread_t pthread;

	/* Check if this thread is already in the process of exiting: */
	if ((curthread->flags & PTHREAD_EXITING) != 0) {
		PANIC("Thread has called pthread_exit() from a destructor. POSIX 1003.1 1996 s16.2.5.2 does not allow this!");
	}

	/* Flag this thread as exiting: */
	curthread->flags |= PTHREAD_EXITING;

	/* Save the return value: */
	curthread->ret = status;

	while (curthread->cleanup != NULL) {
		pthread_cleanup_pop(1);
	}
	if (curthread->attr.cleanup_attr != NULL) {
		curthread->attr.cleanup_attr(curthread->attr.arg_attr);
	}
	/* Check if there is thread specific data: */
	if (curthread->specific_data != NULL) {
		/* Run the thread-specific data destructors: */
		_thread_cleanupspecific();
	}

	/*
	 * Lock the garbage collector mutex to ensure that the garbage
	 * collector is not using the dead thread list.
	 */
	if (pthread_mutex_lock(&_gc_mutex) != 0)
		PANIC("Cannot lock gc mutex");

	/* Add this thread to the list of dead threads. */
	TAILQ_INSERT_HEAD(&_dead_list, curthread, dle);

	/*
	 * Signal the garbage collector thread that there is something
	 * to clean up.
	 */
	if (pthread_cond_signal(&_gc_cond) != 0)
		PANIC("Cannot signal gc cond");

	/*
	 * Avoid a race condition where a scheduling signal can occur
	 * causing the garbage collector thread to run.  If this happens,
	 * the current thread can be cleaned out from under us.
	 */
	_thread_kern_sig_defer();

	/* Unlock the garbage collector mutex: */
	if (pthread_mutex_unlock(&_gc_mutex) != 0)
		PANIC("Cannot unlock gc mutex");

	/* Check if there is a thread joining this one: */
	if (curthread->joiner != NULL) {
		pthread = curthread->joiner;
		curthread->joiner = NULL;

		switch (pthread->suspended) {
		case SUSP_JOIN:
			/*
			 * The joining thread is suspended.  Change the
			 * suspension state to make the thread runnable when it
			 * is resumed:
			 */
			pthread->suspended = SUSP_NO;
			break;
		case SUSP_NO:
			/* Make the joining thread runnable: */
			PTHREAD_NEW_STATE(pthread, PS_RUNNING);
			break;
		default:
			PANIC("Unreachable code reached");
		}

		/* Set the return value for the joining thread: */
		pthread->join_status.ret = curthread->ret;
		pthread->join_status.error = 0;
		pthread->join_status.thread = NULL;

		/* Make this thread collectable by the garbage collector. */
		PTHREAD_ASSERT(((curthread->attr.flags & PTHREAD_DETACHED) ==
		    0), "Cannot join a detached thread");
		curthread->attr.flags |= PTHREAD_DETACHED;
	}

	/* Remove this thread from the thread list: */
	TAILQ_REMOVE(&_thread_list, curthread, tle);

	/* This thread will never be re-scheduled. */
	_thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__);

	/* This point should not be reached. */
	PANIC("Dead thread has resumed");
}
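/*
 * A minimal caller-side sketch (assumed usage): the exit/join
 * handshake implemented above, seen from the application.
 */
#include <pthread.h>
#include <stdio.h>

static void *
worker(void *arg)
{
	(void)arg;
	pthread_exit((void *)42);	/* same as returning from worker */
}

int
main(void)
{
	pthread_t	tid;
	void		*ret;

	pthread_create(&tid, NULL, worker, NULL);
	pthread_join(tid, &ret);	/* receives join_status.ret */
	printf("worker returned %ld\n", (long)ret);
	return (0);
}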
ssize_t
writev(int fd, const struct iovec * iov, int iovcnt)
{
	struct pthread	*curthread = _get_curthread();
	int	blocking;
	int	idx = 0;
	int	type;
	ssize_t num = 0;
	size_t cnt;
	ssize_t n;
	ssize_t	ret;
	struct iovec liov[20];
	struct iovec *p_iov = liov;

	/* This is a cancellation point: */
	_thread_enter_cancellation_point();

	/* Check if the array size exceeds the compiled-in size: */
	if (iovcnt > (int) (sizeof(liov) / sizeof(struct iovec))) {
		/* Allocate memory for the local array: */
		if ((p_iov = (struct iovec *)
		    malloc((size_t)iovcnt * sizeof(struct iovec))) == NULL) {
			/* Insufficient memory: */
			errno = ENOMEM;
			_thread_leave_cancellation_point();
			return (-1);
		}
	} else if (iovcnt <= 0) {
		errno = EINVAL;
		_thread_leave_cancellation_point();
		return (-1);
	}

	/* Copy the caller's array so that it can be modified locally: */
	memcpy(p_iov, iov, (size_t)iovcnt * sizeof(struct iovec));

	/* Lock the file descriptor for write: */
	if ((ret = _FD_LOCK(fd, FD_WRITE, NULL)) == 0) {
		/* Get the read/write mode type: */
		type = _thread_fd_table[fd]->status_flags->flags & O_ACCMODE;

		/* Check if the file is not open for write: */
		if (type != O_WRONLY && type != O_RDWR) {
			/* File is not open for write: */
			errno = EBADF;
			_FD_UNLOCK(fd, FD_WRITE);
			if (p_iov != liov)
				free(p_iov);
			_thread_leave_cancellation_point();
			return (-1);
		}

		/* Check if file operations are to block */
		blocking = ((_thread_fd_table[fd]->status_flags->flags & O_NONBLOCK) == 0);

		/*
		 * Loop while no error occurs and until the expected number
		 * of bytes are written if performing a blocking write:
		 */
		while (ret == 0) {
			/* Perform a non-blocking write syscall: */
			n = _thread_sys_writev(fd, &p_iov[idx], iovcnt - idx);

			/* Check if one or more bytes were written: */
			if (n > 0) {
				/*
				 * Keep a count of the number of bytes
				 * written:
				 */
				num += n;

				/*
				 * Enter a loop to check if a short write
				 * occurred and move the index to the
				 * array entry where the short write
				 * ended:
				 */
				cnt = (size_t)n;
				while (cnt > 0 && idx < iovcnt) {
					/*
					 * If the residual count equals or
					 * exceeds the size of this vector,
					 * then it was completely written:
					 */
					if (cnt >= p_iov[idx].iov_len)
						/*
						 * Decrement the residual
						 * count and increment the
						 * index to the next array
						 * entry:
						 */
						cnt -= p_iov[idx++].iov_len;
					else {
						/*
						 * This entry was only
						 * partially written, so
						 * adjust its length
						 * and base pointer ready
						 * for the next write:
						 */
						p_iov[idx].iov_len -= cnt;
						p_iov[idx].iov_base =
						    (char *)p_iov[idx].iov_base
						    + (ptrdiff_t)cnt;
						cnt = 0;
					}
				}
			} else if (n == 0) {
				/*
				 * Avoid an infinite loop if the last iov_len is
				 * 0.
				 */
				while (idx < iovcnt && p_iov[idx].iov_len == 0)
					idx++;

				if (idx == iovcnt) {
					ret = num;
					break;
				}
			}

			/*
			 * If performing a blocking write, check if the
			 * write would have blocked or if some bytes
			 * were written but there are still more to
			 * write:
			 */
			if (blocking && ((n < 0 && (errno == EWOULDBLOCK ||
			    errno == EAGAIN)) || (n >= 0 && idx < iovcnt))) {
				curthread->data.fd.fd = fd;
				_thread_kern_set_timeout(NULL);

				/* Reset the interrupted operation flag: */
				curthread->interrupted = 0;
				curthread->closing_fd = 0;

				_thread_kern_sched_state(PS_FDW_WAIT,
				    __FILE__, __LINE__);

				/*
				 * Check if the operation was
				 * interrupted by a signal
				 */
				if (curthread->interrupted || curthread->closing_fd) {
					if (num > 0) {
						/* Return partial success: */
						ret = num;
					} else {
						/* Return an error: */
						if (curthread->closing_fd)
							errno = EBADF;
						else
							errno = EINTR;
						ret = -1;
					}
				}

			/*
			 * If performing a non-blocking write,
			 * just return whatever the write syscall did:
			 */
			} else if (!blocking) {
				/* A non-blocking call might return zero: */
				ret = n;
				break;

			/*
			 * If there was an error, return partial success
			 * (if any bytes were written) or else the error:
			 */
			} else if (n < 0) {
				if (num > 0)
					ret = num;
				else
					ret = n;

			/* Check if the write has completed: */
			} else if (idx == iovcnt)
				/* Return the number of bytes written: */
				ret = num;
		}
		_FD_UNLOCK(fd, FD_WRITE);
	}

	/* If memory was allocated for the array, free it: */
	if (p_iov != liov)
		free(p_iov);

	/* No longer in a cancellation point: */
	_thread_leave_cancellation_point();

	return (ret);
}
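/*
 * A minimal caller-side sketch (assumed usage): gathering a header and
 * body into one writev() call, the interface implemented above.
 */
#include <string.h>
#include <sys/uio.h>

static ssize_t
write_framed(int fd, const char *hdr, const char *body)
{
	struct iovec	iov[2];

	iov[0].iov_base = (void *)hdr;	/* cast away const for the iovec */
	iov[0].iov_len = strlen(hdr);
	iov[1].iov_base = (void *)body;
	iov[1].iov_len = strlen(body);

	/* May still be short; the wrapper above loops for blocking fds. */
	return (writev(fd, iov, 2));
}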
int
accept(int fd, struct sockaddr * name, socklen_t *namelen)
{
	struct pthread	*curthread = _get_curthread();
	int             ret;
	int		newfd;
	enum fd_entry_mode init_mode;

	/* This is a cancellation point: */
	_thread_enter_cancellation_point();

	/* Lock the file descriptor: */
	if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) {
		/* Enter a loop to wait for a connection request: */
		while ((ret = _thread_sys_accept(fd, name, namelen)) < 0) {
			/* Check if the socket is to block: */
			if ((_thread_fd_table[fd]->status_flags->flags & O_NONBLOCK) == 0 &&
			    (errno == EWOULDBLOCK || errno == EAGAIN)) {
				/* Save the socket file descriptor: */
				curthread->data.fd.fd = fd;
				curthread->data.fd.fname = __FILE__;
				curthread->data.fd.branch = __LINE__;

				/* Set the timeout: */
				_thread_kern_set_timeout(NULL);
				curthread->interrupted = 0;
				curthread->closing_fd = 0;

				/* Schedule the next thread: */
				_thread_kern_sched_state(PS_FDR_WAIT, __FILE__,
							 __LINE__);

				/* Check if the wait was interrupted: */
				if (curthread->interrupted) {
					/* Return an error status: */
					errno = EINTR;
					ret = -1;
					break;
				} else if (curthread->closing_fd) {
					/* Return an error status: */
					errno = EBADF;
					ret = -1;
					break;
				}
			} else {
				/*
				 * Another error has occurred, so exit the
				 * loop here: 
				 */
				break;
			}
		}

		/*
		 * If there were no errors, initialize the file descriptor
		 * table entry for the new socket.  If the client's view of
		 * the status_flags for fd is blocking, then force newfd to
		 * be viewed as blocking too.
		 */
		if (ret != -1) {
			newfd = ret;

			if ((_thread_fd_table[fd]->status_flags->flags & O_NONBLOCK) == 0)
				init_mode = FD_INIT_BLOCKING;
			else
				init_mode = FD_INIT_NEW;
			if ((ret = _thread_fd_table_init(newfd, init_mode,
			    NULL)) != -1)
				ret = newfd;
			else {
				/* Quietly close the new fd: */
				_thread_sys_close(newfd);
			}
		}

		/* Unlock the file descriptor: */
		_FD_UNLOCK(fd, FD_RDWR);
	}

	/* No longer in a cancellation point: */
	_thread_leave_cancellation_point();

	/* Return the socket file descriptor or -1 on error: */
	return (ret);
}
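/*
 * A minimal caller-side sketch (assumed usage; handle_client() is a
 * hypothetical helper): the usual server loop over accept().
 */
#include <sys/socket.h>

extern void	handle_client(int fd);

static void
serve(int listen_fd)
{
	struct sockaddr_storage	ss;
	socklen_t		sslen;
	int			cfd;

	for (;;) {
		sslen = sizeof(ss);
		cfd = accept(listen_fd, (struct sockaddr *)&ss, &sslen);
		if (cfd < 0)
			continue;	/* e.g. EINTR; real code checks errno */
		handle_client(cfd);
	}
}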
int
connect(int fd, const struct sockaddr * name, socklen_t namelen)
{
	struct pthread	*curthread = _get_curthread();
	struct sockaddr tmpname;
	socklen_t	errnolen, tmpnamelen;
	int             ret;

	/* This is a cancellation point: */
	_thread_enter_cancellation_point();

	if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) {
		if ((ret = _thread_sys_connect(fd, name, namelen)) < 0) {
			if (!(_thread_fd_table[fd]->status_flags->flags & O_NONBLOCK) &&
			((errno == EWOULDBLOCK) || (errno == EINPROGRESS) ||
			 (errno == EALREADY) || (errno == EAGAIN))) {
				curthread->data.fd.fd = fd;

				/* Reset the interrupted operation flag: */
				curthread->interrupted = 0;
				curthread->closing_fd = 0;

				/* Set the timeout: */
				_thread_kern_set_timeout(NULL);
				_thread_kern_sched_state(PS_FDW_WAIT, __FILE__, __LINE__);

				/*
				 * Check if the operation was
				 * interrupted by a signal or
				 * a closing fd.
				 */
				if (curthread->interrupted) {
					errno = EINTR;
					ret = -1;
				} else if (curthread->closing_fd) {
					errno = EBADF;
					ret = -1;
				} else {
					tmpnamelen = sizeof(tmpname);
					/* Now let's see if it really worked: */
					if (((ret = _thread_sys_getpeername(fd, &tmpname, &tmpnamelen)) < 0) &&
					    (errno == ENOTCONN)) {

						/*
						 * Get the deferred error;
						 * this function should not
						 * fail:
						 */
						errnolen = sizeof(errno);
						_thread_sys_getsockopt(fd, SOL_SOCKET, SO_ERROR, &errno, &errnolen);
					}
				}
			} else {
				ret = -1;
			}
		}
		_FD_UNLOCK(fd, FD_RDWR);
	}

	/* No longer in a cancellation point: */
	_thread_leave_cancellation_point();

	return (ret);
}
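/*
 * The getpeername()/SO_ERROR steps above are the standard way to learn
 * the outcome of a deferred connect.  A standalone sketch of the same
 * technique (assumed usage, not from this source):
 */
#include <sys/socket.h>

static int
connect_result(int fd)
{
	int		err = 0;
	socklen_t	len = sizeof(err);

	/* 0 means the connect succeeded; otherwise err holds the errno. */
	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
		return (-1);	/* getsockopt itself failed */
	return (err);
}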
int
_nanosleep(const struct timespec * time_to_sleep,
    struct timespec * time_remaining)
{
	struct pthread	*curthread = _get_curthread();
	int             ret = 0;
	struct timespec current_time;
	struct timespec current_time1;
	struct timespec remaining_time;
	struct timeval  tv;

	/* Check if the time to sleep is legal: */
	if (time_to_sleep == NULL || time_to_sleep->tv_sec < 0 ||
		time_to_sleep->tv_nsec < 0 || time_to_sleep->tv_nsec >= 1000000000) {
		/* Return an EINVAL error: */
		errno = EINVAL;
		ret = -1;
	} else {
		/*
		 * As long as we're going to get the time of day, we
		 * might as well store it in the global time of day:
		 */
		gettimeofday((struct timeval *) &_sched_tod, NULL);
		GET_CURRENT_TOD(tv);
		TIMEVAL_TO_TIMESPEC(&tv, &current_time);

		/* Calculate the time for the current thread to wake up: */
		curthread->wakeup_time.tv_sec = current_time.tv_sec + time_to_sleep->tv_sec;
		curthread->wakeup_time.tv_nsec = current_time.tv_nsec + time_to_sleep->tv_nsec;

		/* Check if the nanosecond field has overflowed: */
		if (curthread->wakeup_time.tv_nsec >= 1000000000) {
			/* Wrap the nanosecond field: */
			curthread->wakeup_time.tv_sec += 1;
			curthread->wakeup_time.tv_nsec -= 1000000000;
		}
		curthread->interrupted = 0;

		/* Reschedule the current thread to sleep: */
		_thread_kern_sched_state(PS_SLEEP_WAIT, __FILE__, __LINE__);

		/*
		 * As long as we're going to get the time of day, we
		 * might as well store it in the global time of day:
		 */
		gettimeofday((struct timeval *) &_sched_tod, NULL);
		GET_CURRENT_TOD(tv);
		TIMEVAL_TO_TIMESPEC(&tv, &current_time1);

		/* Calculate the remaining time to sleep: */
		remaining_time.tv_sec = time_to_sleep->tv_sec + current_time.tv_sec - current_time1.tv_sec;
		remaining_time.tv_nsec = time_to_sleep->tv_nsec + current_time.tv_nsec - current_time1.tv_nsec;

		/* Check if the nanosecond field has underflowed: */
		if (remaining_time.tv_nsec < 0) {
			/* Handle the underflow: */
			remaining_time.tv_sec -= 1;
			remaining_time.tv_nsec += 1000000000;
		}

		/* Check if the nanosecond field has overflowed: */
		if (remaining_time.tv_nsec >= 1000000000) {
			/* Handle the overflow: */
			remaining_time.tv_sec += 1;
			remaining_time.tv_nsec -= 1000000000;
		}

		/* Check if the sleep was longer than the required time: */
		if (remaining_time.tv_sec < 0) {
			/* Reset the time left: */
			remaining_time.tv_sec = 0;
			remaining_time.tv_nsec = 0;
		}

		/* Check if the time remaining is to be returned: */
		if (time_remaining != NULL) {
			/* Return the actual time slept: */
			time_remaining->tv_sec = remaining_time.tv_sec;
			time_remaining->tv_nsec = remaining_time.tv_nsec;
		}

		/* Check if the sleep was interrupted: */
		if (curthread->interrupted) {
			/* Return an EINTR error: */
			errno = EINTR;
			ret = -1;
		}
	}
	return (ret);
}
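/*
 * A minimal caller-side sketch (assumed usage): restarting with the
 * time_remaining output so the full interval elapses even when a
 * signal interrupts the sleep.
 */
#include <errno.h>
#include <time.h>

static void
sleep_full(struct timespec req)
{
	struct timespec	rem;

	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
		req = rem;	/* continue with what is left */
}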