Example No. 1
/*
 *--------------------------------------------------------------
 *
 * OS_CloseRead --
 *
 *	Cancel outstanding asynchronous reads and prevent subsequent
 *      reads from completing.
 *
 * Results:
 *	The read side of the socket or file is shut down.  Return
 *	values mimic Unix shutdown():
 *		0 on success, -1 on failure
 *
 *--------------------------------------------------------------
 */
int OS_CloseRead(int fd)
{
    if(asyncIoTable[AIO_RD_IX(fd)].inUse != 0) {
        asyncIoTable[AIO_RD_IX(fd)].inUse = 0;
        FD_CLR(fd, &readFdSet);
    }

    return shutdown(fd, 0);
}
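Every example in this listing indexes a shared bookkeeping table through the AIO_RD_IX and AIO_WR_IX macros, none of which appear in the excerpts. A minimal sketch of what those supporting declarations presumably look like, with field names inferred from how the examples use them (treat the exact layout and types as assumptions, not the library's actual source):

#include <sys/select.h>

/* Sketch of the async I/O bookkeeping assumed by the examples.
 * ClientData is assumed to be an opaque pointer and OS_AsyncProc the
 * callback signature dispatched by OS_DoIo; the real headers may differ. */
typedef void *ClientData;
typedef void (*OS_AsyncProc)(ClientData clientData, int len);

typedef struct {
    OS_AsyncProc procPtr;   /* callback invoked when the I/O completes */
    ClientData clientData;  /* opaque argument handed back to procPtr */
    int fd;                 /* descriptor the request is bound to */
    int len;                /* requested transfer length */
    int offset;             /* requested offset (unused for sockets) */
    void *buf;              /* caller-supplied buffer */
    int inUse;              /* non-zero while a request is outstanding */
} AioInfo;

/* One read slot and one write slot per descriptor. */
#define AIO_RD_IX(fd) ((fd) * 2)
#define AIO_WR_IX(fd) ((fd) * 2 + 1)

static AioInfo *asyncIoTable = NULL;
static int asyncIoTableSize = 0;
static int asyncIoInUse = 0;
static int maxFd = -1;
static int numRdPosted = 0, numWrPosted = 0;
static fd_set readFdSet, writeFdSet;         /* descriptors armed for I/O */
static fd_set readFdSetPost, writeFdSetPost; /* completions not yet handled */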
Example No. 2
/*
 *--------------------------------------------------------------
 *
 * OS_AsyncRead --
 *
 *	This initiates an asynchronous read on the file
 *	handle, which may be a socket or named pipe.
 *
 *	We must also save the procPtr and clientData, so that
 *	when the I/O completes we know whose callback to invoke.
 *
 *	No I/O is performed here; the descriptor is only flagged
 *	for reading.  All completion processing (the actual read()
 *	and the callback dispatch) happens in OS_DoIo, formerly
 *	OS_Select, once select() reports the descriptor ready.
 *
 * Results:
 *	-1 if error, 0 otherwise.
 *
 * Side effects:
 *	Asynchronous I/O operation is queued for completion.
 *
 *--------------------------------------------------------------
 */
int OS_AsyncRead(int fd, int offset, void *buf, int len,
		 OS_AsyncProc procPtr, ClientData clientData)
{
    int index = AIO_RD_IX(fd);

    ASSERT(asyncIoTable != NULL);
    asyncIoInUse = TRUE;

    if(fd > maxFd)
        maxFd = fd;

    while (index >= asyncIoTableSize) {
        GrowAsyncTable();
    }

    ASSERT(asyncIoTable[index].inUse == 0);
    asyncIoTable[index].procPtr = procPtr;
    asyncIoTable[index].clientData = clientData;
    asyncIoTable[index].fd = fd;
    asyncIoTable[index].len = len;
    asyncIoTable[index].offset = offset;
    asyncIoTable[index].buf = buf;
    asyncIoTable[index].inUse = 1;
    FD_SET(fd, &readFdSet);
    return 0;
}
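A brief usage sketch, assuming the OS_AsyncProc callback receives the ClientData and the transfer length as in the declarations sketched earlier; the buffer and handler names here are hypothetical, not part of the library:

#include <stdint.h>

static char requestBuf[4096];   /* hypothetical per-connection buffer */

/* Hypothetical completion handler: len is the result of the read()
 * performed later by OS_DoIo; <= 0 means EOF or an error. */
static void onRequestData(ClientData clientData, int len)
{
    int fd = (int) (intptr_t) clientData;

    if (len <= 0) {
        OS_CloseRead(fd);       /* stop reading from this descriptor */
        return;
    }
    /* consume len bytes of requestBuf here, then call OS_AsyncRead()
     * again if more data is expected */
}

/* Arm the first read; the completion is delivered later by OS_DoIo(). */
static int startRequest(int fd)
{
    return OS_AsyncRead(fd, 0, requestBuf, sizeof(requestBuf),
                        onRequestData, (ClientData) (intptr_t) fd);
}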
Example No. 3
/*
 *--------------------------------------------------------------
 *
 * OS_Close --
 *
 *	Closes the descriptor, first clearing any asynchronous I/O
 *      state for it and, when shutdown_ok is set, lingering on the
 *      send side (see the comment in the body).
 *
 * Results:
 *	0 for success, -1 on failure
 *
 * Side effects:
 *	Async I/O bookkeeping for the descriptor is cleared.
 *
 *--------------------------------------------------------------
 */
int OS_Close(int fd, int shutdown_ok)
{
    if (fd == -1)
        return 0;

    if (asyncIoInUse) {
        int index = AIO_RD_IX(fd);

        FD_CLR(fd, &readFdSet);
        FD_CLR(fd, &readFdSetPost);
        if (asyncIoTable[index].inUse != 0) {
            asyncIoTable[index].inUse = 0;
        }

        FD_CLR(fd, &writeFdSet);
        FD_CLR(fd, &writeFdSetPost);
        index = AIO_WR_IX(fd);
        if (asyncIoTable[index].inUse != 0) {
            asyncIoTable[index].inUse = 0;
        }

        if (maxFd == fd) {
            maxFd--;
        }
    }

    /*
     * shutdown() the send side and then read() from client until EOF
     * or a timeout expires.  This is done to minimize the potential
     * that a TCP RST will be sent by our TCP stack in response to 
     * receipt of additional data from the client.  The RST would
     * cause the client to discard potentially useful response data.
     */

    if (shutdown_ok)
    {
        if (shutdown(fd, 1) == 0)
        {
            struct timeval tv;
            fd_set rfds;
            int rv;
            char trash[1024];

            FD_ZERO(&rfds);

            do 
            {
                FD_SET(fd, &rfds);
                tv.tv_sec = 2;
                tv.tv_usec = 0;
                rv = select(fd + 1, &rfds, NULL, NULL, &tv);
            }
            while (rv > 0 && read(fd, trash, sizeof(trash)) > 0);
        }
    }

    return close(fd);
}
Example No. 4
/*
 *--------------------------------------------------------------
 *
 * OS_Close --
 *
 *	Closes the descriptor, first clearing any asynchronous I/O
 *      state for it and lingering on the send side (see the
 *      comment in the body).
 *
 * Results:
 *	0 for success, -1 on failure
 *
 * Side effects:
 *	Async I/O bookkeeping for the descriptor is cleared.
 *
 *--------------------------------------------------------------
 */
int OS_Close(int fd)
{
    if (fd == -1)
        return 0;

    if (asyncIoInUse) {
        int index = AIO_RD_IX(fd);

        FD_CLR(fd, &readFdSet);
        FD_CLR(fd, &readFdSetPost);
        if (asyncIoTable[index].inUse != 0) {
            asyncIoTable[index].inUse = 0;
        }

        FD_CLR(fd, &writeFdSet);
        FD_CLR(fd, &writeFdSetPost);
        index = AIO_WR_IX(fd);
        if (asyncIoTable[index].inUse != 0) {
            asyncIoTable[index].inUse = 0;
        }

        if (maxFd == fd) {
            maxFd--;
        }
    }

    /*
     * shutdown() the send side and then read() from client until EOF
     * or a timeout expires.  This is done to minimize the potential
     * that a TCP RST will be sent by our TCP stack in response to 
     * receipt of additional data from the client.  The RST would
     * cause the client to discard potentially useful response data.
     */

    if (shutdown(fd, 1) == 0)
    {
        struct pollfd pfd;
        int rv;
        char trash[1024];

        pfd.fd = fd;
        pfd.events = POLLIN;

        do 
        {
            rv = poll(&pfd, 1, libfcgiOsClosePollTimeout);
        }
        while (rv > 0 && read(fd, trash, sizeof(trash)) > 0);
    }

    return close(fd);
}
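This poll()-based variant of the lingering close reads its timeout from libfcgiOsClosePollTimeout, which is declared elsewhere in the module. A plausible sketch of how such a knob might be declared and made tunable; the environment-variable override below is purely an assumption for illustration:

#include <stdlib.h>

/* Drain timeout in milliseconds; 2000 ms matches the hard-coded
 * 2-second select() timeout used in the earlier OS_Close variant. */
static int libfcgiOsClosePollTimeout = 2000;

static void initClosePollTimeout(void)
{
    /* LIBFCGI_OS_CLOSE_POLL_TIMEOUT is a hypothetical override knob. */
    const char *s = getenv("LIBFCGI_OS_CLOSE_POLL_TIMEOUT");
    if (s != NULL) {
        libfcgiOsClosePollTimeout = atoi(s);
    }
}

Compared with the select()-based drain in Example No. 3, poll() also avoids the FD_SETSIZE limit that select() imposes on large descriptor values.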
Example No. 5
/*
 *--------------------------------------------------------------
 *
 * OS_AsyncReadStdin --
 *
 *	This initiates an asynchronous read on the standard
 *	input handle.
 *
 *      The abstraction is necessary because Windows NT does not
 *      have a clean way of "select"ing a file descriptor for
 *      I/O.
 *
 * Results:
 *	-1 if error, 0 otherwise.
 *
 * Side effects:
 *	Asynchronous bit is set for STDIN_FILENO in readFdSet and
 *      the request is enqueued in asyncIoTable.
 *
 *--------------------------------------------------------------
 */
int OS_AsyncReadStdin(void *buf, int len, OS_AsyncProc procPtr,
                      ClientData clientData)
{
    int index = AIO_RD_IX(STDIN_FILENO);

    ASSERT(asyncIoTable[index].inUse == 0);
    asyncIoTable[index].procPtr = procPtr;
    asyncIoTable[index].clientData = clientData;
    asyncIoTable[index].fd = STDIN_FILENO;
    asyncIoTable[index].len = len;
    asyncIoTable[index].offset = 0;
    asyncIoTable[index].buf = buf;
    asyncIoTable[index].inUse = 1;
    FD_SET(STDIN_FILENO, &readFdSet);
    if(STDIN_FILENO > maxFd)
        maxFd = STDIN_FILENO;
    return 0;
}
Example No. 6
/*
 *--------------------------------------------------------------
 *
 * OS_Close --
 *
 *	Closes the descriptor.  This is a pass through to the
 *      Unix close.
 *
 * Results:
 *	0 for success, -1 on failure
 *
 * Side effects:
 *	None.
 *
 *--------------------------------------------------------------
 */
int OS_Close(int fd)
{
    int index = AIO_RD_IX(fd);

    FD_CLR(fd, &readFdSet);
    FD_CLR(fd, &readFdSetPost);
    if(asyncIoTable[index].inUse != 0) {
        asyncIoTable[index].inUse = 0;
    }

    FD_CLR(fd, &writeFdSet);
    FD_CLR(fd, &writeFdSetPost);
    index = AIO_WR_IX(fd);
    if(asyncIoTable[index].inUse != 0) {
        asyncIoTable[index].inUse = 0;
    }
    if(maxFd == fd)
        maxFd--;
    return close(fd);
}
Example No. 7
/*
 *--------------------------------------------------------------
 *
 * OS_DoIo --
 *
 *	This function was formerly OS_Select.  Its purpose is
 *      to pull I/O completion events off the queue and dispatch
 *      them to the appropriate place.
 *
 * Results:
 *	Returns 0.
 *
 * Side effects:
 *	Handlers are called.
 *
 *--------------------------------------------------------------
 */
int OS_DoIo(struct timeval *tmo)
{
    int fd, len, selectStatus;
    OS_AsyncProc procPtr;
    ClientData clientData;
    AioInfo *aioPtr;
    fd_set readFdSetCpy;
    fd_set writeFdSetCpy;

    asyncIoInUse = TRUE;
    FD_ZERO(&readFdSetCpy);
    FD_ZERO(&writeFdSetCpy);

    for(fd = 0; fd <= maxFd; fd++) {
        if(FD_ISSET(fd, &readFdSet)) {
            FD_SET(fd, &readFdSetCpy);
        }
        if(FD_ISSET(fd, &writeFdSet)) {
            FD_SET(fd, &writeFdSetCpy);
        }
    }

    /*
     * If there were no completed events from a prior call, see if there's
     * any work to do.
     */
    if(numRdPosted == 0 && numWrPosted == 0) {
        selectStatus = select((maxFd+1), &readFdSetCpy, &writeFdSetCpy,
                              NULL, tmo);
        if(selectStatus < 0) {
            exit(errno);
	}

        for(fd = 0; fd <= maxFd; fd++) {
	    /*
	     * Build up a list of completed events.  We'll work off of
	     * this list as opposed to looping through the read and write
	     * fd sets since they can be affected by a callback routine.
	     */
	    if(FD_ISSET(fd, &readFdSetCpy)) {
	        numRdPosted++;
		FD_SET(fd, &readFdSetPost);
		FD_CLR(fd, &readFdSet);
	    }

            if(FD_ISSET(fd, &writeFdSetCpy)) {
	        numWrPosted++;
	        FD_SET(fd, &writeFdSetPost);
		FD_CLR(fd, &writeFdSet);
	    }
        }
    }

    if(numRdPosted == 0 && numWrPosted == 0)
        return 0;

    for(fd = 0; fd <= maxFd; fd++) {
        /*
	 * Do reads and dispatch callback.
	 */
        if(FD_ISSET(fd, &readFdSetPost)
	   && asyncIoTable[AIO_RD_IX(fd)].inUse) {

	    numRdPosted--;
	    FD_CLR(fd, &readFdSetPost);
	    aioPtr = &asyncIoTable[AIO_RD_IX(fd)];

	    len = read(aioPtr->fd, aioPtr->buf, aioPtr->len);

	    procPtr = aioPtr->procPtr;
	    aioPtr->procPtr = NULL;
	    clientData = aioPtr->clientData;
	    aioPtr->inUse = 0;

	    (*procPtr)(clientData, len);
	}

        /*
	 * Do writes and dispatch callback.
	 */
        if(FD_ISSET(fd, &writeFdSetPost) &&
           asyncIoTable[AIO_WR_IX(fd)].inUse) {

	    numWrPosted--;
	    FD_CLR(fd, &writeFdSetPost);
	    aioPtr = &asyncIoTable[AIO_WR_IX(fd)];

	    len = write(aioPtr->fd, aioPtr->buf, aioPtr->len);

	    procPtr = aioPtr->procPtr;
	    aioPtr->procPtr = NULL;
	    clientData = aioPtr->clientData;
	    aioPtr->inUse = 0;
	    (*procPtr)(clientData, len);
	}
    }
    return 0;
}
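To tie the pieces together, a hedged sketch of the loop that would drive this layer: descriptors are armed with OS_AsyncRead() or OS_AsyncReadStdin(), and OS_DoIo() is called repeatedly to wait for readiness, perform the reads and writes, and invoke each registered callback. The loop structure and the exitRequested flag are illustrative only:

#include <sys/time.h>

static volatile int exitRequested = 0;   /* hypothetical shutdown flag */

static void runEventLoop(void)
{
    while (!exitRequested) {
        struct timeval tmo;

        tmo.tv_sec = 1;      /* wake up at least once per second */
        tmo.tv_usec = 0;

        /* Blocks in select() unless completions are already posted,
         * then performs the pending I/O and dispatches the callbacks. */
        OS_DoIo(&tmo);
    }
}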