Example #1
/* Handle stdout from CGI child.  Duplicate of logic from the _read
 * method of the real APR pipe bucket implementation. */
static apr_status_t aikido_read_stdout(apr_bucket *a, apr_file_t *out,
                                       const char **str, apr_size_t *len)
{
    char *buf;
    apr_status_t rv;

    *str = NULL;
    *len = APR_BUCKET_BUFF_SIZE;
    buf = apr_bucket_alloc(*len, a->list); /* XXX: check for failure? */

    rv = apr_file_read(out, buf, len);

    if (rv != APR_SUCCESS && rv != APR_EOF) {
        apr_bucket_free(buf);
        return rv;
    }

    if (*len > 0) {
        struct aikido_bucket_data *data = a->data;
        apr_bucket_heap *h;

        /* Change the current bucket to refer to what we read */
        a = apr_bucket_heap_make(a, buf, *len, apr_bucket_free);
        h = a->data;
        h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */
        *str = buf;
        APR_BUCKET_INSERT_AFTER(a, aikido_bucket_dup(data, a->list));
    }
    else {
        apr_bucket_free(buf);
        a = apr_bucket_immortal_make(a, "", 0);
        *str = a->data;
    }
    return rv;
}
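
The pattern above (morph the bucket that was just read into a heap bucket holding the data, then insert a fresh bucket of the original type right behind it) is what lets a caller simply walk the brigade forward. A minimal sketch of such a consumer (drain_brigade is a hypothetical name, not part of the module above):

#include <apr_buckets.h>

/* Hypothetical consumer: because each read morphs the current bucket into
 * a heap bucket and inserts a new source bucket right after it, a plain
 * forward walk with APR_BUCKET_NEXT() eventually sees all of the data. */
static apr_status_t drain_brigade(apr_bucket_brigade *bb)
{
    apr_bucket *e;

    for (e = APR_BRIGADE_FIRST(bb);
         e != APR_BRIGADE_SENTINEL(bb);
         e = APR_BUCKET_NEXT(e)) {
        const char *data;
        apr_size_t len;
        apr_status_t rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ);

        if (rv != APR_SUCCESS) {
            return rv;
        }
        /* ... consume len bytes starting at data ... */
    }
    return APR_SUCCESS;
}
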
Example #2
static apr_status_t pipe_bucket_read(apr_bucket *a, const char **str,
                                     apr_size_t *len, apr_read_type_e block)
{
    apr_file_t *p = a->data;
    char *buf;
    apr_status_t rv;
    apr_interval_time_t timeout;

    if (block == APR_NONBLOCK_READ) {
        apr_file_pipe_timeout_get(p, &timeout);
        apr_file_pipe_timeout_set(p, 0);
    }

    *str = NULL;
    *len = APR_BUCKET_BUFF_SIZE;
    buf = apr_bucket_alloc(*len, a->list); /* XXX: check for failure? */

    rv = apr_file_read(p, buf, len);

    if (block == APR_NONBLOCK_READ) {
        apr_file_pipe_timeout_set(p, timeout);
    }

    if (rv != APR_SUCCESS && rv != APR_EOF) {
        apr_bucket_free(buf);
        return rv;
    }
    /*
     * If there's more to read we have to keep the rest of the pipe
     * for later.  Otherwise, we'll close the pipe.
     * XXX: Note that more complicated bucket types that 
     * refer to data not in memory and must therefore have a read()
     * function similar to this one should be wary of copying this
     * code because if they have a destroy function they probably
     * want to migrate the bucket's subordinate structure from the
     * old bucket to a raw new one and adjust it as appropriate,
     * rather than destroying the old one and creating a completely
     * new bucket.
     */
    if (*len > 0) {
        apr_bucket_heap *h;
        /* Change the current bucket to refer to what we read */
        a = apr_bucket_heap_make(a, buf, *len, apr_bucket_free);
        h = a->data;
        h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */
        *str = buf;
        APR_BUCKET_INSERT_AFTER(a, apr_bucket_pipe_create(p, a->list));
    }
    else {
        apr_bucket_free(buf);
        a = apr_bucket_immortal_make(a, "", 0);
        *str = a->data;
        if (rv == APR_EOF) {
            apr_file_close(p);
        }
    }
    return APR_SUCCESS;
}
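
pipe_bucket_read() is wired into APR through the bucket's type table, which is not shown on this page. A sketch of such a table (bucket_type_my_pipe is an assumed name; the field order follows apr_bucket_type_t in apr_buckets.h, and the _noop/_notimpl helpers are the stock ones from apr-util):

static const apr_bucket_type_t bucket_type_my_pipe = {
    "MY_PIPE", 5, APR_BUCKET_DATA,
    apr_bucket_destroy_noop,        /* nothing to destroy besides the bucket */
    pipe_bucket_read,               /* the morphing read shown above */
    apr_bucket_setaside_notimpl,    /* data lives outside any pool */
    apr_bucket_split_notimpl,       /* length is unknown until read */
    apr_bucket_copy_notimpl
};
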
Example #3
APU_DECLARE(apr_bucket *) apr_bucket_immortal_create(const char *buf,
                                                     apr_size_t length,
                                                     apr_bucket_alloc_t *list)
{
    apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);

    APR_BUCKET_INIT(b);
    b->free = apr_bucket_free;
    b->list = list;
    return apr_bucket_immortal_make(b, buf, length);
}
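
A minimal usage sketch for apr_bucket_immortal_create() (pool is assumed to be an existing apr_pool_t; the string literal outlives the brigade, so the bucket never needs to copy it):

apr_bucket_alloc_t *alloc = apr_bucket_alloc_create(pool);
apr_bucket_brigade *bb = apr_brigade_create(pool, alloc);
apr_bucket *b = apr_bucket_immortal_create("Hello, world\n", 13, alloc);

APR_BRIGADE_INSERT_TAIL(bb, b);
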
static apr_status_t bucket_socket_ex_read(apr_bucket *a, const char **str,
                                          apr_size_t *len,
                                          apr_read_type_e block)
{
    socket_ex_data *data = a->data;
    apr_socket_t *p = data->sock;
    char *buf;
    apr_status_t rv;
    apr_interval_time_t timeout;

    if (block == APR_NONBLOCK_READ) {
        apr_socket_timeout_get(p, &timeout);
        apr_socket_timeout_set(p, 0);
    }

    *str = NULL;
    *len = APR_BUCKET_BUFF_SIZE;
    buf = apr_bucket_alloc(*len, a->list);

    rv = apr_socket_recv(p, buf, len);

    if (block == APR_NONBLOCK_READ) {
        apr_socket_timeout_set(p, timeout);
    }

    if (rv != APR_SUCCESS && rv != APR_EOF) {
        apr_bucket_free(buf);
        return rv;
    }

    if (*len > 0) {
        apr_bucket_heap *h;

        /* count for stats */
        *data->counter += *len;

        /* Change the current bucket to refer to what we read */
        a = apr_bucket_heap_make(a, buf, *len, apr_bucket_free);
        h = a->data;
        h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */
        *str = buf;
        APR_BUCKET_INSERT_AFTER(a, bucket_socket_ex_create(data, a->list));
    }
    else {
        apr_bucket_free(buf);
        a = apr_bucket_immortal_make(a, "", 0);
        *str = a->data;
    }
    return APR_SUCCESS;
}
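
bucket_socket_ex_create() is referenced above but not shown. Modeled on apr_bucket_socket_create() from apr-util, it would look roughly like the following sketch (bucket_type_socket_ex is an assumed type-table name, not taken from the module):

static apr_bucket *bucket_socket_ex_create(socket_ex_data *data,
                                           apr_bucket_alloc_t *list)
{
    apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);

    APR_BUCKET_INIT(b);
    b->free = apr_bucket_free;
    b->list = list;
    b->type = &bucket_type_socket_ex;  /* assumed type table for this bucket */
    b->length = (apr_size_t)(-1);      /* length unknown until the first read */
    b->start = -1;
    b->data = data;
    return b;
}
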
Example #5
static apr_status_t
bucket_read(apr_bucket *bucket, const char **str, apr_size_t *len, apr_read_type_e block) {
	char *buf;
	ssize_t ret;
	BucketData *data;
	
	data = (BucketData *) bucket->data;
	*str = NULL;
	*len = 0;
	
	if (!data->bufferResponse && block == APR_NONBLOCK_READ) {
		/*
		 * The bucket brigade that Hooks::handleRequest() passes using
		 * ap_pass_brigade() is always passed through ap_content_length_filter,
		 * which is a filter which attempts to read all data from the
		 * bucket brigade and computes the Content-Length header from
		 * that. We don't want this to happen; because suppose that the
		 * Rails application sends back 1 GB of data, then
		 * ap_content_length_filter will buffer this entire 1 GB of data
		 * in memory before passing it to the HTTP client.
		 *
		 * ap_content_length_filter aborts and passes the bucket brigade
		 * down the filter chain when it encounters an APR_EAGAIN, except
		 * for the first read. So by returning APR_EAGAIN on every
		 * non-blocking read request, we can prevent ap_content_length_filter
		 * from buffering all data.
		 */
		return APR_EAGAIN;
	}
	
	buf = (char *) apr_bucket_alloc(APR_BUCKET_BUFF_SIZE, bucket->list);
	if (buf == NULL) {
		return APR_ENOMEM;
	}
	
	do {
		ret = read(data->state->connection, buf, APR_BUCKET_BUFF_SIZE);
	} while (ret == -1 && errno == EINTR);
	
	if (ret > 0) {
		apr_bucket_heap *h;
		
		data->state->bytesRead += ret;
		
		*str = buf;
		*len = ret;
		bucket->data = NULL;
		
		/* Change the current bucket (which is a Passenger Bucket) into a heap bucket
		 * that contains the data that we just read. This newly created heap bucket
		 * will be the first in the bucket list.
		 */
		bucket = apr_bucket_heap_make(bucket, buf, *len, apr_bucket_free);
		h = (apr_bucket_heap *) bucket->data;
		h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */
		
		/* And after this newly created bucket we insert a new Passenger Bucket
		 * which can read the next chunk from the stream.
		 */
		APR_BUCKET_INSERT_AFTER(bucket, passenger_bucket_create(
			data->state, bucket->list, data->bufferResponse));
		
		/* The newly created Passenger Bucket has a reference to the session
		 * object, so we can delete data here.
		 */
		delete data;
		
		return APR_SUCCESS;
		
	} else if (ret == 0) {
		data->state->completed = true;
		delete data;
		bucket->data = NULL;
		
		apr_bucket_free(buf);
		
		bucket = apr_bucket_immortal_make(bucket, "", 0);
		*str = (const char *) bucket->data;
		*len = 0;
		return APR_SUCCESS;
		
	} else /* ret == -1 */ {
		int e = errno;
		data->state->completed = true;
		data->state->errorCode = e;
		delete data;
		bucket->data = NULL;
		apr_bucket_free(buf);
		return APR_FROM_OS_ERROR(e);
	}
}
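
The long comment inside bucket_read() relies on the consumer treating APR_EAGAIN as "no data yet" rather than as an error. A minimal sketch of that caller-side handling (try_read is a hypothetical helper, not part of Passenger):

/* Hypothetical caller: APR_EAGAIN from a non-blocking read means "retry
 * later", which is exactly what keeps ap_content_length_filter from
 * buffering the entire response. */
static apr_status_t try_read(apr_bucket *bucket)
{
    const char *str;
    apr_size_t len;
    apr_status_t rv = apr_bucket_read(bucket, &str, &len, APR_NONBLOCK_READ);

    if (APR_STATUS_IS_EAGAIN(rv)) {
        return APR_SUCCESS;   /* nothing available yet; retry on the next pass */
    }
    if (rv != APR_SUCCESS) {
        return rv;            /* real error: abort the request */
    }
    /* ... consume len bytes starting at str ... */
    return APR_SUCCESS;
}
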
// This function handles a single buffer read; the read loop runs in the caller.
static apr_status_t socket_bucket_read(apr_bucket *a, const char **str,
                                       apr_size_t *len, apr_read_type_e block)
{
    // Read data from the socket in a->data into buffer space allocated from a->list.
    apr_socket_t *p = a->data;
    char *buf;
    apr_status_t rv;
    apr_interval_time_t timeout;

    if (block == APR_NONBLOCK_READ) {
        apr_socket_timeout_get(p, &timeout);
        apr_socket_timeout_set(p, 0);
    }

    *str = NULL;
    *len = APR_BUCKET_BUFF_SIZE;
    buf = apr_bucket_alloc(*len, a->list); /* XXX: check for failure? */

    rv = apr_socket_recv(p, buf, len);

    if (block == APR_NONBLOCK_READ) {
        apr_socket_timeout_set(p, timeout);
    }

    if (rv != APR_SUCCESS && rv != APR_EOF) {
        apr_bucket_free(buf);
        return rv;
    }
    /*
     * If there's more to read we have to keep the rest of the socket
     * for later.  XXX: Note that more complicated bucket types that
     * refer to data not in memory and must therefore have a read()
     * function similar to this one should be wary of copying this
     * code because if they have a destroy function they probably
     * want to migrate the bucket's subordinate structure from the
     * old bucket to a raw new one and adjust it as appropriate,
     * rather than destroying the old one and creating a completely
     * new bucket.
     * In short: if data remains after the read, the rest of the socket is
     * kept for the next read, and non-memory bucket types that need a
     * read() like this one should copy this code with care, for the
     * reasons above.
     *
     * Even if there is nothing more to read, don't close the socket here
     * as we have to use it to send any response :)  We could shut it
     * down for reading, but there is no benefit to doing so.
     * In short: the socket is not closed or shut down once the read is done.
     */
    if (*len > 0) {
        apr_bucket_heap *h;
        /* Change the current bucket to refer to what we read */
        // Turn the data just read into a heap bucket and hand back its start address.
        a = apr_bucket_heap_make(a, buf, *len, apr_bucket_free);
        h = a->data;
        h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */
        *str = buf;
        // The rest of socket p becomes a new socket bucket inserted after this one.
        APR_BUCKET_INSERT_AFTER(a, apr_bucket_socket_create(p, a->list));
    }
    else {
        apr_bucket_free(buf);
        a = apr_bucket_immortal_make(a, "", 0);
        *str = a->data;
    }
    return APR_SUCCESS;
}