Example #1
static int
insert_html_fragment_at_head(ap_filter_t * f,
			     apr_bucket_brigade * bb, triger_conf_t * cfg)
{
    triger_module_ctx_t *ctx = f->ctx;
    apr_bucket *tmp_b = ctx->triger_bucket->b;
    int ret = 0;
    apr_size_t pos;
    apr_bucket *js;
    if (ctx->find)
	goto last;
    js = apr_bucket_transient_create(cfg->js, (apr_size_t)
				     strlen(cfg->js), /* exclude the trailing NUL from the output */
				     f->r->connection->bucket_alloc);

    if (!js)
	goto last;
    if (ctx->triger_bucket->head_start_tag_pos != -1) {
	ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
		      "The <head> tag was found, inserting right after it: %s",
		      cfg->js);
	pos = ctx->triger_bucket->head_start_tag_pos;
	/* split only if the insertion point falls inside the bucket */
	if (pos + 1 < ctx->triger_bucket->len)
	    apr_bucket_split(tmp_b, pos + 1);
	APR_BUCKET_INSERT_AFTER(tmp_b, js);
	ctx->find = 1;
    }
  last:
    return ret;
}
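The idiom above — split the bucket at the insertion offset, then chain a new bucket in after the first half — can be read in isolation. A minimal sketch under assumed names (b, pos, fragment and alloc are illustrative, not part of mod_triger):

/* Sketch of the split-and-insert idiom: split b at byte offset pos and
 * chain a transient bucket holding `fragment` between the two halves. */
static void insert_fragment_after(apr_bucket *b, apr_size_t pos,
                                  const char *fragment,
                                  apr_bucket_alloc_t *alloc)
{
    apr_bucket *frag = apr_bucket_transient_create(fragment,
                                                   strlen(fragment), alloc);
    if (pos < b->length)
        apr_bucket_split(b, pos);     /* b now covers [0, pos) */
    APR_BUCKET_INSERT_AFTER(b, frag); /* fragment follows the first half */
}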
Example #2
/* Handle stdout from CGI child.  Duplicate of logic from the _read
 * method of the real APR pipe bucket implementation. */
static apr_status_t aikido_read_stdout(apr_bucket *a, apr_file_t *out,
                                    const char **str, apr_size_t *len)
{
    char *buf;
    apr_status_t rv;

    *str = NULL;
    *len = APR_BUCKET_BUFF_SIZE;
    buf = apr_bucket_alloc(*len, a->list); /* XXX: check for failure? */

    rv = apr_file_read(out, buf, len);

    if (rv != APR_SUCCESS && rv != APR_EOF) {
        apr_bucket_free(buf);
        return rv;
    }

    if (*len > 0) {
        struct aikido_bucket_data *data = a->data;
        apr_bucket_heap *h;

        /* Change the current bucket to refer to what we read */
        a = apr_bucket_heap_make(a, buf, *len, apr_bucket_free);
        h = a->data;
        h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */
        *str = buf;
        APR_BUCKET_INSERT_AFTER(a, aikido_bucket_dup(data, a->list));
    }
    else {
        apr_bucket_free(buf);
        a = apr_bucket_immortal_make(a, "", 0);
        *str = a->data;
    }
    return rv;
}
Example #3
static apr_status_t pipe_bucket_read(apr_bucket *a, const char **str,
                                     apr_size_t *len, apr_read_type_e block)
{
    apr_file_t *p = a->data;
    char *buf;
    apr_status_t rv;
    apr_interval_time_t timeout;

    if (block == APR_NONBLOCK_READ) {
        apr_file_pipe_timeout_get(p, &timeout);
        apr_file_pipe_timeout_set(p, 0);
    }

    *str = NULL;
    *len = APR_BUCKET_BUFF_SIZE;
    buf = apr_bucket_alloc(*len, a->list); /* XXX: check for failure? */

    rv = apr_file_read(p, buf, len);

    if (block == APR_NONBLOCK_READ) {
        apr_file_pipe_timeout_set(p, timeout);
    }

    if (rv != APR_SUCCESS && rv != APR_EOF) {
        apr_bucket_free(buf);
        return rv;
    }
    /*
     * If there's more to read we have to keep the rest of the pipe
     * for later.  Otherwise, we'll close the pipe.
     * XXX: Note that more complicated bucket types that 
     * refer to data not in memory and must therefore have a read()
     * function similar to this one should be wary of copying this
     * code because if they have a destroy function they probably
     * want to migrate the bucket's subordinate structure from the
     * old bucket to a raw new one and adjust it as appropriate,
     * rather than destroying the old one and creating a completely
     * new bucket.
     */
    if (*len > 0) {
        apr_bucket_heap *h;
        /* Change the current bucket to refer to what we read */
        a = apr_bucket_heap_make(a, buf, *len, apr_bucket_free);
        h = a->data;
        h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */
        *str = buf;
        APR_BUCKET_INSERT_AFTER(a, apr_bucket_pipe_create(p, a->list));
    }
    else {
        apr_bucket_free(buf);
        a = apr_bucket_immortal_make(a, "", 0);
        *str = a->data;
        if (rv == APR_EOF) {
            apr_file_close(p);
        }
    }
    return APR_SUCCESS;
}
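From the caller's side, the morph-and-insert pattern above is invisible except in its effect on the brigade. A hedged sketch of what a consumer observes, assuming bb is a brigade whose first bucket is a pipe bucket:

/* Sketch of the caller's view, assuming bb starts with a pipe bucket. */
apr_bucket *e = APR_BRIGADE_FIRST(bb);
const char *data;
apr_size_t dlen;
apr_status_t rv = apr_bucket_read(e, &data, &dlen, APR_BLOCK_READ);
if (rv == APR_SUCCESS && dlen > 0) {
    /* e has morphed into a heap bucket holding dlen bytes at data;
     * APR_BUCKET_NEXT(e) is now a fresh pipe bucket for the unread rest. */
}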
Example #4
static apr_status_t lob_bucket_read(apr_bucket *e, const char **str,
                                    apr_size_t *len, apr_read_type_e block)
{
    apr_bucket_lob *a = e->data;
    const apr_dbd_row_t *row = a->row;
    apr_dbd_results_t *res = row->res;
    int col = a->col;
    apr_bucket *b = NULL;
    int rv;
    apr_size_t blength = e->length;  /* bytes remaining in the LOB past offset */
    apr_off_t boffset = e->start;
    MYSQL_BIND *bind = &res->bind[col];

    *str = NULL;  /* in case we die prematurely */

    /* fetch from offset if not at the beginning */
    if (boffset > 0) {
        rv = mysql_stmt_fetch_column(res->statement, bind, col,
                                     (unsigned long) boffset);
        if (rv != 0) {
            return APR_EGENERAL;
        }
    }
    blength -= blength > bind->buffer_length ? bind->buffer_length : blength;
    *len = e->length - blength;
    *str = bind->buffer;

    /* allocate new buffer, since we used this one for the bucket */
    bind->buffer = apr_palloc(res->pool, bind->buffer_length);

    /*
     * Change the current bucket to refer to what we read,
     * even if we read nothing because we hit EOF.
     */
    apr_bucket_pool_make(e, *str, *len, res->pool);

    /* If we have more to read from the field, then create another bucket */
    if (blength > 0) {
        /* for efficiency, we can just build a new apr_bucket struct
         * to wrap around the existing LOB bucket */
        b = apr_bucket_alloc(sizeof(*b), e->list);
        b->start  = boffset + *len;
        b->length = blength;
        b->data   = a;
        b->type   = &apr_bucket_type_lob;
        b->free   = apr_bucket_free;
        b->list   = e->list;
        APR_BUCKET_INSERT_AFTER(e, b);
    }
    else {
        lob_bucket_destroy(a);
    }

    return APR_SUCCESS;
}
Example #5
apr_status_t
chunk_bucket_split(apr_bucket *e, apr_size_t point)
{
	dav_resource_private *ctx = e->data;
	apr_bucket *splitted = NULL;
	DAV_DEBUG_REQ(ctx->request, 0, "Calling bucket split, want to split bucket(start=%"APR_OFF_T_FMT",length=%"APR_SIZE_T_FMT") at %"APR_SIZE_T_FMT, e->start, e->length, point);
	apr_bucket_copy(e, &splitted);
	splitted->start = e->start + point;
	splitted->length = e->length - point;
	e->length = point;
	APR_BUCKET_INSERT_AFTER(e, splitted);
	DAV_DEBUG_REQ(ctx->request, 0, "Split result : b1(start=%"APR_OFF_T_FMT",length=%"APR_SIZE_T_FMT"), b2(start=%"APR_OFF_T_FMT",length=%"APR_SIZE_T_FMT")", e->start, e->length, splitted->start, splitted->length);
	return APR_SUCCESS;
}
Example #6
static apr_status_t bucket_socket_ex_read(apr_bucket *a, const char **str,
                                          apr_size_t *len,
                                          apr_read_type_e block)
{
    socket_ex_data *data = a->data;
    apr_socket_t *p = data->sock;
    char *buf;
    apr_status_t rv;
    apr_interval_time_t timeout;

    if (block == APR_NONBLOCK_READ) {
        apr_socket_timeout_get(p, &timeout);
        apr_socket_timeout_set(p, 0);
    }

    *str = NULL;
    *len = APR_BUCKET_BUFF_SIZE;
    buf = apr_bucket_alloc(*len, a->list);

    rv = apr_socket_recv(p, buf, len);

    if (block == APR_NONBLOCK_READ) {
        apr_socket_timeout_set(p, timeout);
    }

    if (rv != APR_SUCCESS && rv != APR_EOF) {
        apr_bucket_free(buf);
        return rv;
    }

    if (*len > 0) {
        apr_bucket_heap *h;

        /* count for stats */
        *data->counter += *len;

        /* Change the current bucket to refer to what we read */
        a = apr_bucket_heap_make(a, buf, *len, apr_bucket_free);
        h = a->data;
        h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */
        *str = buf;
        APR_BUCKET_INSERT_AFTER(a, bucket_socket_ex_create(data, a->list));
    }
    else {
        apr_bucket_free(buf);
        a = apr_bucket_immortal_make(a, "", 0);
        *str = a->data;
    }
    return APR_SUCCESS;
}
Example #7
APU_DECLARE_NONSTD(apr_status_t) apr_bucket_simple_split(apr_bucket *a,
                                                         apr_size_t point)
{
    apr_bucket *b;

    if (point > a->length) {
        return APR_EINVAL;
    }

    apr_bucket_simple_copy(a, &b);

    a->length  = point;
    b->length -= point;
    b->start  += point;

    APR_BUCKET_INSERT_AFTER(a, b);

    return APR_SUCCESS;
}
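A self-contained sketch of the split in action. Immortal buckets delegate their split to apr_bucket_simple_split(), so after the call the brigade holds two buckets, "Hello" and ", world", sharing one storage block. Error checking is elided for brevity.

#include <stdio.h>
#include <apr_general.h>
#include <apr_buckets.h>

int main(void)
{
    apr_pool_t *pool;
    apr_bucket_alloc_t *ba;
    apr_bucket_brigade *bb;
    apr_bucket *b, *e;

    apr_initialize();
    apr_pool_create(&pool, NULL);
    ba = apr_bucket_alloc_create(pool);
    bb = apr_brigade_create(pool, ba);

    b = apr_bucket_immortal_create("Hello, world", 12, ba);
    APR_BRIGADE_INSERT_TAIL(bb, b);
    apr_bucket_split(b, 5);  /* dispatches to apr_bucket_simple_split */

    for (e = APR_BRIGADE_FIRST(bb); e != APR_BRIGADE_SENTINEL(bb);
         e = APR_BUCKET_NEXT(e)) {
        const char *str;
        apr_size_t len;
        apr_bucket_read(e, &str, &len, APR_BLOCK_READ);
        printf("bucket: %.*s\n", (int)len, str);
    }

    apr_brigade_destroy(bb);
    apr_terminate();
    return 0;
}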
Example #8
static int
insert_html_fragment_at_tail(ap_filter_t * f,
			     apr_bucket_brigade * bb, triger_conf_t * cfg)
{
    apr_bucket *tmp_b;
    int ret = 0;
    apr_size_t pos;
    apr_bucket *js;
    triger_module_ctx_t *ctx = f->ctx;
    if (ctx->find)
	goto last;

    js = apr_bucket_transient_create(cfg->js, (apr_size_t)
				     strlen(cfg->js), /* exclude the trailing NUL from the output */
				     f->r->connection->bucket_alloc);

    if (!js)
	goto last;
    if (ctx->triger_bucket->body_end_tag_pos == -1
	&& ctx->triger_bucket->html_end_tag_pos == -1) {
	ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
		      "Neither </body> nor </html> tag was found, inserting at the end: %s",
		      cfg->js);
	tmp_b = APR_BRIGADE_LAST(bb);
	APR_BUCKET_INSERT_BEFORE(tmp_b, js);
    } else {
	tmp_b = ctx->triger_bucket->b;
	ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
		      "A </body> or </html> tag was found, inserting there: %s",
		      cfg->js);
	pos = ctx->triger_bucket->body_end_tag_pos != -1
	    ? ctx->triger_bucket->body_end_tag_pos
	    : ctx->triger_bucket->html_end_tag_pos;
	apr_bucket_split(tmp_b, pos);
	APR_BUCKET_INSERT_AFTER(tmp_b, js);
    }
    ctx->find = 1;
  last:
    return ret;
}
Example #9
static apr_status_t
bucket_read(apr_bucket *bucket, const char **str, apr_size_t *len, apr_read_type_e block) {
	char *buf;
	ssize_t ret;
	BucketData *data;
	
	data = (BucketData *) bucket->data;
	*str = NULL;
	*len = 0;
	
	if (!data->bufferResponse && block == APR_NONBLOCK_READ) {
		/*
		 * The bucket brigade that Hooks::handleRequest() passes using
		 * ap_pass_brigade() is always passed through ap_content_length_filter,
		 * which is a filter which attempts to read all data from the
		 * bucket brigade and computes the Content-Length header from
		 * that. We don't want this to happen; because suppose that the
		 * Rails application sends back 1 GB of data, then
		 * ap_content_length_filter will buffer this entire 1 GB of data
		 * in memory before passing it to the HTTP client.
		 *
		 * ap_content_length_filter aborts and passes the bucket brigade
		 * down the filter chain when it encounters an APR_EAGAIN, except
		 * for the first read. So by returning APR_EAGAIN on every
		 * non-blocking read request, we can prevent ap_content_length_filter
		 * from buffering all data.
		 */
		return APR_EAGAIN;
	}
	
	buf = (char *) apr_bucket_alloc(APR_BUCKET_BUFF_SIZE, bucket->list);
	if (buf == NULL) {
		return APR_ENOMEM;
	}
	
	do {
		ret = read(data->state->connection, buf, APR_BUCKET_BUFF_SIZE);
	} while (ret == -1 && errno == EINTR);
	
	if (ret > 0) {
		apr_bucket_heap *h;
		
		data->state->bytesRead += ret;
		
		*str = buf;
		*len = ret;
		bucket->data = NULL;
		
		/* Change the current bucket (which is a Passenger Bucket) into a heap bucket
		 * that contains the data that we just read. This newly created heap bucket
		 * will be the first in the bucket list.
		 */
		bucket = apr_bucket_heap_make(bucket, buf, *len, apr_bucket_free);
		h = (apr_bucket_heap *) bucket->data;
		h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */
		
		/* And after this newly created bucket we insert a new Passenger Bucket
		 * which can read the next chunk from the stream.
		 */
		APR_BUCKET_INSERT_AFTER(bucket, passenger_bucket_create(
			data->state, bucket->list, data->bufferResponse));
		
		/* The newly created Passenger Bucket has a reference to the session
		 * object, so we can delete data here.
		 */
		delete data;
		
		return APR_SUCCESS;
		
	} else if (ret == 0) {
		data->state->completed = true;
		delete data;
		bucket->data = NULL;
		
		apr_bucket_free(buf);
		
		bucket = apr_bucket_immortal_make(bucket, "", 0);
		*str = (const char *) bucket->data;
		*len = 0;
		return APR_SUCCESS;
		
	} else /* ret == -1 */ {
		int e = errno;
		data->state->completed = true;
		data->state->errorCode = e;
		delete data;
		bucket->data = NULL;
		apr_bucket_free(buf);
		return APR_FROM_OS_ERROR(e);
	}
}
Example #10
static apr_status_t file_bucket_read(apr_bucket *e, const char **str,
                                     apr_size_t *len, apr_read_type_e block)
{
    apr_bucket_file *a = e->data;
    apr_file_t *f = a->fd;
    apr_bucket *b = NULL;
    char *buf;
    apr_status_t rv;
    apr_size_t filelength = e->length;  /* bytes remaining in file past offset */
    apr_off_t fileoffset = e->start;
#if APR_HAS_THREADS && !APR_HAS_XTHREAD_FILES
    apr_int32_t flags;
#endif

#if APR_HAS_MMAP
    if (file_make_mmap(e, filelength, fileoffset, a->readpool)) {
        return apr_bucket_read(e, str, len, block);
    }
#endif

#if APR_HAS_THREADS && !APR_HAS_XTHREAD_FILES
    if ((flags = apr_file_flags_get(f)) & APR_FOPEN_XTHREAD) {
        /* this file descriptor is shared across multiple threads and
         * this OS doesn't support that natively, so as a workaround
         * we must reopen the file into a->readpool */
        const char *fname;
        apr_file_name_get(&fname, f);

        rv = apr_file_open(&f, fname, (flags & ~APR_FOPEN_XTHREAD), 0, a->readpool);
        if (rv != APR_SUCCESS)
            return rv;

        a->fd = f;
    }
#endif

    *len = (filelength > APR_BUCKET_BUFF_SIZE)
               ? APR_BUCKET_BUFF_SIZE
               : filelength;
    *str = NULL;  /* in case we die prematurely */
    buf = apr_bucket_alloc(*len, e->list);

    /* Handle offset ... */
    rv = apr_file_seek(f, APR_SET, &fileoffset);
    if (rv != APR_SUCCESS) {
        apr_bucket_free(buf);
        return rv;
    }
    rv = apr_file_read(f, buf, len);
    if (rv != APR_SUCCESS && rv != APR_EOF) {
        apr_bucket_free(buf);
        return rv;
    }
    filelength -= *len;
    /*
     * Change the current bucket to refer to what we read,
     * even if we read nothing because we hit EOF.
     */
    apr_bucket_heap_make(e, buf, *len, apr_bucket_free);

    /* If we have more to read from the file, then create another bucket */
    if (filelength > 0 && rv != APR_EOF) {
        /* for efficiency, we can just build a new apr_bucket struct
         * to wrap around the existing file bucket */
        b = apr_bucket_alloc(sizeof(*b), e->list);
        b->start  = fileoffset + (*len);
        b->length = filelength;
        b->data   = a;
        b->type   = &apr_bucket_type_file;
        b->free   = apr_bucket_free;
        b->list   = e->list;
        APR_BUCKET_INSERT_AFTER(e, b);
    }
    else {
        file_bucket_destroy(a);
    }

    *str = buf;
    return rv;
}
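For context, a hedged sketch of how a file usually enters a brigade so that file_bucket_read() above is what eventually drains it. Here bb and p (a pool) are assumed to exist and the path is illustrative; apr_brigade_insert_file() (apr-util 1.3+) may itself emit several buckets for very large files.

/* Sketch only: open a file and hand it to the brigade as a file bucket. */
apr_file_t *fd;
apr_finfo_t finfo;
apr_file_open(&fd, "/tmp/example.txt", APR_FOPEN_READ,
              APR_FPROT_OS_DEFAULT, p);
apr_file_info_get(&finfo, APR_FINFO_SIZE, fd);
apr_brigade_insert_file(bb, fd, 0, finfo.size, p);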
Example #11
apr_status_t ap_core_output_filter(ap_filter_t *f, apr_bucket_brigade *b)
{
    apr_status_t rv;
    apr_bucket_brigade *more;
    conn_rec *c = f->c;
    core_net_rec *net = f->ctx;
    core_output_filter_ctx_t *ctx = net->out_ctx;
    apr_read_type_e eblock = APR_NONBLOCK_READ;
    apr_pool_t *input_pool = b->p;

    /* Fail quickly if the connection has already been aborted. */
    if (c->aborted) {
        apr_brigade_cleanup(b);
        return APR_ECONNABORTED;
    }

    if (ctx == NULL) {
        ctx = apr_pcalloc(c->pool, sizeof(*ctx));
        net->out_ctx = ctx;
    }

    /* If we have a saved brigade, concatenate the new brigade to it */
    if (ctx->b) {
        APR_BRIGADE_CONCAT(ctx->b, b);
        b = ctx->b;
        ctx->b = NULL;
    }

    /* Perform multiple passes over the brigade, sending batches of output
       to the connection. */
    while (b && !APR_BRIGADE_EMPTY(b)) {
        apr_size_t nbytes = 0;
        apr_bucket *last_e = NULL; /* initialized for debugging */
        apr_bucket *e;

        /* one group of iovecs per pass over the brigade */
        apr_size_t nvec = 0;
        apr_size_t nvec_trailers = 0;
        struct iovec vec[MAX_IOVEC_TO_WRITE];
        struct iovec vec_trailers[MAX_IOVEC_TO_WRITE];

        /* one file per pass over the brigade */
        apr_file_t *fd = NULL;
        apr_size_t flen = 0;
        apr_off_t foffset = 0;

        /* keep track of buckets that we've concatenated
         * to avoid small writes
         */
        apr_bucket *last_merged_bucket = NULL;

        /* tail of brigade if we need another pass */
        more = NULL;

        /* Iterate over the brigade: collect iovecs and/or a file */
        for (e = APR_BRIGADE_FIRST(b);
                e != APR_BRIGADE_SENTINEL(b);
                e = APR_BUCKET_NEXT(e))
        {
            /* keep track of the last bucket processed */
            last_e = e;
            if (APR_BUCKET_IS_EOS(e) || AP_BUCKET_IS_EOC(e)) {
                break;
            }
            else if (APR_BUCKET_IS_FLUSH(e)) {
                if (e != APR_BRIGADE_LAST(b)) {
                    more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
                }
                break;
            }

            /* It doesn't make any sense to use sendfile for a file bucket
             * that represents 10 bytes.
             */
            else if (APR_BUCKET_IS_FILE(e)
                     && (e->length >= AP_MIN_SENDFILE_BYTES)) {
                apr_bucket_file *a = e->data;

                /* We can't handle more than one file bucket at a time
                 * so we split here and send the file we have already
                 * found.
                 */
                if (fd) {
                    more = apr_brigade_split(b, e);
                    break;
                }

                fd = a->fd;
                flen = e->length;
                foffset = e->start;
            }
            else {
                const char *str;
                apr_size_t n;

                rv = apr_bucket_read(e, &str, &n, eblock);
                if (APR_STATUS_IS_EAGAIN(rv)) {
                    /* send what we have so far since we shouldn't expect more
                     * output for a while...  next time we read, block
                     */
                    more = apr_brigade_split(b, e);
                    eblock = APR_BLOCK_READ;
                    break;
                }
                eblock = APR_NONBLOCK_READ;
                if (n) {
                    if (!fd) {
                        if (nvec == MAX_IOVEC_TO_WRITE) {
                            /* woah! too many. buffer them up, for use later. */
                            apr_bucket *temp, *next;
                            apr_bucket_brigade *temp_brig;

                            if (nbytes >= AP_MIN_BYTES_TO_WRITE) {
                                /* We have enough data in the iovec
                                 * to justify doing a writev
                                 */
                                more = apr_brigade_split(b, e);
                                break;
                            }

                            /* Create a temporary brigade as a means
                             * of concatenating a bunch of buckets together
                             */
                            temp_brig = apr_brigade_create(f->c->pool,
                                                           f->c->bucket_alloc);
                            if (last_merged_bucket) {
                                /* If we've concatenated together small
                                 * buckets already in a previous pass,
                                 * the initial buckets in this brigade
                                 * are heap buckets that may have extra
                                 * space left in them (because they
                                 * were created by apr_brigade_write()).
                                 * We can take advantage of this by
                                 * building the new temp brigade out of
                                 * these buckets, so that the content
                                 * in them doesn't have to be copied again.
                                 */
                                APR_BRIGADE_PREPEND(b, temp_brig);
                                brigade_move(temp_brig, b, APR_BUCKET_NEXT(last_merged_bucket));
                            }

                            temp = APR_BRIGADE_FIRST(b);
                            while (temp != e) {
                                apr_bucket *d;
                                rv = apr_bucket_read(temp, &str, &n, APR_BLOCK_READ);
                                apr_brigade_write(temp_brig, NULL, NULL, str, n);
                                d = temp;
                                temp = APR_BUCKET_NEXT(temp);
                                apr_bucket_delete(d);
                            }

                            nvec = 0;
                            nbytes = 0;
                            temp = APR_BRIGADE_FIRST(temp_brig);
                            APR_BUCKET_REMOVE(temp);
                            APR_BRIGADE_INSERT_HEAD(b, temp);
                            apr_bucket_read(temp, &str, &n, APR_BLOCK_READ);
                            vec[nvec].iov_base = (char*) str;
                            vec[nvec].iov_len = n;
                            nvec++;

                            /* Just in case the temporary brigade has
                             * multiple buckets, recover the rest of
                             * them and put them in the brigade that
                             * we're sending.
                             */
                            for (next = APR_BRIGADE_FIRST(temp_brig);
                                    next != APR_BRIGADE_SENTINEL(temp_brig);
                                    next = APR_BRIGADE_FIRST(temp_brig)) {
                                APR_BUCKET_REMOVE(next);
                                APR_BUCKET_INSERT_AFTER(temp, next);
                                temp = next;
                                apr_bucket_read(next, &str, &n,
                                                APR_BLOCK_READ);
                                vec[nvec].iov_base = (char*) str;
                                vec[nvec].iov_len = n;
                                nvec++;
                            }

                            apr_brigade_destroy(temp_brig);

                            last_merged_bucket = temp;
                            e = temp;
                            last_e = e;
                        }
                        else {
                            vec[nvec].iov_base = (char*) str;
                            vec[nvec].iov_len = n;
                            nvec++;
                        }
                    }
                    else {
                        /* The bucket is a trailer to a file bucket */

                        if (nvec_trailers == MAX_IOVEC_TO_WRITE) {
                            /* woah! too many. stop now. */
                            more = apr_brigade_split(b, e);
                            break;
                        }

                        vec_trailers[nvec_trailers].iov_base = (char*) str;
                        vec_trailers[nvec_trailers].iov_len = n;
                        nvec_trailers++;
                    }

                    nbytes += n;
                }
            }
        }


        /* Completed iterating over the brigade, now determine if we want
         * to buffer the brigade or send the brigade out on the network.
         *
         * Save if we haven't accumulated enough bytes to send, the connection
         * is not about to be closed, and:
         *
         *   1) we didn't see a file, we don't have more passes over the
         *      brigade to perform,  AND we didn't stop at a FLUSH bucket.
         *      (IOW, we will save plain old bytes such as HTTP headers)
         * or
         *   2) we hit the EOS and have a keep-alive connection
         *      (IOW, this response is a bit more complex, but we save it
         *       with the hope of concatenating with another response)
         */
        if (nbytes + flen < AP_MIN_BYTES_TO_WRITE
                && !AP_BUCKET_IS_EOC(last_e)
                && ((!fd && !more && !APR_BUCKET_IS_FLUSH(last_e))
                    || (APR_BUCKET_IS_EOS(last_e)
                        && c->keepalive == AP_CONN_KEEPALIVE))) {

            /* NEVER save an EOS in here.  If we are saving a brigade with
             * an EOS bucket, then we are doing keepalive connections, and
             * we want to process the second request fully.
             */
            if (APR_BUCKET_IS_EOS(last_e)) {
                apr_bucket *bucket;
                int file_bucket_saved = 0;
                apr_bucket_delete(last_e);
                for (bucket = APR_BRIGADE_FIRST(b);
                        bucket != APR_BRIGADE_SENTINEL(b);
                        bucket = APR_BUCKET_NEXT(bucket)) {

                    /* Do a read on each bucket to pull in the
                     * data from pipe and socket buckets, so
                     * that we don't leave their file descriptors
                     * open indefinitely.  Do the same for file
                     * buckets, with one exception: allow the
                     * first file bucket in the brigade to remain
                     * a file bucket, so that we don't end up
                     * doing an mmap+memcpy every time a client
                     * requests a <8KB file over a keepalive
                     * connection.
                     */
                    if (APR_BUCKET_IS_FILE(bucket) && !file_bucket_saved) {
                        file_bucket_saved = 1;
                    }
                    else {
                        const char *buf;
                        apr_size_t len = 0;
                        rv = apr_bucket_read(bucket, &buf, &len,
                                             APR_BLOCK_READ);
                        if (rv != APR_SUCCESS) {
                            ap_log_cerror(APLOG_MARK, APLOG_ERR, rv,
                                          c, "core_output_filter:"
                                          " Error reading from bucket.");
                            return HTTP_INTERNAL_SERVER_ERROR;
                        }
                    }
                }
            }
            if (!ctx->deferred_write_pool) {
                apr_pool_create(&ctx->deferred_write_pool, c->pool);
                apr_pool_tag(ctx->deferred_write_pool, "deferred_write");
            }
            ap_save_brigade(f, &ctx->b, &b, ctx->deferred_write_pool);

            return APR_SUCCESS;
        }

        if (fd) {
            apr_hdtr_t hdtr;
            apr_size_t bytes_sent;

#if APR_HAS_SENDFILE
            apr_int32_t flags = 0;
#endif

            memset(&hdtr, '\0', sizeof(hdtr));
            if (nvec) {
                hdtr.numheaders = nvec;
                hdtr.headers = vec;
            }

            if (nvec_trailers) {
                hdtr.numtrailers = nvec_trailers;
                hdtr.trailers = vec_trailers;
            }

#if APR_HAS_SENDFILE
            if (apr_file_flags_get(fd) & APR_SENDFILE_ENABLED) {

                if (c->keepalive == AP_CONN_CLOSE && APR_BUCKET_IS_EOS(last_e)) {
                    /* Prepare the socket to be reused */
                    flags |= APR_SENDFILE_DISCONNECT_SOCKET;
                }

                rv = sendfile_it_all(net,      /* the network information   */
                                     fd,       /* the file to send          */
                                     &hdtr,    /* header and trailer iovecs */
                                     foffset,  /* offset in the file to begin
                                                  sending from              */
                                     flen,     /* length of file            */
                                     nbytes + flen, /* total length including
                                                       headers              */
                                     &bytes_sent,   /* how many bytes were
                                                       sent                 */
                                     flags);   /* apr_sendfile flags        */
            }
            else
#endif
            {
                rv = emulate_sendfile(net, fd, &hdtr, foffset, flen,
                                      &bytes_sent);
            }

            if (logio_add_bytes_out && bytes_sent > 0)
                logio_add_bytes_out(c, bytes_sent);

            fd = NULL;
        }
        else {
            apr_size_t bytes_sent;

            rv = writev_it_all(net->client_socket,
                               vec, nvec,
                               nbytes, &bytes_sent);

            if (logio_add_bytes_out && bytes_sent > 0)
                logio_add_bytes_out(c, bytes_sent);
        }

        apr_brigade_cleanup(b);

        /* drive cleanups for resources which were set aside
         * this may occur before or after termination of the request which
         * created the resource
         */
        if (ctx->deferred_write_pool) {
            if (more && more->p == ctx->deferred_write_pool) {
                /* "more" belongs to the deferred_write_pool,
                 * which is about to be cleared.
                 */
                if (APR_BRIGADE_EMPTY(more)) {
                    more = NULL;
                }
                else {
                    /* uh oh... change more's lifetime
                     * to the input brigade's lifetime
                     */
                    apr_bucket_brigade *tmp_more = more;
                    more = NULL;
                    ap_save_brigade(f, &more, &tmp_more, input_pool);
                }
            }
            apr_pool_clear(ctx->deferred_write_pool);
        }

        if (rv != APR_SUCCESS) {
            ap_log_cerror(APLOG_MARK, APLOG_INFO, rv, c,
                          "core_output_filter: writing data to the network");

            if (more)
                apr_brigade_cleanup(more);

            /* No need to check for SUCCESS, we did that above. */
            if (!APR_STATUS_IS_EAGAIN(rv)) {
                c->aborted = 1;
                return APR_ECONNABORTED;
            }

            return APR_SUCCESS;
        }

        b = more;
        more = NULL;
    }  /* end while () */

    return APR_SUCCESS;
}
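The heart of the filter above is a gather-write: read each data bucket non-destructively, collect the resulting pointers into an iovec array, and push the whole array to the network in one vectored send. A condensed, hedged sketch (not httpd's actual code), assuming bb and sock exist:

/* Condensed sketch of the gather-write, assuming bb and sock exist. */
struct iovec vec[32];
int nvec = 0;
apr_size_t bytes_written;
apr_bucket *e;

for (e = APR_BRIGADE_FIRST(bb);
     e != APR_BRIGADE_SENTINEL(bb) && nvec < 32;
     e = APR_BUCKET_NEXT(e)) {
    const char *str;
    apr_size_t n;
    if (APR_BUCKET_IS_METADATA(e))
        continue;                 /* skip EOS/FLUSH and friends */
    if (apr_bucket_read(e, &str, &n, APR_BLOCK_READ) != APR_SUCCESS)
        break;
    vec[nvec].iov_base = (char *)str;
    vec[nvec].iov_len  = n;
    nvec++;
}
apr_socket_sendv(sock, vec, nvec, &bytes_written);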
Example #12
apr_status_t
chunk_bucket_read(apr_bucket *b, const char **str, apr_size_t *len, apr_read_type_e block)
{
	apr_size_t written=0, length=0;
	apr_size_t offset = 0;
	apr_int64_t total64, remaining64, done64;
	apr_int64_t bl64, w64;
	ssize_t w;
	dav_resource_private *ctx;
	apr_status_t rc;
	rc = APR_SUCCESS;

	(void) block;

	*str = NULL;  /* in case we die prematurely */
	*len = 0;

	/*dummy bucket*/
	if (b->length == (apr_size_t)(-1) || b->start == (apr_off_t)(-1))
		return APR_SUCCESS;
	
	ctx = b->data;
	offset = b->start - ctx->cp_chunk.read;
	DAV_DEBUG_REQ(ctx->request, 0, "Reading data for this bucket start at current position + %"APR_SIZE_T_FMT, offset);
	DAV_DEBUG_REQ(ctx->request, 0, "Bucket length %"APR_SIZE_T_FMT, b->length);
	total64 = g_ascii_strtoll(ctx->cp_chunk.uncompressed_size, NULL, 10);
	done64 = b->start;
	bl64 = b->length;
	remaining64 = MIN(total64 - done64, bl64);

	DAV_DEBUG_REQ(ctx->request, 0, "Data already returned=%"APR_INT64_T_FMT", remaining=%"APR_INT64_T_FMT, done64, remaining64);

	if (remaining64 <= 0){
		DAV_DEBUG_REQ(ctx->request, 0, "No remaining data, end of resource delivering.");
		apr_bucket_heap_make(b, NULL, *len, apr_bucket_free);
		return APR_SUCCESS;
	}
	else { /* determine the size of THIS bucket */
		if (remaining64 > APR_BUCKET_BUFF_SIZE)
			length = APR_BUCKET_BUFF_SIZE;
		else
			length = remaining64;
	}

	*len = length;

	guint8 *buf = apr_bucket_alloc(length, b->list);
	for (written = 0; written < length; ) {
		GError *gerror;
	
		DAV_DEBUG_REQ(ctx->request, 0, "Trying to read at most %"APR_SSIZE_T_FMT" bytes (%"APR_SSIZE_T_FMT" received)",
				length - written, written);
		
		gerror = NULL;
		w = ctx->comp_ctx.data_uncompressor(&ctx->cp_chunk, offset, buf+written, length-written, &gerror);
		offset = 0;
		DAV_DEBUG_REQ(ctx->request, 0 , "%"APR_SSIZE_T_FMT" bytes read from local resource", w);
		if (w < 0) {
			DAV_ERROR_REQ(ctx->request, 0, "Read from chunk failed: %s", gerror_get_message(gerror));
			if (gerror)
				g_error_free(gerror);
			apr_bucket_free(buf);
			return APR_INCOMPLETE;
		}
		if (gerror)
			g_error_free(gerror);
		if (w == 0) {
			DAV_DEBUG_REQ(ctx->request, 0, "No bytes read from local resource although more"
						" must be read; this should never happen");
			apr_bucket_free(buf);
			return APR_INCOMPLETE;
		}
		written += w;
	}
	*len = written;

	DAV_DEBUG_REQ(ctx->request, 0, "Bucket done (%"APR_SSIZE_T_FMT" bytes, rc=%d)", written, rc);

	w64 = written;

	DAV_DEBUG_REQ(ctx->request, 0, "Status info : %"APR_INT64_T_FMT" written , %"APR_INT64_T_FMT" length total", w64, bl64);

	apr_bucket_heap_make(b, (char*)buf, *len, apr_bucket_free);

	if(w64 < bl64) {
		apr_bucket *bkt;
		DAV_DEBUG_REQ(ctx->request, 0, "Creating bucket info: bkt->length = %"APR_INT64_T_FMT", bkt->start ="
					" %"APR_INT64_T_FMT", bkt->data = %p, bkt->list = %p\n", remaining64, done64 + w64,
					&(ctx->cp_chunk), b->list);
		bkt = apr_bucket_alloc(sizeof(*bkt), b->list);
		bkt->type = &chunk_bucket_type;
		bkt->length = remaining64 - w64;
		bkt->start = done64 + w64;
		bkt->data = ctx;
		bkt->free = chunk_bucket_free_noop;
		bkt->list = b->list;
		APR_BUCKET_INSERT_AFTER(b, bkt);
		DAV_DEBUG_REQ(ctx->request, 0, "Starting a new RAWX bucket (length=%"APR_SIZE_T_FMT" start=%"APR_INT64_T_FMT")", bkt->length, bkt->start);
	}

	*str = (char*)buf;
	return rc;
}
Example #13
/* This function performs a single buffer read; any looping is done by the caller. */
static apr_status_t socket_bucket_read(apr_bucket *a, const char **str,
                                       apr_size_t *len, apr_read_type_e block)
{
    /* Read from the socket held in a->data into a buffer allocated from a->list */
    apr_socket_t *p = a->data;
    char *buf;
    apr_status_t rv;
    apr_interval_time_t timeout;

    if (block == APR_NONBLOCK_READ) {
        apr_socket_timeout_get(p, &timeout);
        apr_socket_timeout_set(p, 0);
    }

    *str = NULL;
    *len = APR_BUCKET_BUFF_SIZE;
    buf = apr_bucket_alloc(*len, a->list); /* XXX: check for failure? */

    rv = apr_socket_recv(p, buf, len);

    if (block == APR_NONBLOCK_READ) {
        apr_socket_timeout_set(p, timeout);
    }

    if (rv != APR_SUCCESS && rv != APR_EOF) {
        apr_bucket_free(buf);
        return rv;
    }
    /*
     * If there's more to read we have to keep the rest of the socket
     * for later. XXX: Note that more complicated bucket types that
     * refer to data not in memory and must therefore have a read()
     * function similar to this one should be wary of copying this
     * code because if they have a destroy function they probably
     * want to migrate the bucket's subordinate structure from the
     * old bucket to a raw new one and adjust it as appropriate,
     * rather than destroying the old one and creating a completely
     * new bucket.
     *
     * Even if there is nothing more to read, don't close the socket here
     * as we have to use it to send any response :)  We could shut it
     * down for reading, but there is no benefit to doing so.
     */
    if (*len > 0) {
        apr_bucket_heap *h;
        /* Change the current bucket into a heap bucket holding what we just
         * read; its start address is returned via *str */
        a = apr_bucket_heap_make(a, buf, *len, apr_bucket_free);
        h = a->data;
        h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */
        *str = buf;
        /* The rest of socket p continues as a new socket bucket */
        APR_BUCKET_INSERT_AFTER(a, apr_bucket_socket_create(p, a->list));
    }
    else {
        apr_bucket_free(buf);
        a = apr_bucket_immortal_make(a, "", 0);
        *str = a->data;
    }
    return APR_SUCCESS;
}