Example #1
static int read_msg(request_rec *r, msg_t **msg)
{
    conn_rec *c = r->connection;
    apr_bucket_brigade *bb = apr_brigade_create(r->pool, c->bucket_alloc);
    char *dbuf = apr_palloc(r->pool, MAX_MSG_SIZE + 1); /* +1 for the trailing NUL written below */
    int dbpos = 0;
    int seen_eos = 0;

    do {
        apr_status_t rv;
        apr_bucket *bucket;

        rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
                            APR_BLOCK_READ, HUGE_STRING_LEN);

        if (rv != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                          "reading request entity data");
            return HTTP_INTERNAL_SERVER_ERROR;
        }

        for (bucket = APR_BRIGADE_FIRST(bb);
                bucket != APR_BRIGADE_SENTINEL(bb);
                bucket = APR_BUCKET_NEXT(bucket))
        {
            const char *data;
            apr_size_t len;

            if (APR_BUCKET_IS_EOS(bucket)) {
                seen_eos = 1;
                break;
            }

            /* We can't do much with this. */
            if (APR_BUCKET_IS_FLUSH(bucket))
                continue;

            /* read */
            apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);

            if (dbpos < MAX_MSG_SIZE) {
                int cursize = (dbpos + len) > MAX_MSG_SIZE ?
                              (MAX_MSG_SIZE - dbpos) : len;

                memcpy(dbuf + dbpos, data, cursize);
                dbpos += cursize;
            }
        }

        apr_brigade_cleanup(bb);

    } while (!seen_eos);

    (*msg) = apr_pcalloc(r->pool, sizeof(msg_t));
    (*msg)->data = dbuf;
    (*msg)->data[dbpos] = '\0';
    (*msg)->size = dbpos;

    return OK;
}
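For context, read_msg() relies on a message type and size limit that are not shown on this page. The sketch below is only an inference from how the fields are used; the field names data and size come from the code above, while the typedef shape and the numeric limit are assumptions.

/* Sketch only: the module defines the real msg_t and MAX_MSG_SIZE. */
#define MAX_MSG_SIZE 65536          /* illustrative cap on the copied body */

typedef struct msg_t {
    char       *data;               /* NUL-terminated copy of the request body */
    apr_size_t  size;               /* number of body bytes stored in data */
} msg_t;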
Example #2
int h2_util_has_flush_or_eos(apr_bucket_brigade *bb) {
    apr_bucket *b;
    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b))
    {
        if (APR_BUCKET_IS_EOS(b) || APR_BUCKET_IS_FLUSH(b)) {
            return 1;
        }
    }
    return 0;
}
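As a companion, here is a minimal sketch (not part of mod_h2) that walks a brigade with the same iteration pattern to count the data bytes sitting in front of the first FLUSH or EOS bucket. Buckets of unknown length (for example pipe buckets, whose length is -1) are not read and are simply skipped.

static apr_off_t count_bytes_before_flush_or_eos(apr_bucket_brigade *bb)
{
    apr_off_t total = 0;
    apr_bucket *b;

    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b))
    {
        if (APR_BUCKET_IS_EOS(b) || APR_BUCKET_IS_FLUSH(b)) {
            break;
        }
        /* skip metadata and buckets whose length is not yet known */
        if (!APR_BUCKET_IS_METADATA(b) && b->length != (apr_size_t)-1) {
            total += b->length;
        }
    }
    return total;
}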
Example #3
apr_status_t jxr_send_brigade(jaxer_connection * ac, apr_bucket_brigade * bb)
{
	apr_bucket *bucket;
	apr_status_t rv;
	
	compat_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, ac->request, "mod_jaxer: sending a brigade (sock=%d)", ac->sock);

	for (bucket = APR_BRIGADE_FIRST(bb);
		 bucket != APR_BRIGADE_SENTINEL(bb);
		 bucket = APR_BUCKET_NEXT(bucket))
	{
		char *write_buf;
		apr_size_t write_buf_len;
		
		if (APR_BUCKET_IS_EOS(bucket))
			break;

		if (APR_BUCKET_IS_FLUSH(bucket))
			continue;

		if ((rv =
			 apr_bucket_read(bucket, (const char **)&write_buf, &write_buf_len,
							 APR_BLOCK_READ)) != APR_SUCCESS) {
			compat_log_rerror(APLOG_MARK, APLOG_WARNING, rv, ac->request,
						 "mod_jaxer: can't read request from bucket");
			return rv;
		}

		{
			int type = jxr_msg_get_type(write_buf);
			apr_size_t pos; /* not used */
			apr_size_t len = jxr_msg_get_length(write_buf, &pos);
			compat_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, ac->request, "mod_jaxer: sending a brigade (type=%s len=%" APR_SIZE_T_FMT ")", sBlockType[type], len);
		}

		/* Write the buffer to jaxer server */
		if (0 > jxr_socket_sendfull(ac, write_buf, (int) write_buf_len))
		{
			/* report the OS-level send error, not the (successful) status
			 * left over from apr_bucket_read above */
			rv = apr_get_os_error();
			compat_log_rerror(APLOG_MARK, APLOG_WARNING, rv, ac->request,
					"mod_jaxer: can't write to socket");
			return rv;
		}
	}
	compat_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, ac->request, "mod_jaxer: sent a brigade (sock=%d)", ac->sock);

	return APR_SUCCESS;
}
Example #4
static int cdn_html_filter(ap_filter_t * f, apr_bucket_brigade * bb)
{
  apr_bucket *b;
  const char *buf = 0;
  apr_size_t bytes = 0;

  /* now do HTML filtering if necessary, and pass the brigade onward */
  saxctxt *ctxt = check_html_filter_init(f);
  if (!ctxt)
    return ap_pass_brigade(f->next, bb);

  for(b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb); b = APR_BUCKET_NEXT(b)) {
    if(APR_BUCKET_IS_EOS(b) || APR_BUCKET_IS_FLUSH(b)) {
      consume_buffer(ctxt, buf, 0, 1);
      APR_BUCKET_REMOVE(b);
      APR_BRIGADE_INSERT_TAIL(ctxt->bb, b);
      return ap_pass_brigade(ctxt->f->next, ctxt->bb);
    }

    if(apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS && buf) {
      if(ctxt->parser == NULL) {

        /*
         * for now, always output utf-8; we could incorporate
         * mod_proxy_html's output transcoding with little problem if
         * necessary
         */
        ap_set_content_type(f->r, "text/html;charset=utf-8");

        if(!initialize_parser(f, ctxt, &buf, bytes)) {
          apr_status_t rv = ap_pass_brigade(ctxt->f->next, bb);
          ap_remove_output_filter(f);
          return rv;
        } else
          ap_fputs(f->next, ctxt->bb, ctxt->cfg->doctype);
      }
      consume_buffer(ctxt, buf, bytes, 0);
    }

  }

  /*ap_fflush(ctxt->f->next, ctxt->bb) ; */      /* uncomment for debug */
  apr_brigade_cleanup(bb);
  return APR_SUCCESS;
}
Example #5
/**
 * Process input stream
 */
static apr_status_t helocon_filter_in(ap_filter_t *f, apr_bucket_brigade *b, ap_input_mode_t mode, apr_read_type_e block, apr_off_t readbytes)
{
    conn_rec *c = f->c;
    my_ctx *ctx = f->ctx;

    // Fail quickly if the connection has already been aborted.
    if (c->aborted) {
        apr_brigade_cleanup(b);
        return APR_ECONNABORTED;
    }
    // Fast passthrough
    if (ctx->phase == PHASE_DONE) {
        return ap_get_brigade(f->next, b, mode, block, readbytes);
    }

    // Process Head
    do {
#ifdef DEBUG
        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (1)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase);
#endif

        if (APR_BRIGADE_EMPTY(b)) {
#ifdef DEBUG
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (2)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase);
#endif
            apr_status_t s = ap_get_brigade(f->next, b, ctx->mode, APR_BLOCK_READ, ctx->need);
            if (s != APR_SUCCESS) {
#ifdef DEBUG
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (fail)(1)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase);
#endif
                return s;
            }
        }
        if (ctx->phase == PHASE_DONE) {
            return APR_SUCCESS;
        }
        if (APR_BRIGADE_EMPTY(b)) {
#ifdef DEBUG
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (empty)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase);
#endif
            return APR_SUCCESS;
        }
        apr_bucket *e = NULL;
        for (e = APR_BRIGADE_FIRST(b); e != APR_BRIGADE_SENTINEL(b); e = APR_BUCKET_NEXT(e)) {
            if (e->type == NULL) {
#ifdef DEBUG
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (type=NULL)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase);
#endif
                return APR_SUCCESS;
            }

            // We need more data
            if (ctx->need > 0) {
                const char *str = NULL;
                apr_size_t length = 0;
                apr_status_t s = apr_bucket_read(e, &str, &length, APR_BLOCK_READ);
                if (s != APR_SUCCESS) {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " recv=%" APR_OFF_T_FMT " phase=%d readed=%" APR_SIZE_T_FMT " (fail)(2)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->recv, ctx->phase, length);
#endif
                    return s;
                }
#ifdef DEBUG
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " recv=%" APR_OFF_T_FMT " phase=%d readed=%" APR_SIZE_T_FMT " (3)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->recv, ctx->phase, length);
#endif
                if (length > 0) {
                    if ((ctx->offset + length) > PROXY_MAX_LENGTH) { // Overflow
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in ERROR: PROXY protocol header overflow from=%s to port=%d length=%" APR_OFF_T_FMT, _CLIENT_IP, c->local_addr->port, (ctx->offset + length));
                        goto ABORT_CONN2;
                    }
                    memcpy(ctx->buf + ctx->offset, str, length);
                    if (ctx->pad != ctx->magic) {
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in padding magic fail (bad=%d vs good=%d)", ctx->pad, ctx->magic);
                        goto ABORT_CONN;
                    }
                    ctx->offset += length;
                    ctx->recv += length;
                    ctx->need -= length;
                    ctx->buf[ctx->offset] = 0;
                    // delete HEAD
                    if (e->length > length) {
                        apr_bucket_split(e, length);
                    }
                }
                if (length == 0) {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG bucket flush=%d meta=%d", APR_BUCKET_IS_FLUSH(e) ? 1 : 0, APR_BUCKET_IS_METADATA(e) ? 1 : 0);
#endif
                    apr_bucket_delete(e);
                    continue;
                }
                apr_bucket_delete(e);
            }
            // Handle GETLINE mode
            if (ctx->mode == AP_MODE_GETLINE) {
                if ((ctx->need > 0) && (ctx->recv > 2)) {
                    char *end = memchr(ctx->buf, '\r', ctx->offset - 1);
                    if (end) {
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: GETLINE OK");
#endif
                        if ((end[0] == '\r') && (end[1] == '\n')) {
                            ctx->need = 0;
                        }
                    }
                }
            }
            if (ctx->need <= 0) {
#ifdef DEBUG
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " recv=%" APR_OFF_T_FMT " phase=%d (4)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->recv, ctx->phase);
#endif
                switch (ctx->phase) {
                case PHASE_WANT_HEAD: {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d phase=%d checking=%s buf=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->phase, "HEAD", ctx->buf);
#endif
                    // TEST Command
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=TEST CHECK");
#endif
                    if (strncmp(TEST, ctx->buf, 4) == 0) {
                        apr_socket_t *csd = ap_get_module_config(c->conn_config, &core_module);
                        apr_size_t length = strlen(TEST_RES_OK);
                        apr_socket_send(csd, TEST_RES_OK, &length);
                        apr_socket_shutdown(csd, APR_SHUTDOWN_WRITE);
                        apr_socket_close(csd);

#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=TEST OK");
#endif

                        // No need to check for SUCCESS, we did that above
                        c->aborted = 1;
                        apr_brigade_cleanup(b);
                        return APR_ECONNABORTED;
                    }
                    // HELO Command
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=HELO CHECK");
#endif
                    if (strncmp(HELO, ctx->buf, 4) == 0) {
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=HELO OK");
#endif
                        ctx->phase = PHASE_WANT_BINIP;
                        ctx->mode = AP_MODE_READBYTES;
                        ctx->need = 4;
                        ctx->recv = 0;
                        break;
                    }
                    // PROXY Command
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=PROXY CHECK");
#endif
                    if (strncmp(PROXY, ctx->buf, 4) == 0) {
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=PROXY OK");
#endif
                        ctx->phase = PHASE_WANT_LINE;
                        ctx->mode = AP_MODE_GETLINE;
                        ctx->need = PROXY_MAX_LENGTH - ctx->offset;
                        ctx->recv = 0;
                        break;
                    }
                    // ELSE... GET / POST / etc
                    ctx->phase = PHASE_DONE;
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG from: %s:%d to port=%d newBucket (1) size=%" APR_OFF_T_FMT, _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->offset);
#endif
                    // Restore original data
                    if (ctx->offset) {
                        e = apr_bucket_heap_create(ctx->buf, ctx->offset, NULL, c->bucket_alloc);
                        APR_BRIGADE_INSERT_HEAD(b, e);
                        goto END_CONN;
                    }
                    break;
                }
                case PHASE_WANT_BINIP: {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d phase=%d checking=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->phase, "BINIP");
#endif
                    // REWRITE CLIENT IP
                    const char *new_ip = fromBinIPtoString(c->pool, ctx->buf+4);
                    if (!new_ip) {
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in ERROR: HELO+IP invalid");
                        goto ABORT_CONN;
                    }

                    apr_table_set(c->notes, NOTE_REWRITE_IP, new_ip);
                    ctx->phase = PHASE_DONE;
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG from: %s:%d to port=%d newip=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, new_ip);
#endif
                    break;
                }
                case PHASE_WANT_LINE: {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d phase=%d checking=%s buf=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->phase, "LINE", ctx->buf);
#endif
                    ctx->phase = PHASE_DONE;
                    char *end = memchr(ctx->buf, '\r', ctx->offset - 1);
                    if (!end) {
                        goto ABORT_CONN;
                    }
                    if ((end[0] != '\r') || (end[1] != '\n')) {
                        goto ABORT_CONN;
                    }
                    if (!process_proxy_header(f)) {
                        goto ABORT_CONN;
                    }
                    // Restore original data
                    int count = (ctx->offset - ((end - ctx->buf) + 2));
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG from: %s:%d to port=%d newBucket (2) size=%d rest=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, count, end + 2);
#endif
                    if (count > 0) {
                        e = apr_bucket_heap_create(end + 2, count, NULL, c->bucket_alloc);
                        APR_BRIGADE_INSERT_HEAD(b, e);
                        goto END_CONN;
                    }
                    break;
                }
                }
                if (ctx->phase == PHASE_DONE) {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d phase=%d (DONE)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->phase);
#endif
                    ctx->mode = mode;
                    ctx->need = 0;
                    ctx->recv = 0;
                }
                break;
            }
        }
    } while (ctx->phase != PHASE_DONE);

END_CONN:
    return ap_get_brigade(f->next, b, mode, block, readbytes);

ABORT_CONN:
    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in ERROR: PROXY protocol header invalid from=%s to port=%d", _CLIENT_IP, c->local_addr->port);
ABORT_CONN2:
    c->aborted = 1;
    apr_brigade_cleanup(b);
    return APR_ECONNABORTED;
}
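For reference, a plausible shape of the per-connection context this filter assumes. The real my_ctx is defined by the module; the field names are taken from how the code above uses them, the placement of pad directly after buf is inferred from the padding-magic overrun check, and the exact types and the +1 on the buffer size are assumptions.

typedef struct {
    int             phase;                  /* PHASE_WANT_HEAD / _BINIP / _LINE / PHASE_DONE */
    ap_input_mode_t mode;                   /* read mode used while parsing the prologue */
    apr_off_t       need;                   /* bytes still needed to finish this phase */
    apr_off_t       recv;                   /* bytes received so far in this phase */
    apr_off_t       offset;                 /* current write position in buf */
    char            buf[PROXY_MAX_LENGTH + 1]; /* accumulated prologue, NUL-terminated */
    int             pad;                    /* canary placed right after buf ... */
    int             magic;                  /* ... compared against this constant */
} my_ctx;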
Example #6
apr_status_t ap_http_chunk_filter(ap_filter_t *f, apr_bucket_brigade *b)
{
#define ASCII_CRLF  "\015\012"
#define ASCII_ZERO  "\060"
    conn_rec *c = f->r->connection;
    apr_bucket_brigade *more;
    apr_bucket *e;
    apr_status_t rv;

    for (more = NULL; b; b = more, more = NULL) {
        apr_off_t bytes = 0;
        apr_bucket *eos = NULL;
        apr_bucket *flush = NULL;
        /* XXX: chunk_hdr must remain at this scope since it is used in a
         *      transient bucket.
         */
        char chunk_hdr[20]; /* enough space for the snprintf below */


        for (e = APR_BRIGADE_FIRST(b);
             e != APR_BRIGADE_SENTINEL(b);
             e = APR_BUCKET_NEXT(e))
        {
            if (APR_BUCKET_IS_EOS(e)) {
                /* there shouldn't be anything after the eos */
                eos = e;
                break;
            }
            if (AP_BUCKET_IS_ERROR(e)
                && (((ap_bucket_error *)(e->data))->status
                    == HTTP_BAD_GATEWAY)) {
                /*
                 * We had a broken backend. Memorize this in the filter
                 * context.
                 */
                f->ctx = &bad_gateway_seen;
                continue;
            }
            if (APR_BUCKET_IS_FLUSH(e)) {
                flush = e;
                more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
                break;
            }
            else if (e->length == (apr_size_t)-1) {
                /* unknown amount of data (e.g. a pipe) */
                const char *data;
                apr_size_t len;

                rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
                if (rv != APR_SUCCESS) {
                    return rv;
                }
                if (len > 0) {
                    /*
                     * There may be a new next bucket representing the
                     * rest of the data stream on which a read() may
                     * block so we pass down what we have so far.
                     */
                    bytes += len;
                    more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
                    break;
                }
                else {
                    /* If there was nothing in this bucket then we can
                     * safely move on to the next one without pausing
                     * to pass down what we have counted up so far.
                     */
                    continue;
                }
            }
            else {
                bytes += e->length;
            }
        }

        /*
         * XXX: if there aren't very many bytes at this point it may
         * be a good idea to set them aside and return for more,
         * unless we haven't finished counting this brigade yet.
         */
        /* if there are content bytes, then wrap them in a chunk */
        if (bytes > 0) {
            apr_size_t hdr_len;
            /*
             * Insert the chunk header, specifying the number of bytes in
             * the chunk.
             */
            hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr),
                                   "%" APR_UINT64_T_HEX_FMT CRLF, (apr_uint64_t)bytes);
            ap_xlate_proto_to_ascii(chunk_hdr, hdr_len);
            e = apr_bucket_transient_create(chunk_hdr, hdr_len,
                                            c->bucket_alloc);
            APR_BRIGADE_INSERT_HEAD(b, e);

            /*
             * Insert the end-of-chunk CRLF before an EOS or
             * FLUSH bucket, or appended to the brigade
             */
            e = apr_bucket_immortal_create(ASCII_CRLF, 2, c->bucket_alloc);
            if (eos != NULL) {
                APR_BUCKET_INSERT_BEFORE(eos, e);
            }
            else if (flush != NULL) {
                APR_BUCKET_INSERT_BEFORE(flush, e);
            }
            else {
                APR_BRIGADE_INSERT_TAIL(b, e);
            }
        }

        /* RFC 2616, Section 3.6.1
         *
         * If there is an EOS bucket, then prefix it with:
         *   1) the last-chunk marker ("0" CRLF)
         *   2) the trailer
         *   3) the end-of-chunked body CRLF
         *
         * We only do this if we have not seen an error bucket with
         * status HTTP_BAD_GATEWAY. We have memorized an
         * error bucket that we had seen in the filter context.
         * The error bucket with status HTTP_BAD_GATEWAY indicates that the
         * connection to the backend (mod_proxy) broke in the middle of the
         * response. In order to signal the client that something went wrong
         * we do not create the last-chunk marker and set c->keepalive to
         * AP_CONN_CLOSE in the core output filter.
         *
         * XXX: it would be nice to combine this with the end-of-chunk
         * marker above, but this is a bit more straight-forward for
         * now.
         */
        if (eos && !f->ctx) {
            /* XXX: (2) trailers ... does not yet exist */
            e = apr_bucket_immortal_create(ASCII_ZERO ASCII_CRLF
                                           /* <trailers> */
                                           ASCII_CRLF, 5, c->bucket_alloc);
            APR_BUCKET_INSERT_BEFORE(eos, e);
        }

        /* pass the brigade to the next filter. */
        rv = ap_pass_brigade(f->next, b);
        if (rv != APR_SUCCESS || eos != NULL) {
            return rv;
        }
    }
    return APR_SUCCESS;
}
Example #7
apr_status_t ap_core_output_filter(ap_filter_t *f, apr_bucket_brigade *b)
{
    apr_status_t rv;
    apr_bucket_brigade *more;
    conn_rec *c = f->c;
    core_net_rec *net = f->ctx;
    core_output_filter_ctx_t *ctx = net->out_ctx;
    apr_read_type_e eblock = APR_NONBLOCK_READ;
    apr_pool_t *input_pool = b->p;

    /* Fail quickly if the connection has already been aborted. */
    if (c->aborted) {
        apr_brigade_cleanup(b);
        return APR_ECONNABORTED;
    }

    if (ctx == NULL) {
        ctx = apr_pcalloc(c->pool, sizeof(*ctx));
        net->out_ctx = ctx;
    }

    /* If we have a saved brigade, concatenate the new brigade to it */
    if (ctx->b) {
        APR_BRIGADE_CONCAT(ctx->b, b);
        b = ctx->b;
        ctx->b = NULL;
    }

    /* Perform multiple passes over the brigade, sending batches of output
       to the connection. */
    while (b && !APR_BRIGADE_EMPTY(b)) {
        apr_size_t nbytes = 0;
        apr_bucket *last_e = NULL; /* initialized for debugging */
        apr_bucket *e;

        /* one group of iovecs per pass over the brigade */
        apr_size_t nvec = 0;
        apr_size_t nvec_trailers = 0;
        struct iovec vec[MAX_IOVEC_TO_WRITE];
        struct iovec vec_trailers[MAX_IOVEC_TO_WRITE];

        /* one file per pass over the brigade */
        apr_file_t *fd = NULL;
        apr_size_t flen = 0;
        apr_off_t foffset = 0;

        /* keep track of buckets that we've concatenated
         * to avoid small writes
         */
        apr_bucket *last_merged_bucket = NULL;

        /* tail of brigade if we need another pass */
        more = NULL;

        /* Iterate over the brigade: collect iovecs and/or a file */
        for (e = APR_BRIGADE_FIRST(b);
                e != APR_BRIGADE_SENTINEL(b);
                e = APR_BUCKET_NEXT(e))
        {
            /* keep track of the last bucket processed */
            last_e = e;
            if (APR_BUCKET_IS_EOS(e) || AP_BUCKET_IS_EOC(e)) {
                break;
            }
            else if (APR_BUCKET_IS_FLUSH(e)) {
                if (e != APR_BRIGADE_LAST(b)) {
                    more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
                }
                break;
            }

            /* It doesn't make any sense to use sendfile for a file bucket
             * that represents 10 bytes.
             */
            else if (APR_BUCKET_IS_FILE(e)
                     && (e->length >= AP_MIN_SENDFILE_BYTES)) {
                apr_bucket_file *a = e->data;

                /* We can't handle more than one file bucket at a time
                 * so we split here and send the file we have already
                 * found.
                 */
                if (fd) {
                    more = apr_brigade_split(b, e);
                    break;
                }

                fd = a->fd;
                flen = e->length;
                foffset = e->start;
            }
            else {
                const char *str;
                apr_size_t n;

                rv = apr_bucket_read(e, &str, &n, eblock);
                if (APR_STATUS_IS_EAGAIN(rv)) {
                    /* send what we have so far since we shouldn't expect more
                     * output for a while...  next time we read, block
                     */
                    more = apr_brigade_split(b, e);
                    eblock = APR_BLOCK_READ;
                    break;
                }
                eblock = APR_NONBLOCK_READ;
                if (n) {
                    if (!fd) {
                        if (nvec == MAX_IOVEC_TO_WRITE) {
                            /* woah! too many. buffer them up, for use later. */
                            apr_bucket *temp, *next;
                            apr_bucket_brigade *temp_brig;

                            if (nbytes >= AP_MIN_BYTES_TO_WRITE) {
                                /* We have enough data in the iovec
                                 * to justify doing a writev
                                 */
                                more = apr_brigade_split(b, e);
                                break;
                            }

                            /* Create a temporary brigade as a means
                             * of concatenating a bunch of buckets together
                             */
                            temp_brig = apr_brigade_create(f->c->pool,
                                                           f->c->bucket_alloc);
                            if (last_merged_bucket) {
                                /* If we've concatenated together small
                                 * buckets already in a previous pass,
                                 * the initial buckets in this brigade
                                 * are heap buckets that may have extra
                                 * space left in them (because they
                                 * were created by apr_brigade_write()).
                                 * We can take advantage of this by
                                 * building the new temp brigade out of
                                 * these buckets, so that the content
                                 * in them doesn't have to be copied again.
                                 */
                                APR_BRIGADE_PREPEND(b, temp_brig);
                                brigade_move(temp_brig, b, APR_BUCKET_NEXT(last_merged_bucket));
                            }

                            temp = APR_BRIGADE_FIRST(b);
                            while (temp != e) {
                                apr_bucket *d;
                                rv = apr_bucket_read(temp, &str, &n, APR_BLOCK_READ);
                                apr_brigade_write(temp_brig, NULL, NULL, str, n);
                                d = temp;
                                temp = APR_BUCKET_NEXT(temp);
                                apr_bucket_delete(d);
                            }

                            nvec = 0;
                            nbytes = 0;
                            temp = APR_BRIGADE_FIRST(temp_brig);
                            APR_BUCKET_REMOVE(temp);
                            APR_BRIGADE_INSERT_HEAD(b, temp);
                            apr_bucket_read(temp, &str, &n, APR_BLOCK_READ);
                            vec[nvec].iov_base = (char*) str;
                            vec[nvec].iov_len = n;
                            nvec++;

                            /* Just in case the temporary brigade has
                             * multiple buckets, recover the rest of
                             * them and put them in the brigade that
                             * we're sending.
                             */
                            for (next = APR_BRIGADE_FIRST(temp_brig);
                                    next != APR_BRIGADE_SENTINEL(temp_brig);
                                    next = APR_BRIGADE_FIRST(temp_brig)) {
                                APR_BUCKET_REMOVE(next);
                                APR_BUCKET_INSERT_AFTER(temp, next);
                                temp = next;
                                apr_bucket_read(next, &str, &n,
                                                APR_BLOCK_READ);
                                vec[nvec].iov_base = (char*) str;
                                vec[nvec].iov_len = n;
                                nvec++;
                            }

                            apr_brigade_destroy(temp_brig);

                            last_merged_bucket = temp;
                            e = temp;
                            last_e = e;
                        }
                        else {
                            vec[nvec].iov_base = (char*) str;
                            vec[nvec].iov_len = n;
                            nvec++;
                        }
                    }
                    else {
                        /* The bucket is a trailer to a file bucket */

                        if (nvec_trailers == MAX_IOVEC_TO_WRITE) {
                            /* woah! too many. stop now. */
                            more = apr_brigade_split(b, e);
                            break;
                        }

                        vec_trailers[nvec_trailers].iov_base = (char*) str;
                        vec_trailers[nvec_trailers].iov_len = n;
                        nvec_trailers++;
                    }

                    nbytes += n;
                }
            }
        }


        /* Completed iterating over the brigade, now determine if we want
         * to buffer the brigade or send the brigade out on the network.
         *
         * Save if we haven't accumulated enough bytes to send, the connection
         * is not about to be closed, and:
         *
         *   1) we didn't see a file, we don't have more passes over the
         *      brigade to perform,  AND we didn't stop at a FLUSH bucket.
         *      (IOW, we will save plain old bytes such as HTTP headers)
         * or
         *   2) we hit the EOS and have a keep-alive connection
         *      (IOW, this response is a bit more complex, but we save it
         *       with the hope of concatenating with another response)
         */
        if (nbytes + flen < AP_MIN_BYTES_TO_WRITE
                && !AP_BUCKET_IS_EOC(last_e)
                && ((!fd && !more && !APR_BUCKET_IS_FLUSH(last_e))
                    || (APR_BUCKET_IS_EOS(last_e)
                        && c->keepalive == AP_CONN_KEEPALIVE))) {

            /* NEVER save an EOS in here.  If we are saving a brigade with
             * an EOS bucket, then we are doing keepalive connections, and
             * we want to process the second request fully.
             */
            if (APR_BUCKET_IS_EOS(last_e)) {
                apr_bucket *bucket;
                int file_bucket_saved = 0;
                apr_bucket_delete(last_e);
                for (bucket = APR_BRIGADE_FIRST(b);
                        bucket != APR_BRIGADE_SENTINEL(b);
                        bucket = APR_BUCKET_NEXT(bucket)) {

                    /* Do a read on each bucket to pull in the
                     * data from pipe and socket buckets, so
                     * that we don't leave their file descriptors
                     * open indefinitely.  Do the same for file
                     * buckets, with one exception: allow the
                     * first file bucket in the brigade to remain
                     * a file bucket, so that we don't end up
                     * doing an mmap+memcpy every time a client
                     * requests a <8KB file over a keepalive
                     * connection.
                     */
                    if (APR_BUCKET_IS_FILE(bucket) && !file_bucket_saved) {
                        file_bucket_saved = 1;
                    }
                    else {
                        const char *buf;
                        apr_size_t len = 0;
                        rv = apr_bucket_read(bucket, &buf, &len,
                                             APR_BLOCK_READ);
                        if (rv != APR_SUCCESS) {
                            ap_log_cerror(APLOG_MARK, APLOG_ERR, rv,
                                          c, "core_output_filter:"
                                          " Error reading from bucket.");
                            return HTTP_INTERNAL_SERVER_ERROR;
                        }
                    }
                }
            }
            if (!ctx->deferred_write_pool) {
                apr_pool_create(&ctx->deferred_write_pool, c->pool);
                apr_pool_tag(ctx->deferred_write_pool, "deferred_write");
            }
            ap_save_brigade(f, &ctx->b, &b, ctx->deferred_write_pool);

            return APR_SUCCESS;
        }

        if (fd) {
            apr_hdtr_t hdtr;
            apr_size_t bytes_sent;

#if APR_HAS_SENDFILE
            apr_int32_t flags = 0;
#endif

            memset(&hdtr, '\0', sizeof(hdtr));
            if (nvec) {
                hdtr.numheaders = nvec;
                hdtr.headers = vec;
            }

            if (nvec_trailers) {
                hdtr.numtrailers = nvec_trailers;
                hdtr.trailers = vec_trailers;
            }

#if APR_HAS_SENDFILE
            if (apr_file_flags_get(fd) & APR_SENDFILE_ENABLED) {

                if (c->keepalive == AP_CONN_CLOSE && APR_BUCKET_IS_EOS(last_e)) {
                    /* Prepare the socket to be reused */
                    flags |= APR_SENDFILE_DISCONNECT_SOCKET;
                }

                rv = sendfile_it_all(net,      /* the network information   */
                                     fd,       /* the file to send          */
                                     &hdtr,    /* header and trailer iovecs */
                                     foffset,  /* offset in the file to begin
                                                  sending from              */
                                     flen,     /* length of file            */
                                     nbytes + flen, /* total length including
                                                       headers              */
                                     &bytes_sent,   /* how many bytes were
                                                       sent                 */
                                     flags);   /* apr_sendfile flags        */
            }
            else
#endif
            {
                rv = emulate_sendfile(net, fd, &hdtr, foffset, flen,
                                      &bytes_sent);
            }

            if (logio_add_bytes_out && bytes_sent > 0)
                logio_add_bytes_out(c, bytes_sent);

            fd = NULL;
        }
        else {
            apr_size_t bytes_sent;

            rv = writev_it_all(net->client_socket,
                               vec, nvec,
                               nbytes, &bytes_sent);

            if (logio_add_bytes_out && bytes_sent > 0)
                logio_add_bytes_out(c, bytes_sent);
        }

        apr_brigade_cleanup(b);

        /* drive cleanups for resources which were set aside
         * this may occur before or after termination of the request which
         * created the resource
         */
        if (ctx->deferred_write_pool) {
            if (more && more->p == ctx->deferred_write_pool) {
                /* "more" belongs to the deferred_write_pool,
                 * which is about to be cleared.
                 */
                if (APR_BRIGADE_EMPTY(more)) {
                    more = NULL;
                }
                else {
                    /* uh oh... change more's lifetime
                     * to the input brigade's lifetime
                     */
                    apr_bucket_brigade *tmp_more = more;
                    more = NULL;
                    ap_save_brigade(f, &more, &tmp_more, input_pool);
                }
            }
            apr_pool_clear(ctx->deferred_write_pool);
        }

        if (rv != APR_SUCCESS) {
            ap_log_cerror(APLOG_MARK, APLOG_INFO, rv, c,
                          "core_output_filter: writing data to the network");

            if (more)
                apr_brigade_cleanup(more);

            /* No need to check for SUCCESS, we did that above. */
            if (!APR_STATUS_IS_EAGAIN(rv)) {
                c->aborted = 1;
                return APR_ECONNABORTED;
            }

            return APR_SUCCESS;
        }

        b = more;
        more = NULL;
    }  /* end while () */

    return APR_SUCCESS;
}
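For reference, the output filter context used above only needs the two fields the code touches; this is a minimal sketch consistent with that usage, and the real definition in the server core may carry more.

typedef struct core_output_filter_ctx {
    apr_bucket_brigade *b;              /* brigade set aside for a later, larger write */
    apr_pool_t *deferred_write_pool;    /* subpool of c->pool that owns the set-aside data */
} core_output_filter_ctx_t;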
Example #8
/*
 * This routine is called to perform any module-specific fixing of header
 * fields, et cetera.  It is invoked just before any content-handler.
 *
 * This is a RUN_ALL HOOK.
 */
static int fixups(request_rec *r) {
    int ret;
    dir_config_t *dcfg = (dir_config_t *) ap_get_module_config(r->per_dir_config, &defender_module);
    // Stop if Defender not enabled
    if (!dcfg->defender)
        return DECLINED;

    // Stop if this is not the main request
    if (r->main != NULL || r->prev != NULL)
        return DECLINED;

    // Process only if POST / PUT request
    if (r->method_number != M_POST && r->method_number != M_PUT)
        return DECLINED;

    defender_config_t *defc = (defender_config_t *) ap_get_module_config(r->request_config, &defender_module);
    RuntimeScanner *scanner = defc->vpRuntimeScanner;

    if (scanner->contentLengthProvided && scanner->contentLength == 0)
        return scanner->processBody();

    if (scanner->contentType == CONTENT_TYPE_UNSUPPORTED)
        return scanner->processBody();

    if (scanner->bodyLimitExceeded)
        return scanner->processBody();

    if (!scanner->contentLengthProvided)
        return HTTP_NOT_IMPLEMENTED;

    if (scanner->transferEncodingProvided /*&& scanner->transferEncoding == TRANSFER_ENCODING_UNSUPPORTED*/)
        return HTTP_NOT_IMPLEMENTED;

    // Retrieve the body
    // Pre-allocate necessary bytes
    scanner->body.reserve(scanner->contentLength);

    // Wait for the body to be fully received 
    apr_bucket_brigade *bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
    unsigned int prev_nr_buckets = 0;
    if (bb == NULL)
        goto read_error_out;
    do {
        int rc = ap_get_brigade(r->input_filters, bb, AP_MODE_SPECULATIVE, APR_BLOCK_READ, LONG_MAX);
        if (rc != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, "Error reading body: %s", get_apr_error(r->pool, rc));
            goto read_error_out;
        }

        unsigned int nr_buckets = 0;
        // Iterate over buckets
        for (apr_bucket *bucket = APR_BRIGADE_FIRST(bb);
             bucket != APR_BRIGADE_SENTINEL(bb); bucket = APR_BUCKET_NEXT(bucket)) {
            // Stop if we reach the EOS bucket
            if (APR_BUCKET_IS_EOS(bucket))
                break;

            // Ignore non data buckets
            if (APR_BUCKET_IS_METADATA(bucket) || APR_BUCKET_IS_FLUSH(bucket))
                continue;

            nr_buckets++;
            // Skip already copied buckets
            if (nr_buckets <= prev_nr_buckets)
                continue;

            const char *buf;
            apr_size_t nbytes;
            int rv = apr_bucket_read(bucket, &buf, &nbytes, APR_BLOCK_READ);
            if (rv != APR_SUCCESS) {
                ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, "Failed reading input / bucket: %s",
                              get_apr_error(r->pool, rv));
                goto read_error_out;
            }

//            cerr << "bucket #" << nr_buckets - 1 << " received, nbytes: " << nbytes << ", total len: "
//                 << scanner->body.length() + nbytes << endl;

            // More bytes in the BODY than specified in the content-length
            if (scanner->body.length() + nbytes > scanner->contentLength) {
                ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, "Too much POST data: received body of %lu bytes but "
                        "got content-length: %lu", scanner->body.length() + nbytes, scanner->contentLength);
                goto read_error_out;
            }

            // More bytes in the BODY than specified by the allowed body limit
            if (scanner->body.length() + nbytes > dcfg->requestBodyLimit) {
                scanner->bodyLimitExceeded = true;
                ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, "Body limit exceeded (%lu)", dcfg->requestBodyLimit);
                break;
            }

            scanner->body.append(buf, nbytes);
        }
        prev_nr_buckets = nr_buckets;

        if (scanner->body.length() == scanner->contentLength)
            break;

        apr_brigade_cleanup(bb);
        apr_sleep(1000);
    } while (true);

//    cerr << "[pid " << getpid() << "] read " << scanner->body.length() << " bytes, ";
//    cerr << "content-length: " << scanner->contentLength << endl;
//    cerr << "body: " << scanner->body << endl;

    // Run scanner
    ret = scanner->processBody();

    if (dcfg->useenv)
        ret = pass_in_env(r, scanner);

//    cerr << "[pid " << getpid() << "] body (" << scanner->body.length() << ") scanned" << endl;

    return ret;

    read_error_out:
    if (dcfg->useenv) return DECLINED;
    return HTTP_INTERNAL_SERVER_ERROR;
}
Example #9
apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, 
                          apr_size_t maxlen, int *pfile_handles_allowed, 
                          const char *msg)
{
    apr_status_t status = APR_SUCCESS;
    int same_alloc;
    
    AP_DEBUG_ASSERT(to);
    AP_DEBUG_ASSERT(from);
    same_alloc = (to->bucket_alloc == from->bucket_alloc);

    if (!FILE_MOVE) {
        pfile_handles_allowed = NULL;
    }
    
    if (!APR_BRIGADE_EMPTY(from)) {
        apr_bucket *b, *end;
        
        status = last_not_included(from, maxlen, same_alloc,
                                   pfile_handles_allowed, &end);
        if (status != APR_SUCCESS) {
            return status;
        }
        
        while (!APR_BRIGADE_EMPTY(from) && status == APR_SUCCESS) {
            b = APR_BRIGADE_FIRST(from);
            if (b == end) {
                break;
            }
            
            if (same_alloc || (b->list == to->bucket_alloc)) {
                /* both brigades use the same bucket_alloc and auto-cleanups
                 * have the same life time. It's therefore safe to just move
                 * directly. */
                APR_BUCKET_REMOVE(b);
                APR_BRIGADE_INSERT_TAIL(to, b);
#if LOG_BUCKETS
                ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                              "h2_util_move: %s, passed bucket(same bucket_alloc) "
                              "%ld-%ld, type=%s",
                              msg, (long)b->start, (long)b->length, 
                              APR_BUCKET_IS_METADATA(b)? 
                              (APR_BUCKET_IS_EOS(b)? "EOS": 
                               (APR_BUCKET_IS_FLUSH(b)? "FLUSH" : "META")) : 
                              (APR_BUCKET_IS_FILE(b)? "FILE" : "DATA"));
#endif
            }
            else if (DEEP_COPY) {
                /* we have not managed the magic of passing buckets from
                 * one thread to another. Any attempts result in
                 * cleanup of pools scrambling memory.
                 */
                if (APR_BUCKET_IS_METADATA(b)) {
                    if (APR_BUCKET_IS_EOS(b)) {
                        APR_BRIGADE_INSERT_TAIL(to, apr_bucket_eos_create(to->bucket_alloc));
                    }
                    else if (APR_BUCKET_IS_FLUSH(b)) {
                        APR_BRIGADE_INSERT_TAIL(to, apr_bucket_flush_create(to->bucket_alloc));
                    }
                    else {
                        /* ignore */
                    }
                }
                else if (pfile_handles_allowed 
                         && *pfile_handles_allowed > 0 
                         && APR_BUCKET_IS_FILE(b)) {
                    /* We do not want to read files when passing buckets, if
                     * we can avoid it. However, what we've come up with so far
                     * is not working correctly, resulting either in crashes or
                     * too many open file descriptors.
                     */
                    apr_bucket_file *f = (apr_bucket_file *)b->data;
                    apr_file_t *fd = f->fd;
                    int setaside = (f->readpool != to->p);
#if LOG_BUCKETS
                    ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                                  "h2_util_move: %s, moving FILE bucket %ld-%ld "
                                  "from=%lx(p=%lx) to=%lx(p=%lx), setaside=%d",
                                  msg, (long)b->start, (long)b->length, 
                                  (long)from, (long)from->p, 
                                  (long)to, (long)to->p, setaside);
#endif
                    if (setaside) {
                        status = apr_file_setaside(&fd, fd, to->p);
                        if (status != APR_SUCCESS) {
                            ap_log_perror(APLOG_MARK, APLOG_ERR, status, to->p,
                                          APLOGNO(02947) "h2_util: %s, setaside FILE", 
                                          msg);
                            return status;
                        }
                    }
                    apr_brigade_insert_file(to, fd, b->start, b->length, 
                                            to->p);
                    --(*pfile_handles_allowed);
                }
                else {
                    const char *data;
                    apr_size_t len;
                    status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
                    if (status == APR_SUCCESS && len > 0) {
                        status = apr_brigade_write(to, NULL, NULL, data, len);
#if LOG_BUCKETS
                        ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                                      "h2_util_move: %s, copied bucket %ld-%ld "
                                      "from=%lx(p=%lx) to=%lx(p=%lx)",
                                      msg, (long)b->start, (long)b->length, 
                                      (long)from, (long)from->p, 
                                      (long)to, (long)to->p);
#endif
                    }
                }
                apr_bucket_delete(b);
            }
            else {
                apr_bucket_setaside(b, to->p);
                APR_BUCKET_REMOVE(b);
                APR_BRIGADE_INSERT_TAIL(to, b);
#if LOG_BUCKETS
                ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                              "h2_util_move: %s, passed setaside bucket %ld-%ld "
                              "from=%lx(p=%lx) to=%lx(p=%lx)",
                              msg, (long)b->start, (long)b->length, 
                              (long)from, (long)from->p, 
                              (long)to, (long)to->p);
#endif
            }
        }
    }
    
    return status;
}
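The compile-time switches the function above refers to are defined elsewhere in the module; the values below are illustrative assumptions only, shown so the conditionals read coherently.

#define FILE_MOVE   1            /* allow passing file buckets by handle */
#define DEEP_COPY   1            /* copy data across bucket allocators instead of moving buckets */
#define LOG_BUCKETS 0            /* per-bucket trace logging on/off */
#define LOG_LEVEL   APLOG_TRACE2 /* level used by the per-bucket trace logs */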
Example #10
int fcgi_server_send_stdin_record(fcgi_request_t *fr, uint16_t request_id,
		void *record_buffer)
{
	apr_bucket_brigade *bb = apr_brigade_create(fr->r->pool,
			fr->r->connection->bucket_alloc);

	int seen_eos = 0, server_stopped_reading = 0;
	apr_status_t rv;

	do {
		apr_bucket *bucket;

		rv = ap_get_brigade(fr->r->input_filters, bb, AP_MODE_READBYTES,
				APR_BLOCK_READ, HUGE_STRING_LEN);

		if (rv != APR_SUCCESS) {
			ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, fr->r,
					"FastCGI: error reading request entity data");
			return HTTP_INTERNAL_SERVER_ERROR;
		}

		for (bucket = APR_BRIGADE_FIRST(bb);
				bucket != APR_BRIGADE_SENTINEL(bb);
				bucket = APR_BUCKET_NEXT(bucket))
		{
			const char *data;
			apr_size_t len;

			if (APR_BUCKET_IS_EOS(bucket)) {
				seen_eos = 1;
				break;
			}

			/* We can't do much with this. */
			if (APR_BUCKET_IS_FLUSH(bucket)) {
				continue;
			}

			/* if the FastCGI server stopped reading, we still must read to EOS. */
			if (server_stopped_reading) {
				continue;
			}

			/* read from client */
			apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);

			/* build FCGI_STDIN record
			 *
			 * A bucket can hold up to 8000 bytes. Since FastCGI packets can be
			 * up to 64K we don't have to worry about too much stdin data being
			 * read in a single iteration.
			 */
			int padding_length = fcgi_record_build((fcgi_header_t)record_buffer,
					request_id, FCGI_STDIN, len);

			/* send header data to the FastCGI server */
			ssize_t bytes_sent = socket_send(fr,
					record_buffer, FCGI_HEADER_LEN);

			if (bytes_sent == -1) {
				server_stopped_reading = 1;
				continue;
			}

			/* send stdin data to the FastCGI server */
			bytes_sent = socket_send(fr, (char *)data, len);

			if (bytes_sent == -1) {
				server_stopped_reading = 1;
				continue;
			}

			/* send padding to the FastCGI server */
			bytes_sent = socket_send(fr,
					((char *)record_buffer) + FCGI_HEADER_LEN + len,
					padding_length);

			if (bytes_sent == -1) {
				server_stopped_reading = 1;
				continue;
			}
		}

		apr_brigade_cleanup(bb);
	} while (!seen_eos);

	apr_brigade_cleanup(bb);

	/* build void FCGI_STDIN record */
	fcgi_header_set((fcgi_header_t)record_buffer, FCGI_VERSION_1, FCGI_STDIN, request_id, 0, 0);

	/* send data to the FastCGI server */
	ssize_t bytes_sent = socket_send(fr, record_buffer, FCGI_HEADER_LEN);

	if (bytes_sent == -1) {
		ap_log_rerror(APLOG_MARK, APLOG_ERR, errno, fr->r,
				"FastCGI: failed to write to backend server (id=%u)", request_id);
		return HTTP_INTERNAL_SERVER_ERROR;
	}

	return OK;
}
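For reference, fcgi_record_build() fills in the standard 8-byte FastCGI record header at the start of record_buffer and returns the padding length it chose. The layout below follows the FastCGI specification; the module's own fcgi_header_t typedef may name or package these bytes differently.

typedef struct {
    unsigned char version;            /* FCGI_VERSION_1 */
    unsigned char type;               /* FCGI_STDIN for the records built here */
    unsigned char request_id_hi;      /* request id, high byte */
    unsigned char request_id_lo;      /* request id, low byte */
    unsigned char content_length_hi;  /* payload length, high byte */
    unsigned char content_length_lo;  /* payload length, low byte */
    unsigned char padding_length;     /* the value fcgi_record_build() returns */
    unsigned char reserved;
} fcgi_record_header;                 /* FCGI_HEADER_LEN == sizeof == 8 */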
Example #11
static int aikido_handler(request_rec *r)
{
    apr_bucket_brigade *bb;
    apr_bucket *b;
    int seen_eos, child_stopped_reading;
    apr_pool_t *p;
    apr_status_t rv;
    conn_rec *c = r->connection;
    void *aikidoreq;
    int inpipes[2];
    int outpipes[2];
    apr_file_t *infile;
    apr_file_t *outfile;
    int e;
    pthread_t tid;
    pthread_attr_t attrs;
    ThreadData tdata;
    void *tresult;

    if (aikido == NULL) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                      "mod_aikido failed to initialize");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    p = r->main ? r->main->pool : r->pool;

    if (r->method_number == M_OPTIONS) {
        r->allowed |= (AP_METHOD_BIT << M_GET);
        r->allowed |= (AP_METHOD_BIT << M_POST);
        return DECLINED;
    }

    // create the request object
    aikidoreq = aikido_new_request (r);

    if (!aikido_check_uri(aikido, aikidoreq)) {
        aikido_delete_request (aikidoreq);
        return DECLINED;
    }

    e = socketpair (AF_UNIX, SOCK_STREAM, 0, inpipes);
    e |= socketpair (AF_UNIX, SOCK_STREAM, 0, outpipes);
    if (e != 0) {
        perror ("socketpair");
        aikido_delete_request (aikidoreq);
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                      "Failed to create pipes");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    // redirect the output from the webapp to our outfile
    apr_os_file_put (&outfile, &outpipes[0], O_RDONLY, r->pool);   // open for reading from outpipes pipe

    // redirect the input to the webapp from out infile
    apr_os_file_put (&infile, &inpipes[1], O_CREAT | O_WRONLY, r->pool);        // open for writing to inpipes pipe

    ap_add_common_vars(r);
    ap_add_cgi_vars(r);


    bb = apr_brigade_create(r->pool, c->bucket_alloc);
    seen_eos = 0;
    child_stopped_reading = 0;
    do {
        apr_bucket *bucket;

        rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
                            APR_BLOCK_READ, HUGE_STRING_LEN);
       
        if (rv != APR_SUCCESS) {
            close (inpipes[0]);
            close (inpipes[1]);
            close (outpipes[0]);
            close (outpipes[1]);
            aikido_delete_request (aikidoreq);
            return rv;
        }

        for (bucket = APR_BRIGADE_FIRST(bb);
             bucket != APR_BRIGADE_SENTINEL(bb);
             bucket = APR_BUCKET_NEXT(bucket)) {
            const char *data;
            apr_size_t len;

            if (APR_BUCKET_IS_EOS(bucket)) {
                seen_eos = 1;
                break;
            }

            /* We can't do much with this. */
            if (APR_BUCKET_IS_FLUSH(bucket)) {
                continue;
            }

            /* If the child stopped, we still must read to EOS. */
            if (child_stopped_reading) {
                continue;
            } 

            /* read */
            apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
            
            /* Keep writing data to the child until done or too much time
             * elapses with no progress or an error occurs.
             */
            rv = apr_file_write_full(infile, data, len, NULL);

            if (rv != APR_SUCCESS) {
                /* silly script stopped reading, soak up remaining message */
                child_stopped_reading = 1;
            }
        }
        apr_brigade_cleanup(bb);
    }
    while (!seen_eos);

    /* Is this flush really needed? */
    apr_file_flush(infile);
    apr_file_close(infile);

    apr_brigade_cleanup(bb);

    /* start the reader thread */
    e = pthread_attr_init (&attrs);
    if (e != 0) {
        close (inpipes[0]);
        close (inpipes[1]);
        close (outpipes[0]);
        close (outpipes[1]);
        aikido_delete_request (aikidoreq);
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "Failed to allocate script thread attributes");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    e = pthread_attr_setdetachstate (&attrs, PTHREAD_CREATE_JOINABLE) ;
    if (e != 0) {
        close (inpipes[0]);
        close (inpipes[1]);
        close (outpipes[0]);
        close (outpipes[1]);
        aikido_delete_request (aikidoreq);
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "Failed to set script thread attributes");
        return HTTP_INTERNAL_SERVER_ERROR;
    }


    tdata.req = aikidoreq;
    tdata.in = inpipes[0];
    tdata.out = outpipes[1];

    e = pthread_create (&tid, NULL, script_thread, &tdata) ;
    if (e != 0) {
        close (inpipes[0]);
        close (inpipes[1]);
        close (outpipes[0]);
        close (outpipes[1]);
        aikido_delete_request (aikidoreq);
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "Failed to run script thread");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* script thread is now executing script with i/o redirected to the pipes */


    /* read the script output from the pipe*/
    apr_file_pipe_timeout_set(outfile, 0);

    b = aikido_bucket_create(r, outfile, c->bucket_alloc);
    if (b == NULL) {
        pthread_join (tid, &tresult);       /* wait for script to complete */
        close (inpipes[0]);
        close (inpipes[1]);
        close (outpipes[0]);
        close (outpipes[1]);
        aikido_delete_request (aikidoreq);
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    APR_BRIGADE_INSERT_TAIL(bb, b);
    b = apr_bucket_eos_create(c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, b);

    ap_pass_brigade(r->output_filters, bb);

    /* wait for the script thread to complete */
    pthread_join (tid, &tresult);
    
    /* terminate script output */
    close (inpipes[0]);
    close (inpipes[1]);
    close (outpipes[0]);
    close (outpipes[1]);

    /* delete the aikido request objects */
    aikido_delete_request (aikidoreq);

    return OK;
}
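A note on the pipe setup this example relies on: the handler imports raw pipe(2) descriptors into APR with apr_os_file_put() so the brigade loop can call apr_file_write_full() on them. A minimal alternative sketch using APR's own pipe constructor, so the request pool owns and cleans up the descriptors; the helper name and parameters are illustrative, not part of the module:

#include "httpd.h"
#include "apr_file_io.h"

/* Sketch: create both script pipes with APR instead of pipe(2) plus
 * apr_os_file_put().  The request pool closes the descriptors on cleanup,
 * and the handler can keep using apr_file_write_full() on the write end
 * exactly as above. */
static apr_status_t make_script_pipes(apr_file_t **script_in_rd,
                                      apr_file_t **script_in_wr,
                                      apr_file_t **script_out_rd,
                                      apr_file_t **script_out_wr,
                                      request_rec *r)
{
    apr_status_t rv;

    rv = apr_file_pipe_create(script_in_rd, script_in_wr, r->pool);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    return apr_file_pipe_create(script_out_rd, script_out_wr, r->pool);
}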
Ejemplo n.º 12
0
static PyObject * _conn_read(conn_rec *c, ap_input_mode_t mode, long len)
{

    apr_bucket *b;
    apr_bucket_brigade *bb;
    apr_status_t rc;
    long bytes_read;
    PyObject *result;
    char *buffer;
    long bufsize;

    bb = apr_brigade_create(c->pool, c->bucket_alloc);

    bufsize = len == 0 ? HUGE_STRING_LEN : len;

    while (APR_BRIGADE_EMPTY(bb)) {
        Py_BEGIN_ALLOW_THREADS;
        rc = ap_get_brigade(c->input_filters, bb, mode, APR_BLOCK_READ, bufsize);
        Py_END_ALLOW_THREADS;

        if (rc != APR_SUCCESS) {
            PyErr_SetObject(PyExc_IOError,
                            PyString_FromString("Connection read error"));
            return NULL;
        }
    }

    /*
     * loop through the brigade reading buckets into the string
     */

    b = APR_BRIGADE_FIRST(bb);

    if (APR_BUCKET_IS_EOS(b)) {
        apr_bucket_delete(b);
        Py_INCREF(Py_None);
        return Py_None;
    }

    /* PYTHON 2.5: 'PyString_FromStringAndSize' uses Py_ssize_t for input parameters */
    result = PyString_FromStringAndSize(NULL, bufsize);

    /* possibly no more memory */
    if (result == NULL)
        return PyErr_NoMemory();

    buffer = PyString_AS_STRING((PyStringObject *) result);

    bytes_read = 0;

    while ((bytes_read < len || len == 0) &&
           !(b == APR_BRIGADE_SENTINEL(bb) ||
             APR_BUCKET_IS_EOS(b) || APR_BUCKET_IS_FLUSH(b))) {

        const char *data;
        apr_size_t size;
        apr_bucket *old;

        if (apr_bucket_read(b, &data, &size, APR_BLOCK_READ) != APR_SUCCESS) {
            PyErr_SetObject(PyExc_IOError,
                            PyString_FromString("Connection read error"));
            Py_DECREF(result);   /* don't leak the partially built string */
            return NULL;
        }

        if (bytes_read + size > bufsize) {
            apr_bucket_split(b, bufsize - bytes_read);
            size = bufsize - bytes_read;
            /* now the bucket is the exact size we need */
        }

        memcpy(buffer, data, size);
        buffer += size;
        bytes_read += size;

        /* time to grow destination string? */
        if (len == 0 && bytes_read == bufsize) {

            /* PYTHON 2.5: '_PyString_Resize' uses Py_ssize_t for input parameters */
            _PyString_Resize(&result, bufsize + HUGE_STRING_LEN);
            buffer = PyString_AS_STRING((PyStringObject *) result);
            buffer += bufsize;
            bufsize += HUGE_STRING_LEN;
        }


        if (mode == AP_MODE_GETLINE || len == 0) {
            apr_bucket_delete(b);
            break;
        }

        old = b;
        b = APR_BUCKET_NEXT(b);
        apr_bucket_delete(old);
    }

    /* resize if necessary */
    if (bytes_read < len || len == 0)
        /* PYTHON 2.5: '_PyString_Resize' uses Py_ssize_t for input parameters */
        if(_PyString_Resize(&result, bytes_read))
            return NULL;

    return result;
}
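One detail worth calling out from the loop above: when a bucket holds more data than the caller asked for, apr_bucket_split() keeps the unread tail in the brigade so a later call can pick it up, and only then is the consumed bucket deleted. A minimal sketch of that split-then-delete idiom in isolation; the helper name and destination-buffer handling are illustrative:

#include <string.h>
#include "apr_buckets.h"

/* Copy at most `want` bytes out of the first bucket of bb; any remainder
 * is left at the head of the brigade so a later read picks it up. */
static apr_status_t take_prefix(apr_bucket_brigade *bb, char *dst,
                                apr_size_t want, apr_size_t *taken)
{
    apr_bucket *b = APR_BRIGADE_FIRST(bb);
    const char *data;
    apr_size_t len;
    apr_status_t rv;

    rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    if (len > want) {
        apr_bucket_split(b, want);   /* tail becomes a new bucket after b */
        len = want;
    }
    memcpy(dst, data, len);
    *taken = len;
    apr_bucket_delete(b);            /* only the consumed prefix goes away */
    return APR_SUCCESS;
}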
Ejemplo n.º 13
0
static int femto_post(request_rec* r)
{
  int bufbytes = 16*1024 - 1;
  char* dbuf = NULL;
  apr_size_t dbpos = 0;
  apr_bucket_brigade *bb;
  //apr_bucket *b;
  int seen_eos;
  conn_rec *c;
  apr_status_t rv;

  c = r->connection;


  // Read all of the put/post args into 'dbuf'
  // this code taken from mod_cgi.
  bb = apr_brigade_create(r->pool, c->bucket_alloc);
  seen_eos = 0;
  dbpos = 0;
  dbuf = apr_palloc(r->pool, bufbytes + 1);

  do {
      apr_bucket *bucket;

      rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
                          APR_BLOCK_READ, HUGE_STRING_LEN);

      if (rv != APR_SUCCESS) {
          ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                        "Error reading request entity data");
          return HTTP_INTERNAL_SERVER_ERROR;
      }

      for (bucket = APR_BRIGADE_FIRST(bb);
           bucket != APR_BRIGADE_SENTINEL(bb);
           bucket = APR_BUCKET_NEXT(bucket))
      {
          const char *data;
          apr_size_t len;

          if (APR_BUCKET_IS_EOS(bucket)) {
              seen_eos = 1;
              break;
          }

          /* We can't do much with this. */
          if (APR_BUCKET_IS_FLUSH(bucket)) {
              continue;
          }

          /* read */
          apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);

          {
              int cursize;

              if ((dbpos + len) > bufbytes) {
                  cursize = bufbytes - dbpos;
              } else {
                  cursize = len;
              }
              memcpy(dbuf + dbpos, data, cursize);
              dbpos += cursize;
          }
      }
      apr_brigade_cleanup(bb);
  }
  while (!seen_eos);

  dbuf[dbpos] = '\0';

  if(dbpos == 0) return HTTP_INTERNAL_SERVER_ERROR;

  return femto_do_request(r, dbuf);
}
Ejemplo n.º 14
0
static apr_status_t bucketeer_out_filter(ap_filter_t *f,
                                         apr_bucket_brigade *bb)
{
    apr_bucket *e;
    request_rec *r = f->r;
    bucketeer_ctx_t *ctx = f->ctx;
    bucketeer_filter_config_t *c;

    c = ap_get_module_config(r->server->module_config, &bucketeer_module);

    /* If we have a context, it means we've done this before successfully. */
    if (!ctx) {
        if (!r->content_type || strncmp(r->content_type, "text/", 5)) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* We're cool with filtering this. */
        ctx = f->ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
        ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        apr_table_unset(f->r->headers_out, "Content-Length");
    }

    for (e = APR_BRIGADE_FIRST(bb);
         e != APR_BRIGADE_SENTINEL(bb);
         e = APR_BUCKET_NEXT(e))
    {
        const char *data;
        apr_size_t len, i, lastpos;

        if (APR_BUCKET_IS_EOS(e)) {
            APR_BUCKET_REMOVE(e);
            APR_BRIGADE_INSERT_TAIL(ctx->bb, e);

            /* Okay, we've seen the EOS.
             * Time to pass it along down the chain.
             */
            return ap_pass_brigade(f->next, ctx->bb);
        }

        if (APR_BUCKET_IS_FLUSH(e)) {
            /*
             * Ignore flush buckets for the moment..
             * we decide what to stream
             */
            continue;
        }

        if (APR_BUCKET_IS_METADATA(e)) {
            /* metadata bucket */
            apr_bucket *cpy;
            apr_bucket_copy(e, &cpy);
            APR_BRIGADE_INSERT_TAIL(ctx->bb, cpy);
            continue;
        }

        /* read */
        apr_bucket_read(e, &data, &len, APR_BLOCK_READ);

        if (len > 0) {
            lastpos = 0;
            for (i = 0; i < len; i++) {
                if (data[i] == c->flushdelimiter ||
                    data[i] == c->bucketdelimiter ||
                    data[i] == c->passdelimiter) {
                    apr_bucket *p;
                    if (i - lastpos > 0) {
                        p = apr_bucket_pool_create(apr_pmemdup(f->r->pool,
                                                               &data[lastpos],
                                                               i - lastpos),
                                                    i - lastpos,
                                                    f->r->pool,
                                                    f->c->bucket_alloc);
                        APR_BRIGADE_INSERT_TAIL(ctx->bb, p);
                    }
                    lastpos = i + 1;
                    if (data[i] == c->flushdelimiter) {
                        p = apr_bucket_flush_create(f->c->bucket_alloc);
                        APR_BRIGADE_INSERT_TAIL(ctx->bb, p);
                    }
                    if (data[i] == c->passdelimiter) {
                        apr_status_t rv;

                        rv = ap_pass_brigade(f->next, ctx->bb);
                        if (rv) {
                            return rv;
                        }
                    }
                }
            }
            /* XXX: really should append this to the next 'real' bucket */
            if (lastpos < i) {
                apr_bucket *p;
                p = apr_bucket_pool_create(apr_pmemdup(f->r->pool,
                                                       &data[lastpos],
                                                       i - lastpos),
                                           i - lastpos,
                                           f->r->pool,
                                           f->c->bucket_alloc);
                lastpos = i;
                APR_BRIGADE_INSERT_TAIL(ctx->bb, p);
            }
        }
    }

    return APR_SUCCESS;
}
Ejemplo n.º 15
0
/* Bring the data from the brigade (which represents the result of the
 * request_rec out filter chain) into the h2_mplx for further sending
 * on the master connection. 
 */
static apr_status_t slave_out(h2_task *task, ap_filter_t* f, 
                              apr_bucket_brigade* bb)
{
    apr_bucket *b;
    apr_status_t status = APR_SUCCESS;
    int flush = 0, blocking;
    
    if (task->frozen) {
        h2_util_bb_log(task->c, task->stream_id, APLOG_TRACE2,
                       "frozen task output write, ignored", bb);
        while (!APR_BRIGADE_EMPTY(bb)) {
            b = APR_BRIGADE_FIRST(bb);
            if (AP_BUCKET_IS_EOR(b)) {
                APR_BUCKET_REMOVE(b);
                task->eor = b;
            }
            else {
                apr_bucket_delete(b);
            }
        }
        return APR_SUCCESS;
    }

    /* we send blocking once the output has been opened, so someone is there
     * reading it *and* the task is not assigned to a h2_req_engine */
    blocking = (!task->assigned && task->output.opened);
    if (!task->output.opened) {
        for (b = APR_BRIGADE_FIRST(bb);
             b != APR_BRIGADE_SENTINEL(bb);
             b = APR_BUCKET_NEXT(b)) {
            if (APR_BUCKET_IS_FLUSH(b)) {
                flush = 1;
                break;
            }
        }
    }
    
    if (task->output.bb && !APR_BRIGADE_EMPTY(task->output.bb)) {
        /* still have data buffered from previous attempt.
         * setaside and append new data and try to pass the complete data */
        if (!APR_BRIGADE_EMPTY(bb)) {
            status = ap_save_brigade(f, &task->output.bb, &bb, task->pool);
        }
        if (status == APR_SUCCESS) {
            status = send_out(task, task->output.bb, blocking);
        } 
    }
    else {
        /* no data buffered here, try to pass the brigade directly */
        status = send_out(task, bb, blocking); 
        if (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
            /* could not write all, buffer the rest */
            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c, APLOGNO(03405)
                          "h2_slave_out(%s): saving brigade", 
                          task->id);
            status = ap_save_brigade(f, &task->output.bb, &bb, task->pool);
            flush = 1;
        }
    }
    
    if (status == APR_SUCCESS && !task->output.opened && flush) {
        /* got a flush or could not write all, time to tell someone to read */
        status = open_output(task);
    }
    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, task->c, 
                  "h2_slave_out(%s): slave_out leave", task->id);    
    return status;
}
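The buffering scheme above hinges on ap_save_brigade(): whatever the send step could not take is set aside into task->output.bb (allocated from the task pool) and, in effect, prepended to the next attempt. A reduced sketch of that buffer-or-forward pattern for a generic output filter; here ap_pass_brigade() stands in for the send step and the pending-brigade parameter is an assumption of the sketch, not the module's API:

#include "httpd.h"
#include "util_filter.h"
#include "apr_buckets.h"

/* Buffer-or-forward, sketched: pass data on if nothing is pending, otherwise
 * append the new buckets to the pending brigade first so ordering is kept;
 * anything the downstream step leaves behind is set aside for the next call. */
static apr_status_t buffer_or_forward(ap_filter_t *f, apr_bucket_brigade *bb,
                                      apr_bucket_brigade **pending,
                                      apr_pool_t *pool)
{
    apr_status_t rv;

    if (*pending && !APR_BRIGADE_EMPTY(*pending)) {
        /* older data first: move the new buckets onto the pending brigade */
        rv = ap_save_brigade(f, pending, &bb, pool);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        return ap_pass_brigade(f->next, *pending);
    }

    rv = ap_pass_brigade(f->next, bb);
    if (rv == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
        /* downstream did not consume everything; keep the rest for later */
        rv = ap_save_brigade(f, pending, &bb, pool);
    }
    return rv;
}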
Ejemplo n.º 16
0
apr_status_t mgs_filter_output(ap_filter_t * f, apr_bucket_brigade * bb) {
    ssize_t ret;  /* gnutls_record_send()/gnutls_bye() return negative error codes, so keep this signed */
    mgs_handle_t *ctxt = (mgs_handle_t *) f->ctx;
    apr_status_t status = APR_SUCCESS;
    apr_read_type_e rblock = APR_NONBLOCK_READ;

    if (f->c->aborted) {
        apr_brigade_cleanup(bb);
        return APR_ECONNABORTED;
    }

    if (ctxt->status == 0) {
        gnutls_do_handshake(ctxt);
    }

    if (ctxt->status < 0) {
        return ap_pass_brigade(f->next, bb);
    }

    while (!APR_BRIGADE_EMPTY(bb)) {
        apr_bucket *bucket = APR_BRIGADE_FIRST(bb);

        if (APR_BUCKET_IS_EOS(bucket)) {
            return ap_pass_brigade(f->next, bb);
        } else if (APR_BUCKET_IS_FLUSH(bucket)) {
            /* Try Flush */
            if (write_flush(ctxt) < 0) {
                /* Flush Error */
                return ctxt->output_rc;
            }
            /* cleanup! */
            apr_bucket_delete(bucket);
        } else if (AP_BUCKET_IS_EOC(bucket)) {
            /* End Of Connection */
            if (ctxt->session != NULL) {
                /* Try A Clean Shutdown */
                do {
                    ret = gnutls_bye(ctxt->session, GNUTLS_SHUT_WR);
                } while (ret == GNUTLS_E_INTERRUPTED || ret == GNUTLS_E_AGAIN);
                /* De-Initialize Session */
                gnutls_deinit(ctxt->session);
                ctxt->session = NULL;
            }
            /* cleanup! */
            apr_bucket_delete(bucket);
            /* Pass next brigade! */
            return ap_pass_brigade(f->next, bb);
        } else {
            /* filter output */
            const char *data;
            apr_size_t len;

            status = apr_bucket_read(bucket, &data, &len, rblock);

            if (APR_STATUS_IS_EAGAIN(status)) {
                /* No data available so Flush! */
                if (write_flush(ctxt) < 0) {
                    return ctxt->output_rc;
                }
                /* Try again with a blocking read. */
                rblock = APR_BLOCK_READ;
                continue;
            }

            rblock = APR_NONBLOCK_READ;

            if (!APR_STATUS_IS_EOF(status)
                    && (status != APR_SUCCESS)) {
                return status;
            }

            if (len > 0) {

                if (ctxt->session == NULL) {
                    ret = GNUTLS_E_INVALID_REQUEST;
                } else {
                    do {
                        ret =
                                gnutls_record_send
                                (ctxt->session, data,
                                len);
                    } while (ret == GNUTLS_E_INTERRUPTED
                            || ret == GNUTLS_E_AGAIN);
                }

                if (ret < 0) {
                    /* error sending output */
                    ap_log_error(APLOG_MARK,
                            APLOG_INFO,
                            ctxt->output_rc,
                            ctxt->c->base_server,
                            "GnuTLS: Error writing data."
                            " (%d) '%s'",
                            (int) ret,
                            gnutls_strerror(ret));
                    if (ctxt->output_rc == APR_SUCCESS) {
                        ctxt->output_rc =
                                APR_EGENERAL;
                        return ctxt->output_rc;
                    }
                } else if (ret != len) {
                    /* Not able to send the entire bucket,
                       split it and send it again. */
                    apr_bucket_split(bucket, ret);
                }
            }

            apr_bucket_delete(bucket);
        }
    }

    return status;
}
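The `ret != len` branch above is the partial-write path: splitting the bucket at the number of bytes GnuTLS actually accepted means the apr_bucket_delete() at the end of the loop discards only the sent prefix, while the remainder becomes the new head of the brigade and is retried on the next pass. Reduced to its essentials, with the example's own variable names:

    /* Partial-write handling, reduced: keep only the sent prefix in `bucket`
     * so the delete below removes just what went out; the tail stays at the
     * head of bb and is resent on the next loop iteration. */
    if (ret > 0 && (apr_size_t)ret < len) {
        apr_bucket_split(bucket, ret);
    }
    apr_bucket_delete(bucket);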
Ejemplo n.º 17
0
apr_status_t h2_util_copy(apr_bucket_brigade *to, apr_bucket_brigade *from, 
                          apr_size_t maxlen, const char *msg)
{
    apr_status_t status = APR_SUCCESS;
    int same_alloc;

    (void)msg;
    AP_DEBUG_ASSERT(to);
    AP_DEBUG_ASSERT(from);
    same_alloc = (to->bucket_alloc == from->bucket_alloc);

    if (!APR_BRIGADE_EMPTY(from)) {
        apr_bucket *b, *end, *cpy;
        
        status = last_not_included(from, maxlen, 0, 0, &end);
        if (status != APR_SUCCESS) {
            return status;
        }

        for (b = APR_BRIGADE_FIRST(from);
             b != APR_BRIGADE_SENTINEL(from) && b != end;
             b = APR_BUCKET_NEXT(b))
        {
            if (same_alloc) {
                status = apr_bucket_copy(b, &cpy);
                if (status != APR_SUCCESS) {
                    break;
                }
                APR_BRIGADE_INSERT_TAIL(to, cpy);
            }
            else {
                if (APR_BUCKET_IS_METADATA(b)) {
                    if (APR_BUCKET_IS_EOS(b)) {
                        APR_BRIGADE_INSERT_TAIL(to, apr_bucket_eos_create(to->bucket_alloc));
                    }
                    else if (APR_BUCKET_IS_FLUSH(b)) {
                        APR_BRIGADE_INSERT_TAIL(to, apr_bucket_flush_create(to->bucket_alloc));
                    }
                    else {
                        /* ignore */
                    }
                }
                else {
                    const char *data;
                    apr_size_t len;
                    status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
                    if (status == APR_SUCCESS && len > 0) {
                        status = apr_brigade_write(to, NULL, NULL, data, len);
#if LOG_BUCKETS                        
                        ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                                      "h2_util_copy: %s, copied bucket %ld-%ld "
                                      "from=%lx(p=%lx) to=%lx(p=%lx)",
                                      msg, (long)b->start, (long)b->length, 
                                      (long)from, (long)from->p, 
                                      (long)to, (long)to->p);
#endif
                    }
                }
            }
        }
    }
    return status;
}
Ejemplo n.º 18
0
apr_status_t ap_core_output_filter(ap_filter_t *f, apr_bucket_brigade *new_bb)
{
    conn_rec *c = f->c;
    core_net_rec *net = f->ctx;
    core_output_filter_ctx_t *ctx = net->out_ctx;
    apr_bucket_brigade *bb = NULL;
    apr_bucket *bucket, *next, *flush_upto = NULL;
    apr_size_t bytes_in_brigade, non_file_bytes_in_brigade;
    int eor_buckets_in_brigade, morphing_bucket_in_brigade;
    apr_status_t rv;
    int loglevel = ap_get_conn_module_loglevel(c, APLOG_MODULE_INDEX);

    /* Fail quickly if the connection has already been aborted. */
    if (c->aborted) {
        if (new_bb != NULL) {
            apr_brigade_cleanup(new_bb);
        }
        return APR_ECONNABORTED;
    }

    if (ctx == NULL) {
        ctx = apr_pcalloc(c->pool, sizeof(*ctx));
        net->out_ctx = (core_output_filter_ctx_t *)ctx;
        /*
         * Need to create tmp brigade with correct lifetime. Passing
         * NULL to apr_brigade_split_ex would result in a brigade
         * allocated from bb->pool which might be wrong.
         */
        ctx->tmp_flush_bb = apr_brigade_create(c->pool, c->bucket_alloc);
        /* same for buffered_bb and ap_save_brigade */
        ctx->buffered_bb = apr_brigade_create(c->pool, c->bucket_alloc);
    }

    if (new_bb != NULL)
        bb = new_bb;

    if ((ctx->buffered_bb != NULL) &&
        !APR_BRIGADE_EMPTY(ctx->buffered_bb)) {
        if (new_bb != NULL) {
            APR_BRIGADE_PREPEND(bb, ctx->buffered_bb);
        }
        else {
            bb = ctx->buffered_bb;
        }
        c->data_in_output_filters = 0;
    }
    else if (new_bb == NULL) {
        return APR_SUCCESS;
    }

    /* Scan through the brigade and decide whether to attempt a write,
     * and how much to write, based on the following rules:
     *
     *  1) The new_bb is null: Do a nonblocking write of as much data as
     *     possible, then save the rest in ctx->buffered_bb.  (If
     *     new_bb == NULL, it probably means that the MPM is doing
     *     asynchronous write completion and has just determined that
     *     this connection is writable.)
     *
     *  2) Determine if and up to which bucket we need to do a blocking
     *     write:
     *
     *  a) The brigade contains a flush bucket: Do a blocking write
     *     of everything up to that point.
     *
     *  b) The request is in CONN_STATE_HANDLER state, and the brigade
     *     contains at least THRESHOLD_MAX_BUFFER bytes in non-file
     *     buckets: Do blocking writes until the amount of data in the
     *     buffer is less than THRESHOLD_MAX_BUFFER.  (The point of this
     *     rule is to provide flow control, in case a handler is
     *     streaming out lots of data faster than the data can be
     *     sent to the client.)
     *
     *  c) The request is in CONN_STATE_HANDLER state, and the brigade
     *     contains at least MAX_REQUESTS_IN_PIPELINE EOR buckets:
     *     Do blocking writes until less than MAX_REQUESTS_IN_PIPELINE EOR
     *     buckets are left. (The point of this rule is to prevent too many
     *     FDs being kept open by pipelined requests, possibly allowing a
     *     DoS).
     *
     *  d) The brigade contains a morphing bucket: If there was no other
     *     reason to do a blocking write yet, try reading the bucket. If its
     *     contents fit into memory before THRESHOLD_MAX_BUFFER is reached,
     *     everything is fine. Otherwise we need to do a blocking write
     *     up to and including the morphing bucket, because ap_save_brigade()
     *     would read the whole bucket into memory later on.
     *
     *  3) Actually do the blocking write up to the last bucket determined
     *     by rules 2a-d. The point of doing only one flush is to make as
     *     few calls to writev() as possible.
     *
     *  4) If the brigade contains at least THRESHOLD_MIN_WRITE
     *     bytes: Do a nonblocking write of as much data as possible,
     *     then save the rest in ctx->buffered_bb.
     */

    if (new_bb == NULL) {
        rv = send_brigade_nonblocking(net->client_socket, bb,
                                      &(ctx->bytes_written), c);
        if (rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv)) {
            /* The client has aborted the connection */
            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c,
                          "core_output_filter: writing data to the network");
            apr_brigade_cleanup(bb);
            c->aborted = 1;
            return rv;
        }
        setaside_remaining_output(f, ctx, bb, c);
        return APR_SUCCESS;
    }

    bytes_in_brigade = 0;
    non_file_bytes_in_brigade = 0;
    eor_buckets_in_brigade = 0;
    morphing_bucket_in_brigade = 0;

    for (bucket = APR_BRIGADE_FIRST(bb); bucket != APR_BRIGADE_SENTINEL(bb);
         bucket = next) {
        next = APR_BUCKET_NEXT(bucket);

        if (!APR_BUCKET_IS_METADATA(bucket)) {
            if (bucket->length == (apr_size_t)-1) {
                /*
                 * A setaside of morphing buckets would read everything into
                 * memory. Instead, we will flush everything up to and
                 * including this bucket.
                 */
                morphing_bucket_in_brigade = 1;
            }
            else {
                bytes_in_brigade += bucket->length;
                if (!APR_BUCKET_IS_FILE(bucket))
                    non_file_bytes_in_brigade += bucket->length;
            }
        }
        else if (AP_BUCKET_IS_EOR(bucket)) {
            eor_buckets_in_brigade++;
        }

        if (APR_BUCKET_IS_FLUSH(bucket)
            || non_file_bytes_in_brigade >= THRESHOLD_MAX_BUFFER
            || morphing_bucket_in_brigade
            || eor_buckets_in_brigade > MAX_REQUESTS_IN_PIPELINE) {
            /* this segment of the brigade MUST be sent before returning. */

            if (loglevel >= APLOG_TRACE6) {
                char *reason = APR_BUCKET_IS_FLUSH(bucket) ?
                               "FLUSH bucket" :
                               (non_file_bytes_in_brigade >= THRESHOLD_MAX_BUFFER) ?
                               "THRESHOLD_MAX_BUFFER" :
                               morphing_bucket_in_brigade ? "morphing bucket" :
                               "MAX_REQUESTS_IN_PIPELINE";
                ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, c,
                              "will flush because of %s", reason);
                ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, c,
                              "seen in brigade%s: bytes: %" APR_SIZE_T_FMT
                              ", non-file bytes: %" APR_SIZE_T_FMT ", eor "
                              "buckets: %d, morphing buckets: %d",
                              flush_upto == NULL ? " so far"
                                                 : " since last flush point",
                              bytes_in_brigade,
                              non_file_bytes_in_brigade,
                              eor_buckets_in_brigade,
                              morphing_bucket_in_brigade);
            }
            /*
             * Defer the actual blocking write to avoid doing many writes.
             */
            flush_upto = next;

            bytes_in_brigade = 0;
            non_file_bytes_in_brigade = 0;
            eor_buckets_in_brigade = 0;
            morphing_bucket_in_brigade = 0;
        }
    }

    if (flush_upto != NULL) {
        ctx->tmp_flush_bb = apr_brigade_split_ex(bb, flush_upto,
                                                 ctx->tmp_flush_bb);
        if (loglevel >= APLOG_TRACE8) {
                ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, c,
                              "flushing now");
        }
        rv = send_brigade_blocking(net->client_socket, bb,
                                   &(ctx->bytes_written), c);
        if (rv != APR_SUCCESS) {
            /* The client has aborted the connection */
            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c,
                          "core_output_filter: writing data to the network");
            apr_brigade_cleanup(bb);
            c->aborted = 1;
            return rv;
        }
        if (loglevel >= APLOG_TRACE8) {
                ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, c,
                              "total bytes written: %" APR_SIZE_T_FMT,
                              ctx->bytes_written);
        }
        APR_BRIGADE_CONCAT(bb, ctx->tmp_flush_bb);
    }

    if (loglevel >= APLOG_TRACE8) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, c,
                      "brigade contains: bytes: %" APR_SIZE_T_FMT
                      ", non-file bytes: %" APR_SIZE_T_FMT
                      ", eor buckets: %d, morphing buckets: %d",
                      bytes_in_brigade, non_file_bytes_in_brigade,
                      eor_buckets_in_brigade, morphing_bucket_in_brigade);
    }

    if (bytes_in_brigade >= THRESHOLD_MIN_WRITE) {
        rv = send_brigade_nonblocking(net->client_socket, bb,
                                      &(ctx->bytes_written), c);
        if ((rv != APR_SUCCESS) && (!APR_STATUS_IS_EAGAIN(rv))) {
            /* The client has aborted the connection */
            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c,
                          "core_output_filter: writing data to the network");
            apr_brigade_cleanup(bb);
            c->aborted = 1;
            return rv;
        }
        if (loglevel >= APLOG_TRACE8) {
                ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, c,
                              "tried nonblocking write, total bytes "
                              "written: %" APR_SIZE_T_FMT,
                              ctx->bytes_written);
        }
    }

    setaside_remaining_output(f, ctx, bb, c);
    return APR_SUCCESS;
}
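Rule 3 of the comment block above boils down to one idiom: split the brigade at the last point that must go out now, write that part blocking, then glue the unsent tail back on for the nonblocking attempt that follows. A minimal sketch, under the assumption that some send_blocking() callback plays the role of send_brigade_blocking():

#include "apr_buckets.h"

/* Sketch of the flush_upto idiom: everything from flush_upto onward is moved
 * into tmp_bb, the mandatory part left in bb is written blocking, and the
 * tail is concatenated back for the later nonblocking attempt. */
static apr_status_t flush_up_to(apr_bucket_brigade *bb, apr_bucket *flush_upto,
                                apr_bucket_brigade *tmp_bb,
                                apr_status_t (*send_blocking)(apr_bucket_brigade *))
{
    apr_status_t rv;

    tmp_bb = apr_brigade_split_ex(bb, flush_upto, tmp_bb);
    rv = send_blocking(bb);          /* bb now holds only the must-send segment */
    if (rv != APR_SUCCESS) {
        return rv;
    }
    APR_BRIGADE_CONCAT(bb, tmp_bb);  /* unsent tail goes back behind it */
    return APR_SUCCESS;
}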
Ejemplo n.º 19
0
apr_status_t h2_beam_receive(h2_bucket_beam *beam, 
                             apr_bucket_brigade *bb, 
                             apr_read_type_e block,
                             apr_off_t readbytes)
{
    h2_beam_lock bl;
    apr_bucket *bred, *bgreen, *ng;
    int transferred = 0;
    apr_status_t status = APR_SUCCESS;
    apr_off_t remain = readbytes;
    
    /* Called from the green thread to take buckets from the beam */
    if (enter_yellow(beam, &bl) == APR_SUCCESS) {
transfer:
        if (beam->aborted) {
            if (beam->green && !APR_BRIGADE_EMPTY(beam->green)) {
                apr_brigade_cleanup(beam->green);
            }
            status = APR_ECONNABORTED;
            goto leave;
        }

        /* transfer enough buckets from our green brigade, if we have one */
        while (beam->green
               && !APR_BRIGADE_EMPTY(beam->green)
               && (readbytes <= 0 || remain >= 0)) {
            bgreen = APR_BRIGADE_FIRST(beam->green);
            if (readbytes > 0 && bgreen->length > 0 && remain <= 0) {
                break;
            }            
            APR_BUCKET_REMOVE(bgreen);
            APR_BRIGADE_INSERT_TAIL(bb, bgreen);
            remain -= bgreen->length;
            ++transferred;
        }

        /* transfer from our red brigade, transforming red buckets to
         * green ones until we have enough */
        while (!H2_BLIST_EMPTY(&beam->red) && (readbytes <= 0 || remain >= 0)) {
            bred = H2_BLIST_FIRST(&beam->red);
            bgreen = NULL;
            
            if (readbytes > 0 && bred->length > 0 && remain <= 0) {
                break;
            }
                        
            if (APR_BUCKET_IS_METADATA(bred)) {
                if (APR_BUCKET_IS_EOS(bred)) {
                    bgreen = apr_bucket_eos_create(bb->bucket_alloc);
                    beam->close_sent = 1;
                }
                else if (APR_BUCKET_IS_FLUSH(bred)) {
                    bgreen = apr_bucket_flush_create(bb->bucket_alloc);
                }
                else {
                    /* put red into hold, no green sent out */
                }
            }
            else if (APR_BUCKET_IS_FILE(bred)) {
                /* This is set aside into the target brigade pool so that 
                 * any read operation messes with that pool and not 
                 * the red one. */
                apr_bucket_file *f = (apr_bucket_file *)bred->data;
                apr_file_t *fd = f->fd;
                int setaside = (f->readpool != bb->p);
                
                if (setaside) {
                    status = apr_file_setaside(&fd, fd, bb->p);
                    if (status != APR_SUCCESS) {
                        goto leave;
                    }
                    ++beam->files_beamed;
                }
                ng = apr_brigade_insert_file(bb, fd, bred->start, bred->length, 
                                             bb->p);
#if APR_HAS_MMAP
                /* disable mmap handling as this leads to segfaults when
                 * the underlying file is changed while memory pointer has
                 * been handed out. See also PR 59348 */
                apr_bucket_file_enable_mmap(ng, 0);
#endif
                remain -= bred->length;
                APR_BUCKET_REMOVE(bred);
                H2_BLIST_INSERT_TAIL(&beam->hold, bred);
                ++transferred;
                continue;
            }
            else {
                /* create a "green" standin bucket. we took care about the
                 * underlying red bucket and its data when we placed it into
                 * the red brigade.
                 * the beam bucket will notify us on destruction that bred is
                 * no longer needed. */
                bgreen = h2_beam_bucket_create(beam, bred, bb->bucket_alloc,
                                               beam->buckets_sent++);
            }
            
            /* Place the red bucket into our hold, to be destroyed when no
             * green bucket references it any more. */
            APR_BUCKET_REMOVE(bred);
            H2_BLIST_INSERT_TAIL(&beam->hold, bred);
            beam->received_bytes += bred->length;
            if (bgreen) {
                APR_BRIGADE_INSERT_TAIL(bb, bgreen);
                remain -= bgreen->length;
                ++transferred;
            }
        }

        if (readbytes > 0 && remain < 0) {
            /* too much, put some back */
            remain = readbytes;
            for (bgreen = APR_BRIGADE_FIRST(bb);
                 bgreen != APR_BRIGADE_SENTINEL(bb);
                 bgreen = APR_BUCKET_NEXT(bgreen)) {
                 remain -= bgreen->length;
                 if (remain < 0) {
                     apr_bucket_split(bgreen, bgreen->length+remain);
                     beam->green = apr_brigade_split_ex(bb, 
                                                        APR_BUCKET_NEXT(bgreen), 
                                                        beam->green);
                     break;
                 }
            }
        }

        if (beam->closed 
            && (!beam->green || APR_BRIGADE_EMPTY(beam->green))
            && H2_BLIST_EMPTY(&beam->red)) {
            /* beam is closed and we have nothing more to receive */ 
            if (!beam->close_sent) {
                apr_bucket *b = apr_bucket_eos_create(bb->bucket_alloc);
                APR_BRIGADE_INSERT_TAIL(bb, b);
                beam->close_sent = 1;
                ++transferred;
                status = APR_SUCCESS;
            }
        }
        
        if (transferred) {
            status = APR_SUCCESS;
        }
        else if (beam->closed) {
            status = APR_EOF;
        }
        else if (block == APR_BLOCK_READ && bl.mutex && beam->m_cond) {
            status = wait_cond(beam, bl.mutex);
            if (status != APR_SUCCESS) {
                goto leave;
            }
            goto transfer;
        }
        else {
            status = APR_EAGAIN;
        }
leave:        
        leave_yellow(beam, &bl);
    }
    return status;
}
Ejemplo n.º 20
0
static apr_status_t
rate_limit_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_status_t rv = APR_SUCCESS;
    rl_ctx_t *ctx = f->ctx;
    apr_bucket_alloc_t *ba = f->r->connection->bucket_alloc;

    /* Set up our rl_ctx_t on first use */
    if (ctx == NULL) {
        const char *rl = NULL;
        int ratelimit;
        int burst = 0;

        /* no subrequests. */
        if (f->r->main != NULL) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* Configuration: rate limit */
        rl = apr_table_get(f->r->subprocess_env, "rate-limit");

        if (rl == NULL) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }
        
        /* rl is in kilobytes per second */
        ratelimit = atoi(rl) * 1024;
        if (ratelimit <= 0) {
            /* remove ourselves */
            ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r,
                          APLOGNO(03488) "rl: disabling: rate-limit = %s (too high?)", rl);
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* Configuration: optional initial burst */
        rl = apr_table_get(f->r->subprocess_env, "rate-initial-burst");
        if (rl != NULL) {
            burst = atoi(rl) * 1024;
            if (burst <= 0) {
               ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r,
                             APLOGNO(03489) "rl: disabling burst: rate-initial-burst = %s (too high?)", rl);
               burst = 0;
            }
        }

        /* Set up our context */
        ctx = apr_palloc(f->r->pool, sizeof(rl_ctx_t));
        f->ctx = ctx;
        ctx->state = RATE_LIMIT;
        ctx->speed = ratelimit;
        ctx->burst = burst;
        ctx->do_sleep = 0;

        /* calculate how many bytes per interval we want to send */
        /* speed is bytes per second, so divide by the number of
         * intervals per second: speed / (1000 / RATE_INTERVAL_MS) */
        ctx->chunk_size = (ctx->speed / (1000 / RATE_INTERVAL_MS));
        ctx->tmpbb = apr_brigade_create(f->r->pool, ba);
        ctx->holdingbb = apr_brigade_create(f->r->pool, ba);
    }
    else {
        APR_BRIGADE_PREPEND(bb, ctx->holdingbb);
    }

    while (!APR_BRIGADE_EMPTY(bb)) {
        apr_bucket *e;

        if (ctx->state == RATE_FULLSPEED) {
            /* Find where we 'stop' going full speed. */
            for (e = APR_BRIGADE_FIRST(bb);
                 e != APR_BRIGADE_SENTINEL(bb); e = APR_BUCKET_NEXT(e)) {
                if (AP_RL_BUCKET_IS_END(e)) {
                    apr_brigade_split_ex(bb, e, ctx->holdingbb);
                    ctx->state = RATE_LIMIT;
                    break;
                }
            }

            e = apr_bucket_flush_create(ba);
            APR_BRIGADE_INSERT_TAIL(bb, e);
            rv = ap_pass_brigade(f->next, bb);
            apr_brigade_cleanup(bb);

            if (rv != APR_SUCCESS) {
                ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, f->r, APLOGNO(01455)
                              "rl: full speed brigade pass failed.");
                return rv;
            }
        }
        else {
            for (e = APR_BRIGADE_FIRST(bb);
                 e != APR_BRIGADE_SENTINEL(bb); e = APR_BUCKET_NEXT(e)) {
                if (AP_RL_BUCKET_IS_START(e)) {
                    apr_brigade_split_ex(bb, e, ctx->holdingbb);
                    ctx->state = RATE_FULLSPEED;
                    break;
                }
            }

            while (!APR_BRIGADE_EMPTY(bb)) {
                apr_off_t len = ctx->chunk_size + ctx->burst;

                APR_BRIGADE_CONCAT(ctx->tmpbb, bb);

                /*
                 * Pull next chunk of data; the initial amount is our
                 * burst allotment (if any) plus a chunk.  All subsequent
                 * iterations are just chunks with whatever remaining
                 * burst amounts we have left (in case not done in the
                 * first bucket).
                 */
                rv = apr_brigade_partition(ctx->tmpbb, len, &e);
                if (rv != APR_SUCCESS && rv != APR_INCOMPLETE) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r, APLOGNO(01456)
                                  "rl: partition failed.");
                    return rv;
                }
                /* Send next metadata now if any */
                while (e != APR_BRIGADE_SENTINEL(ctx->tmpbb)
                       && APR_BUCKET_IS_METADATA(e)) {
                    e = APR_BUCKET_NEXT(e);
                }
                if (e != APR_BRIGADE_SENTINEL(ctx->tmpbb)) {
                    apr_brigade_split_ex(ctx->tmpbb, e, bb);
                }
                else {
                    apr_brigade_length(ctx->tmpbb, 1, &len);
                }

                /*
                 * Adjust the burst amount depending on how much
                 * we've done up to now.
                 */
                if (ctx->burst) {
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
                        APLOGNO(03485) "rl: burst %d; len %"APR_OFF_T_FMT, ctx->burst, len);
                    if (len < ctx->burst) {
                        ctx->burst -= len;
                    }
                    else {
                        ctx->burst = 0;
                    }
                }

                e = APR_BRIGADE_LAST(ctx->tmpbb);
                if (APR_BUCKET_IS_EOS(e)) {
                    ap_remove_output_filter(f);
                }
                else if (!APR_BUCKET_IS_FLUSH(e)) {
                    if (APR_BRIGADE_EMPTY(bb)) {
                        /* Wait for more (or next call) */
                        break;
                    }
                    e = apr_bucket_flush_create(ba);
                    APR_BRIGADE_INSERT_TAIL(ctx->tmpbb, e);
                }

#if defined(RLFDEBUG)
                brigade_dump(f->r, ctx->tmpbb);
                brigade_dump(f->r, bb);
#endif /* RLFDEBUG */

                if (ctx->do_sleep) {
                    apr_sleep(RATE_INTERVAL_MS * 1000);
                }
                else {
                    ctx->do_sleep = 1;
                }

                rv = ap_pass_brigade(f->next, ctx->tmpbb);
                apr_brigade_cleanup(ctx->tmpbb);

                if (rv != APR_SUCCESS) {
                    /* Most often, user disconnects from stream */
                    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, f->r, APLOGNO(01457)
                                  "rl: brigade pass failed.");
                    return rv;
                }
            }
        }

        if (!APR_BRIGADE_EMPTY(ctx->holdingbb)) {
            /* Any rate-limited data in tmpbb is sent unlimited along
             * with the rest.
             */
            APR_BRIGADE_CONCAT(bb, ctx->tmpbb);
            APR_BRIGADE_CONCAT(bb, ctx->holdingbb);
        }
    }

#if defined(RLFDEBUG)
    brigade_dump(f->r, ctx->tmpbb);
#endif /* RLFDEBUG */

    /* Save remaining tmpbb with the correct lifetime for the next call */
    return ap_save_brigade(f, &ctx->holdingbb, &ctx->tmpbb, f->r->pool);
}
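For orientation, a worked example of the chunk_size arithmetic above, assuming the stock interval of 200 ms for RATE_INTERVAL_MS (treat that constant as an assumption of this note):

/*  rate-limit           = 400 (KB/s)  ->  speed     = 400 * 1024 = 409600 bytes/s
 *  intervals per second = 1000 / 200  ->  5
 *  chunk_size           = 409600 / 5  ->  81920 bytes sent per sleep interval
 *  rate-initial-burst   = 512 (KB)    ->  first partition len = 524288 + 81920 bytes
 */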
Ejemplo n.º 21
0
static int cgi_handler(request_rec *r)
{
    int nph;
    apr_size_t dbpos = 0;
    const char *argv0;
    const char *command;
    const char **argv;
    char *dbuf = NULL;
    apr_file_t *script_out = NULL, *script_in = NULL, *script_err = NULL;
    apr_bucket_brigade *bb;
    apr_bucket *b;
    int is_included;
    int seen_eos, child_stopped_reading;
    apr_pool_t *p;
    cgi_server_conf *conf;
    apr_status_t rv;
    cgi_exec_info_t e_info;
    conn_rec *c = r->connection;

    if(strcmp(r->handler, CGI_MAGIC_TYPE) && strcmp(r->handler, "cgi-script"))
        return DECLINED;

    is_included = !strcmp(r->protocol, "INCLUDED");

    p = r->main ? r->main->pool : r->pool;

    argv0 = apr_filepath_name_get(r->filename);
    nph = !(strncmp(argv0, "nph-", 4));
    conf = ap_get_module_config(r->server->module_config, &cgi_module);

    if (!(ap_allow_options(r) & OPT_EXECCGI) && !is_scriptaliased(r))
        return log_scripterror(r, conf, HTTP_FORBIDDEN, 0,
                               "Options ExecCGI is off in this directory");
    if (nph && is_included)
        return log_scripterror(r, conf, HTTP_FORBIDDEN, 0,
                               "attempt to include NPH CGI script");

    if (r->finfo.filetype == 0)
        return log_scripterror(r, conf, HTTP_NOT_FOUND, 0,
                               "script not found or unable to stat");
    if (r->finfo.filetype == APR_DIR)
        return log_scripterror(r, conf, HTTP_FORBIDDEN, 0,
                               "attempt to invoke directory as script");

    if ((r->used_path_info == AP_REQ_REJECT_PATH_INFO) &&
        r->path_info && *r->path_info)
    {
        /* default to accept */
        return log_scripterror(r, conf, HTTP_NOT_FOUND, 0,
                               "AcceptPathInfo off disallows user's path");
    }
/*
    if (!ap_suexec_enabled) {
        if (!ap_can_exec(&r->finfo))
            return log_scripterror(r, conf, HTTP_FORBIDDEN, 0,
                                   "file permissions deny server execution");
    }

*/
    ap_add_common_vars(r);
    ap_add_cgi_vars(r);

    e_info.process_cgi = 1;
    e_info.cmd_type    = APR_PROGRAM;
    e_info.detached    = 0;
    e_info.in_pipe     = APR_CHILD_BLOCK;
    e_info.out_pipe    = APR_CHILD_BLOCK;
    e_info.err_pipe    = APR_CHILD_BLOCK;
    e_info.prog_type   = RUN_AS_CGI;
    e_info.bb          = NULL;
    e_info.ctx         = NULL;
    e_info.next        = NULL;
    e_info.addrspace   = 0;

    /* build the command line */
    if ((rv = cgi_build_command(&command, &argv, r, p, &e_info)) != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "don't know how to spawn child process: %s",
                      r->filename);
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* run the script in its own process */
    if ((rv = run_cgi_child(&script_out, &script_in, &script_err,
                            command, argv, r, p, &e_info)) != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "couldn't spawn child process: %s", r->filename);
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Transfer any put/post args, CERN style...
     * Note that we already ignore SIGPIPE in the core server.
     */
    bb = apr_brigade_create(r->pool, c->bucket_alloc);
    seen_eos = 0;
    child_stopped_reading = 0;
    if (conf->logname) {
        dbuf = apr_palloc(r->pool, conf->bufbytes + 1);
        dbpos = 0;
    }
    do {
        apr_bucket *bucket;

        rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
                            APR_BLOCK_READ, HUGE_STRING_LEN);

        if (rv != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                          "Error reading request entity data");
            return ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
        }

        for (bucket = APR_BRIGADE_FIRST(bb);
             bucket != APR_BRIGADE_SENTINEL(bb);
             bucket = APR_BUCKET_NEXT(bucket))
        {
            const char *data;
            apr_size_t len;

            if (APR_BUCKET_IS_EOS(bucket)) {
                seen_eos = 1;
                break;
            }

            /* We can't do much with this. */
            if (APR_BUCKET_IS_FLUSH(bucket)) {
                continue;
            }

            /* If the child stopped, we still must read to EOS. */
            if (child_stopped_reading) {
                continue;
            }

            /* read */
            apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);

            if (conf->logname && dbpos < conf->bufbytes) {
                int cursize;

                if ((dbpos + len) > conf->bufbytes) {
                    cursize = conf->bufbytes - dbpos;
                }
                else {
                    cursize = len;
                }
                memcpy(dbuf + dbpos, data, cursize);
                dbpos += cursize;
            }

            /* Keep writing data to the child until done or too much time
             * elapses with no progress or an error occurs.
             */
            rv = apr_file_write_full(script_out, data, len, NULL);

            if (rv != APR_SUCCESS) {
                /* silly script stopped reading, soak up remaining message */
                child_stopped_reading = 1;
            }
        }
        apr_brigade_cleanup(bb);
    }
    while (!seen_eos);

    if (conf->logname) {
        dbuf[dbpos] = '\0';
    }
    /* Is this flush really needed? */
    apr_file_flush(script_out);
    apr_file_close(script_out);

    AP_DEBUG_ASSERT(script_in != NULL);

    apr_brigade_cleanup(bb);

#if APR_FILES_AS_SOCKETS
    apr_file_pipe_timeout_set(script_in, 0);
    apr_file_pipe_timeout_set(script_err, 0);

    b = cgi_bucket_create(r, script_in, script_err, c->bucket_alloc);
#else
    b = apr_bucket_pipe_create(script_in, c->bucket_alloc);
#endif
    APR_BRIGADE_INSERT_TAIL(bb, b);
    b = apr_bucket_eos_create(c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, b);

    /* Handle script return... */
    if (!nph) {
        const char *location;
        char sbuf[MAX_STRING_LEN];
        int ret;

        if ((ret = ap_scan_script_header_err_brigade(r, bb, sbuf))) {
            ret = log_script(r, conf, ret, dbuf, sbuf, bb, script_err);

            /*
             * ret could be HTTP_NOT_MODIFIED in the case that the CGI script
             * does not set an explicit status and ap_meets_conditions, which
             * is called by ap_scan_script_header_err_brigade, detects that
             * the conditions of the requests are met and the response is
             * not modified.
             * In this case set r->status and return OK in order to prevent
             * running through the error processing stack as this would
             * break with mod_cache, if the conditions had been set by
             * mod_cache itself to validate a stale entity.
             * BTW: We circumvent the error processing stack anyway if the
             * CGI script set an explicit status code (whatever it is) and
             * the only possible values for ret here are:
             *
             * HTTP_NOT_MODIFIED          (set by ap_meets_conditions)
             * HTTP_PRECONDITION_FAILED   (set by ap_meets_conditions)
             * HTTP_INTERNAL_SERVER_ERROR (if something went wrong during the
             * processing of the response of the CGI script, e.g broken headers
             * or a crashed CGI process).
             */
            if (ret == HTTP_NOT_MODIFIED) {
                r->status = ret;
                return OK;
            }

            return ret;
        }

        location = apr_table_get(r->headers_out, "Location");

        if (location && r->status == 200) {
            /* For a redirect whether internal or not, discard any
             * remaining stdout from the script, and log any remaining
             * stderr output, as normal. */
            discard_script_output(bb);
            apr_brigade_destroy(bb);
            apr_file_pipe_timeout_set(script_err, r->server->timeout);
            log_script_err(r, script_err);
        }

        if (location && location[0] == '/' && r->status == 200) {
            /* This redirect needs to be a GET no matter what the original
             * method was.
             */
            r->method = apr_pstrdup(r->pool, "GET");
            r->method_number = M_GET;

            /* We already read the message body (if any), so don't allow
             * the redirected request to think it has one.  We can ignore
             * Transfer-Encoding, since we used REQUEST_CHUNKED_ERROR.
             */
            apr_table_unset(r->headers_in, "Content-Length");

            ap_internal_redirect_handler(location, r);
            return OK;
        }
        else if (location && r->status == 200) {
            /* XX Note that if a script wants to produce its own Redirect
             * body, it now has to explicitly *say* "Status: 302"
             */
            return HTTP_MOVED_TEMPORARILY;
        }

        rv = ap_pass_brigade(r->output_filters, bb);
    }
    else /* nph */ {
        struct ap_filter_t *cur;

        /* get rid of all filters up through protocol...  since we
         * haven't parsed off the headers, there is no way they can
         * work
         */

        cur = r->proto_output_filters;
        while (cur && cur->frec->ftype < AP_FTYPE_CONNECTION) {
            cur = cur->next;
        }
        r->output_filters = r->proto_output_filters = cur;

        rv = ap_pass_brigade(r->output_filters, bb);
    }

    /* don't soak up script output if errors occurred writing it
     * out...  otherwise, we prolong the life of the script when the
     * connection drops or we stopped sending output for some other
     * reason */
    if (rv == APR_SUCCESS && !r->connection->aborted) {
        apr_file_pipe_timeout_set(script_err, r->server->timeout);
        log_script_err(r, script_err);
    }

    apr_file_close(script_err);

    return OK;                      /* NOT r->status, even if it has changed. */
}
Ejemplo n.º 22
0
/* The handler.  Create a new parser and/or filter context where appropriate
 * and parse the chunks of data received from the brigade
 */
static int idlChunkHandler( ap_filter_t *f, apr_bucket_brigade *brigade ) {

	idlChunkContext* ctx = f->ctx;
	apr_bucket* currentBucket = NULL;
	apr_pool_t* pool = f->r->pool;
	const char* data;
  	apr_size_t len;
    osrfStringArray* params = NULL;
    mparams = NULL;

	/* load the per-dir/location config */
	idlChunkConfig* config = ap_get_module_config( 
			f->r->per_dir_config, &idlchunk_module );

	ap_log_rerror(APLOG_MARK, APLOG_ERR, 
			0, f->r, "IDLCHUNK Config:\nContent Type = %s, "
			"Strip PI = %s, Strip Comments = %s, Doctype = %s", 
			config->contentType, 
			(config->stripPI) ? "yes" : "no", 
			(config->stripComments) ? "yes" : "no",
			config->doctype);

	/* set the content type based on the config */
	ap_set_content_type(f->r, config->contentType);

	//ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, "Set content type");

    params = apacheParseParms(f->r); /* free me */
    mparams = apacheGetParamValues( params, "class" ); /* free me */

    all = 1;

    if (mparams && mparams->size > 0) all = 0;

	//ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, "Parsed the params, if any");

	/* create the XML parser */
	int firstrun = 0;
	if( parser == NULL ) {
		firstrun = 1;
		parser = XML_ParserCreate("UTF-8");
		XML_SetUserData(parser, f);
		XML_SetElementHandler(parser, startElement, endElement);
		XML_SetCharacterDataHandler(parser, charHandler);
		if(!config->stripDoctype)
			XML_SetStartDoctypeDeclHandler( parser, doctypeHandler );
		if(!config->stripPI)
			XML_SetProcessingInstructionHandler(parser, handlePI);
		if(!config->stripComments)
			XML_SetCommentHandler(parser, handleComment);
	}

	/* create the filter context */
	if( ctx == NULL ) {
		f->ctx = ctx = apr_pcalloc( pool, sizeof(*ctx));
		ctx->brigade = apr_brigade_create( pool, f->c->bucket_alloc );
		ctx->parser = parser;
	}


	if(firstrun) { /* we haven't started writing the data to the stream yet */

		/* go ahead and write the doctype out if we have one defined */
		if(config->doctype) {
			ap_log_rerror( APLOG_MARK, APLOG_DEBUG, 
					0, f->r, "IDLCHUNK DOCTYPE => %s", config->doctype);
			_fwrite(f, "%s\n", config->doctype);
		}
	}


	/* cycle through the buckets in the brigade */
	while (!APR_BRIGADE_EMPTY(brigade)) {

		/* grab the next bucket */
		currentBucket = APR_BRIGADE_FIRST(brigade);

		/* clean up when we're done */
		if (APR_BUCKET_IS_EOS(currentBucket) || APR_BUCKET_IS_FLUSH(currentBucket)) {
    	  	APR_BUCKET_REMOVE(currentBucket);
			APR_BRIGADE_INSERT_TAIL(ctx->brigade, currentBucket);
			ap_pass_brigade(f->next, ctx->brigade);
			XML_ParserFree(parser);
            if (params) osrfStringArrayFree(params);
            if (mparams) osrfStringArrayFree(mparams);
			parser = NULL;
		  	return APR_SUCCESS;
    	}

		/* read the incoming data */
		int s = apr_bucket_read(currentBucket, &data, &len, APR_NONBLOCK_READ);
		if( s != APR_SUCCESS ) {
			ap_log_rerror( APLOG_MARK, APLOG_ERR, 0, f->r, 
					"IDLCHUNK error reading data from filter with status %d", s);
            if (params) osrfStringArrayFree(params);
            if (mparams) osrfStringArrayFree(mparams);
			return s;
		}

		if (len > 0) {

			ap_log_rerror( APLOG_MARK, APLOG_DEBUG, 
					0, f->r, "IDLCHUNK read %d bytes", (int)len);

			/* push data into the XML push parser */
			if ( XML_Parse(ctx->parser, data, len, 0) == XML_STATUS_ERROR ) {

                char tmp[len+1];
                memcpy(tmp, data, len);
                tmp[len] = '\0';

				/* log and die on XML errors */
				ap_log_rerror( APLOG_MARK, APLOG_ERR, 0, f->r, 
                    "IDLCHUNK XML Parse Error: %s at line %d: parsing %s: data %s",
					XML_ErrorString(XML_GetErrorCode(ctx->parser)), 
					(int) XML_GetCurrentLineNumber(ctx->parser), f->r->filename, tmp);

				XML_ParserFree(parser);
                if (params) osrfStringArrayFree(params);
                if (mparams) osrfStringArrayFree(mparams);
				parser = NULL;
				return HTTP_INTERNAL_SERVER_ERROR; 
			}
    	}

		/* so a subrequest doesn't re-read this bucket */
		apr_bucket_delete(currentBucket); 
  	}

	apr_brigade_destroy(brigade);
    if (params) osrfStringArrayFree(params);
    if (mparams) osrfStringArrayFree(mparams);
  	return APR_SUCCESS;	
}
Ejemplo n.º 23
0
/*
 * HTTP/1.1 chunked transfer encoding filter.
 */
static apr_status_t chunk_filter(ap_filter_t *f, apr_bucket_brigade *b)
{
#define ASCII_CRLF  "\015\012"
#define ASCII_ZERO  "\060"
    conn_rec *c = f->r->connection;
    apr_bucket_brigade *more;
    apr_bucket *e;
    apr_status_t rv;

    for (more = NULL; b; b = more, more = NULL) {
        apr_off_t bytes = 0;
        apr_bucket *eos = NULL;
        apr_bucket *flush = NULL;
        /* XXX: chunk_hdr must remain at this scope since it is used in a 
         *      transient bucket.
         */
        char chunk_hdr[20]; /* enough space for the snprintf below */

        APR_BRIGADE_FOREACH(e, b) {
            if (APR_BUCKET_IS_EOS(e)) {
                /* there shouldn't be anything after the eos */
                eos = e;
                break;
            }
            if (APR_BUCKET_IS_FLUSH(e)) {
                flush = e;
                more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
                break;
            }
            else if (e->length == (apr_size_t)-1) {
                /* unknown amount of data (e.g. a pipe) */
                const char *data;
                apr_size_t len;

                rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
                if (rv != APR_SUCCESS) {
                    return rv;
                }
                if (len > 0) {
                    /*
                     * There may be a new next bucket representing the
                     * rest of the data stream on which a read() may
                     * block so we pass down what we have so far.
                     */
                    bytes += len;
                    more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
                    break;
                }
                else {
                    /* If there was nothing in this bucket then we can
                     * safely move on to the next one without pausing
                     * to pass down what we have counted up so far.
                     */
                    continue;
                }
            }
            else {
                bytes += e->length;
            }
        }

        /*
         * XXX: if there aren't very many bytes at this point it may
         * be a good idea to set them aside and return for more,
         * unless we haven't finished counting this brigade yet.
         */
        /* if there are content bytes, then wrap them in a chunk */
        if (bytes > 0) {
            apr_size_t hdr_len;
            /*
             * Insert the chunk header, specifying the number of bytes in
             * the chunk.
             */
            /* XXX might be nice to have APR_OFF_T_FMT_HEX */
            hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr),
                                   "%qx" CRLF, (apr_uint64_t)bytes);
            ap_xlate_proto_to_ascii(chunk_hdr, hdr_len);
            e = apr_bucket_transient_create(chunk_hdr, hdr_len,
                                            c->bucket_alloc);
            APR_BRIGADE_INSERT_HEAD(b, e);

            /*
             * Insert the end-of-chunk CRLF before an EOS or
             * FLUSH bucket, or appended to the brigade
             */
            e = apr_bucket_immortal_create(ASCII_CRLF, 2, c->bucket_alloc);
            if (eos != NULL) {
                APR_BUCKET_INSERT_BEFORE(eos, e);
            }
            else if (flush != NULL) {
                APR_BUCKET_INSERT_BEFORE(flush, e);
            }
            else {
                APR_BRIGADE_INSERT_TAIL(b, e);
            }
        }

        /* RFC 2616, Section 3.6.1
         *
         * If there is an EOS bucket, then prefix it with:
         *   1) the last-chunk marker ("0" CRLF)
         *   2) the trailer
         *   3) the end-of-chunked body CRLF
         *
         * If there is no EOS bucket, then do nothing.
         *
         * XXX: it would be nice to combine this with the end-of-chunk
         * marker above, but this is a bit more straight-forward for
         * now.
         */
        if (eos != NULL) {
            /* XXX: (2) trailers ... does not yet exist */
            e = apr_bucket_immortal_create(ASCII_ZERO ASCII_CRLF
                                           /* <trailers> */
                                           ASCII_CRLF, 5, c->bucket_alloc);
            APR_BUCKET_INSERT_BEFORE(eos, e);
        }

        /* pass the brigade to the next filter. */
        rv = ap_pass_brigade(f->next, b);
        if (rv != APR_SUCCESS || eos != NULL) {
            return rv;
        }
    }
    return APR_SUCCESS;
}
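For reference (this illustration is not part of the filter source): given a single brigade carrying 15 bytes of content followed by an EOS bucket, the filter above emits the hex chunk-size line, the data, the end-of-chunk CRLF, then the last-chunk marker and the blank line that closes the chunked body; no trailers are written, as the XXX comment notes.

/* what the chunked body looks like on the wire for 15 bytes + EOS */
static const char example_chunked_body[] =
    "f\r\n"               /* chunk-size: 15 in hex */
    "0123456789abcde"     /* the 15 payload bytes */
    "\r\n"                /* end-of-chunk CRLF */
    "0\r\n"               /* last-chunk marker */
    "\r\n";               /* end of the chunked body (no trailers) */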
Example No. 24
static am_status_t get_request_body(am_request_t *rq) {
    const char *thisfunc = "get_request_body():";
    request_rec *r = (request_rec *) (rq != NULL ? rq->ctx : NULL);
    apr_bucket_brigade *bb;
    int eos_found = 0, read_bytes = 0;
    apr_status_t read_status = 0;
    am_status_t status = AM_ERROR;
    char *out = NULL;

    /* validate the arguments before touching r->pool */
    if (r == NULL || rq == NULL) {
        return AM_EINVAL;
    }

    bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);

    do {
        apr_bucket *ob;
        read_status = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
                APR_BLOCK_READ, HUGE_STRING_LEN);
        if (read_status != APR_SUCCESS) {
            if (out != NULL) free(out);
            return AM_ERROR;
        }

        ob = APR_BRIGADE_FIRST(bb);
        while (ob != APR_BRIGADE_SENTINEL(bb)) {
            const char *data;
            apr_size_t data_size;

            if (APR_BUCKET_IS_EOS(ob)) {
                eos_found = 1;
                break;
            }

            if (APR_BUCKET_IS_FLUSH(ob)) {
                /* skip flush buckets, but advance so the loop makes progress */
                ob = APR_BUCKET_NEXT(ob);
                continue;
            }

            /* read data */
            apr_bucket_read(ob, &data, &data_size, APR_BLOCK_READ);
            /* grow the buffer; use a temporary so the data read so far
             * is not leaked if realloc() fails */
            char *tmp = realloc(out, read_bytes + data_size + 1);
            if (tmp == NULL) {
                status = AM_ENOMEM;
                eos_found = 1;
                break;
            }
            out = tmp;

            memcpy(out + read_bytes, data, data_size);
            read_bytes += (int) data_size;
            out[read_bytes] = 0;

            ob = APR_BUCKET_NEXT(ob);
            status = AM_SUCCESS;
        }
        /* empty the brigade before the next ap_get_brigade() call;
         * it is destroyed for good after the loop */
        apr_brigade_cleanup(bb);

    } while (eos_found == 0);

    apr_brigade_destroy(bb);

    rq->post_data = out;
    rq->post_data_sz = read_bytes;

    if (status == AM_SUCCESS) {
        am_log_debug(rq->instance_id, "%s read %d bytes \n%s", thisfunc,
                read_bytes, LOGEMPTY(out));
        /* remove the content length since the body has been read */
        r->clength = 0;
        apr_table_unset(r->headers_in, "Content-Length");
    }
    return status;
}
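A hedged alternative to the realloc() accumulation above (the function and variable names here are illustrative, not part of the agent code): when the body is expected to fit comfortably in memory, the same read loop can move the buckets aside and let apr_brigade_pflatten() copy them into a single pool-allocated buffer, so no manual free() is needed:

#include "httpd.h"
#include "util_filter.h"
#include "apr_buckets.h"

static apr_status_t read_body_pooled(request_rec *r, char **body, apr_size_t *len)
{
    apr_bucket_brigade *bb  = apr_brigade_create(r->pool, r->connection->bucket_alloc);
    apr_bucket_brigade *acc = apr_brigade_create(r->pool, r->connection->bucket_alloc);
    int seen_eos = 0;

    do {
        apr_status_t rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
                                         APR_BLOCK_READ, HUGE_STRING_LEN);
        if (rv != APR_SUCCESS)
            return rv;

        /* move everything read so far into the accumulator brigade */
        while (!APR_BRIGADE_EMPTY(bb)) {
            apr_bucket *b = APR_BRIGADE_FIRST(bb);
            if (APR_BUCKET_IS_EOS(b))
                seen_eos = 1;
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(acc, b);
        }
    } while (!seen_eos);

    /* flatten the accumulated buckets into one pool-allocated buffer
     * (not NUL-terminated; *len holds the byte count) */
    return apr_brigade_pflatten(acc, body, len, r->pool);
}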
Example No. 25
static PyObject *_filter_read(filterobject *self, PyObject *args, int readline)
{

    apr_bucket *b;
    long bytes_read;
    PyObject *result;
    char *buffer;
    long bufsize;
    int newline = 0;
    long len = -1;
    conn_rec *c = self->request_obj->request_rec->connection;

    if (! PyArg_ParseTuple(args, "|l", &len)) 
        return NULL;

    if (self->closed) {
        PyErr_SetString(PyExc_ValueError, "I/O operation on closed filter");
        return NULL;
    }

    if (self->is_input) {

        /* does the input brigade exist yet? */
        if (!self->bb_in) {
            self->bb_in = apr_brigade_create(self->f->r->pool, 
                                             c->bucket_alloc);
        }

        Py_BEGIN_ALLOW_THREADS;
        self->rc = ap_get_brigade(self->f->next, self->bb_in, self->mode, 
                                  APR_BLOCK_READ, self->readbytes);
        Py_END_ALLOW_THREADS;

        if (self->rc != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(self->rc)) {
            PyErr_SetObject(PyExc_IOError, 
                            PyString_FromString("Input filter read error"));
            return NULL;
        }
    }

    /* 
     * loop through the brigade reading buckets into the string 
     */

    b = APR_BRIGADE_FIRST(self->bb_in);

    if (b == APR_BRIGADE_SENTINEL(self->bb_in))
        return PyString_FromString("");

    /* reached eos ? */
    if (APR_BUCKET_IS_EOS(b)) {
        apr_bucket_delete(b);
        Py_INCREF(Py_None);
        return Py_None;
    }

    bufsize = len < 0 ? HUGE_STRING_LEN : len;
    /* PYTHON 2.5: 'PyString_FromStringAndSize' uses Py_ssize_t for input parameters */ 
    result = PyString_FromStringAndSize(NULL, bufsize);

    /* possibly no more memory */
    if (result == NULL) 
        return PyErr_NoMemory();
    
    buffer = PyString_AS_STRING((PyStringObject *) result);

    bytes_read = 0;

    while ((bytes_read < len || len == -1) && 
           !(APR_BUCKET_IS_EOS(b) || APR_BUCKET_IS_FLUSH(b) ||
             b == APR_BRIGADE_SENTINEL(self->bb_in))) {

        const char *data;
        apr_size_t size;
        apr_bucket *old;
        int i;

        if (apr_bucket_read(b, &data, &size, APR_BLOCK_READ) != APR_SUCCESS) {
            PyErr_SetObject(PyExc_IOError, 
                            PyString_FromString("Filter read error"));
            return NULL;
        }

        if (bytes_read + size > bufsize) {
            apr_bucket_split(b, bufsize - bytes_read);
            size = bufsize - bytes_read;
            /* now the bucket is the exact size we need */
        }

        if (readline) {

            /* scan for newline */
            for (i=0; i<size; i++) {
                if (data[i] == '\n') {
                    if (i+1 != size) {   /* (no need to split if we're at end of bucket) */
                        
                        /* split after newline */
                        apr_bucket_split(b, i+1);   
                        size = i + 1;
                    }
                    newline = 1;
                    break;
                }
            }
        }

        memcpy(buffer, data, size);
        buffer += size;
        bytes_read += size;

        /* time to grow destination string? */
        if (newline == 0 && len < 0 && bytes_read == bufsize) {

            /* PYTHON 2.5: '_PyString_Resize' uses Py_ssize_t for input parameters */
            if (_PyString_Resize(&result, bufsize + HUGE_STRING_LEN))
                return NULL;
            buffer = PyString_AS_STRING((PyStringObject *) result);
            buffer += bytes_read;

            bufsize += HUGE_STRING_LEN;
        }

        if (readline && newline) {
            apr_bucket_delete(b);
            break;
        }

        old = b;
        b = APR_BUCKET_NEXT(b);
        apr_bucket_delete(old);
        
/*         if (self->is_input) { */

/*             if (b == APR_BRIGADE_SENTINEL(self->bb_in)) { */
/*                 /\* brigade ended, but no EOS - get another */
/*                    brigade *\/ */

/*                 Py_BEGIN_ALLOW_THREADS; */
/*                 self->rc = ap_get_brigade(self->f->next, self->bb_in, self->mode,  */
/*                                           APR_BLOCK_READ, self->readbytes); */
/*                 Py_END_ALLOW_THREADS; */

/*                 if (! APR_STATUS_IS_SUCCESS(self->rc)) { */
/*                     PyErr_SetObject(PyExc_IOError,  */
/*                                     PyString_FromString("Input filter read error")); */
/*                     return NULL; */
/*                 } */
/*                 b = APR_BRIGADE_FIRST(self->bb_in); */
/*             } */
/*         }  */
    }

    /* resize if necessary */
    if (bytes_read < len || len < 0) 
        /* PYTHON 2.5: '_PyString_Resize' uses Py_ssize_t for input parameters */
        if(_PyString_Resize(&result, bytes_read))
            return NULL;

    return result;
}
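A standalone sketch of the split-at-newline idea used above (the helper name is illustrative, not part of mod_python): apr_bucket_split(b, n) truncates b to its first n bytes and inserts a new bucket holding the remainder immediately after it, so the unread tail stays in the brigade for the next read.

#include "apr_buckets.h"

static apr_status_t split_after_newline(apr_bucket *b)
{
    const char *data;
    apr_size_t len, i;
    apr_status_t rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);

    if (rv != APR_SUCCESS)
        return rv;

    for (i = 0; i < len; i++) {
        if (data[i] == '\n' && i + 1 < len) {
            /* keep bytes 0..i in b; the rest becomes the following bucket */
            return apr_bucket_split(b, i + 1);
        }
    }
    /* no newline, or the newline ends the bucket: nothing to split */
    return APR_SUCCESS;
}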