Example #1
static apr_status_t
read_complete_body(request_rec *r, apr_bucket_brigade *kept_body)
{
    apr_bucket_brigade *tmp_bb;
    apr_bucket *t_bucket1, *t_bucket2;
    unsigned short eos_seen = 0;
    apr_status_t status;

    tmp_bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
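    /* tmp_bb is a scratch brigade: each loop iteration reads into it,
     * copies its buckets over to kept_body, and then empties it again
     * with apr_brigade_cleanup(). */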

    while (!eos_seen) {
        status = ap_get_brigade(
                        r->input_filters,
                        tmp_bb,
                        AP_MODE_READBYTES,
                        APR_BLOCK_READ,
                        HUGE_STRING_LEN);

        /* This means a filter discovered an error.
         * Furthermore, the input filter has already handled the error and
         * sent something to the output chain.
         * For example, ap_http_filter does this if LimitRequestBody is
         * reached.
         */
        if (status == AP_FILTER_ERROR) {
            apr_brigade_destroy(tmp_bb);
            return AP_FILTER_ERROR;
        }

        /* Cool, no need to search for the EOS bucket */
        if (APR_STATUS_IS_EOF(status)) {
            apr_brigade_destroy(tmp_bb);
            return APR_SUCCESS;
        }

        if (status != APR_SUCCESS) {
            apr_brigade_destroy(tmp_bb);
            return status;
        }

        ITER_BRIGADE(t_bucket1, tmp_bb) {

            apr_bucket_copy(t_bucket1, &t_bucket2);

            /* If SSL is used, TRANSIENT buckets are returned. However, we
             * need this bucket for longer than this function call, hence
             * 'setaside' the bucket.
             */
            if (APR_BUCKET_IS_TRANSIENT(t_bucket2)) {
                apr_bucket_setaside(t_bucket2, r->pool);
            }

            APR_BRIGADE_INSERT_TAIL(kept_body, t_bucket2);

            if (!eos_seen && APR_BUCKET_IS_EOS(t_bucket1)) {
                eos_seen = 1;
            }
        }
        apr_brigade_cleanup(tmp_bb);
    }

    apr_brigade_destroy(tmp_bb);
    return APR_SUCCESS;
}
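A minimal sketch of a hypothetical caller (not part of the example above), showing how such a helper is typically used from a handler:

/* Hypothetical caller: collect the complete request body into kept_body. */
apr_bucket_brigade *kept_body;
apr_status_t rv;

kept_body = apr_brigade_create(r->pool, r->connection->bucket_alloc);
rv = read_complete_body(r, kept_body);
if (rv == AP_FILTER_ERROR) {
    /* An input filter has already handled the error and produced output. */
    return AP_FILTER_ERROR;
}
if (rv != APR_SUCCESS) {
    return HTTP_INTERNAL_SERVER_ERROR;
}
/* kept_body now holds the complete request body, set aside in r->pool. */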
Example #2
AP_DECLARE(apr_status_t) ap_save_brigade(ap_filter_t *f,
                                         apr_bucket_brigade **saveto,
                                         apr_bucket_brigade **b, apr_pool_t *p)
{
    apr_bucket *e;
    apr_status_t rv, srv = APR_SUCCESS;

    /* If we have never stored any data in the filter, then we had better
     * create an empty bucket brigade so that we can concat.
     */
    if (!(*saveto)) {
        *saveto = apr_brigade_create(p, f->c->bucket_alloc);
    }

    for (e = APR_BRIGADE_FIRST(*b);
         e != APR_BRIGADE_SENTINEL(*b);
         e = APR_BUCKET_NEXT(e))
    {
        rv = apr_bucket_setaside(e, p);

        /* If the bucket type does not implement setaside, then
         * (hopefully) morph it into a bucket type which does, and set
         * *that* aside... */
        if (rv == APR_ENOTIMPL) {
            const char *s;
            apr_size_t n;

            rv = apr_bucket_read(e, &s, &n, APR_BLOCK_READ);
            if (rv == APR_SUCCESS) {
                rv = apr_bucket_setaside(e, p);
            }
        }

        if (rv != APR_SUCCESS) {
            srv = rv;
            /* Return an error but still save the brigade if
             * ->setaside() is really not implemented. */
            if (rv != APR_ENOTIMPL) {
                return rv;
            }
        }
    }
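    /* APR_BRIGADE_CONCAT moves every bucket from *b to the tail of
     * *saveto, leaving *b empty for the caller to reuse. */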
    APR_BRIGADE_CONCAT(*saveto, *b);
    return srv;
}
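For context, a minimal sketch of the common ap_save_brigade pattern in an output filter (hypothetical names my_filter/my_ctx, not taken from the examples on this page): data that cannot be consumed in the current invocation is set aside into the filter context and re-prepended on the next call.

#include "httpd.h"
#include "util_filter.h"
#include "apr_buckets.h"

typedef struct {
    apr_bucket_brigade *bbsave;   /* unconsumed data from a previous call */
} my_ctx;

static apr_status_t my_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    my_ctx *ctx = f->ctx;

    if (ctx == NULL) {
        ctx = f->ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
    }

    /* Re-insert anything set aside on a previous invocation. */
    if (ctx->bbsave && !APR_BRIGADE_EMPTY(ctx->bbsave)) {
        APR_BRIGADE_PREPEND(bb, ctx->bbsave);
    }

    /* ... process bb, possibly passing parts of it to f->next ... */

    /* Whatever is left cannot be sent yet: set it aside so the bucket
     * data stays valid after this call returns. ap_save_brigade()
     * creates ctx->bbsave on first use (see Example #2 above). */
    return ap_save_brigade(f, &ctx->bbsave, &bb, f->r->pool);
}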
Example #3
/**
 * @internal
 *
 * "Sniffs" the output (response) data from the connection stream.
 */
static int ironbee_output_filter (ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_bucket *b;
#if 0
    conn_rec *c = f->c;
    ironbee_conn_context *ctx = f->ctx;
    ib_conn_t *iconn = ctx->iconn;
    ib_core_cfg_t *corecfg;
    int buffering = 0;

    /* Configure. */
    ib_context_module_config(iconn->ctx, ib_core_module(), (void *)&corecfg);
    if (corecfg != NULL) {
        buffering = (int)corecfg->buffer_res;
    }
#endif


    for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb); b = APR_BUCKET_NEXT(b)) {
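        /* Hand each bucket to IronBee for inspection; the brigade itself
         * is passed down the output filter chain unchanged afterwards. */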
#if 0
        /// @todo Should this be done?  Maybe only for proxy?
        if (APR_BUCKET_IS_EOS(b)) {
            /// @todo Do we need to do this? Maybe only for proxy.
            apr_bucket *flush = apr_bucket_flush_create(f->c->bucket_alloc);
            APR_BUCKET_INSERT_BEFORE(b, flush);
        }

        if (buffering) {
            /// @todo setaside into our own pool to destroy later???
            apr_bucket_setaside(b, c->pool);
            process_bucket(f, b);
            APR_BUCKET_REMOVE(b);
        }
        else {
#endif
            process_bucket(f, b);
#if 0
        }
#endif
    }

    return ap_pass_brigade(f->next, bb);
}
Example #4
/**
 * @internal
 *
 * "Sniffs" the input (request) data from the connection stream and tries
 * to determine who closed a connection and why.
 */
static int ironbee_input_filter(ap_filter_t *f, apr_bucket_brigade *bb,
                                ap_input_mode_t mode, apr_read_type_e block,
                                apr_off_t readbytes)
{
    conn_rec *c = f->c;
    ironbee_conn_context *ctx = f->ctx;
    ib_conn_t *iconn = ctx->iconn;
    ib_core_cfg_t *corecfg;
    ib_stream_t *istream;
    apr_bucket *b;
    apr_status_t rc;
    int buffering = 0;

    /* Any mode not handled just gets passed through. */
    if ((mode != AP_MODE_GETLINE) && (mode != AP_MODE_READBYTES)) {
        return ap_get_brigade(f->next, bb, mode, block, readbytes);
    }

    /* Configure. */
    ib_context_module_config(iconn->ctx, ib_core_module(), (void *)&corecfg);
    if (corecfg != NULL) {
        buffering = (int)corecfg->buffer_req;
    }

    /* When buffering, data is removed from the brigade and handed
     * to IronBee. The filter must not return an empty brigade in this
     * case, so it keeps reading until processed data comes back from
     * IronBee.
     */
    do {
        ib_tx_t *itx = iconn->tx;

        /* If there is any processed data, then send it now. */
        if (buffering && (itx != NULL)) {
            ib_sdata_t *sdata;

            /* Take any data from the drain (processed data) and
             * inject it back into the filter brigade.
             */
            ib_fctl_drain(itx->fctl, &istream);
            if ((istream != NULL) && (istream->nelts > 0)) {
                int done = 0;

                while (!done) {
                    apr_bucket *ibucket = NULL;

                    /// @todo Handle multi-bucket lines
                    if (mode == AP_MODE_GETLINE) {
                        done = 1;
                    }

                    ib_stream_pull(istream, &sdata);
                    if (sdata == NULL) {
                        /* No more data left. */
                        break;
                    }

                    switch (sdata->type) {
                        case IB_STREAM_DATA:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": DATA[%d]: %.*s", (int)sdata->dlen, (int)sdata->dlen, (char *)sdata->data);
#endif
                            
                            /// @todo Is this creating a copy?  Just need a reference.
                            ibucket = apr_bucket_heap_create(sdata->data,
                                                             sdata->dlen,
                                                             NULL,
                                                             bb->bucket_alloc);
                            break;
                        case IB_STREAM_FLUSH:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": FLUSH");
#endif
                            ibucket = apr_bucket_flush_create(bb->bucket_alloc);
                            break;
                        case IB_STREAM_EOH:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": EOH");
#endif
                            /// @todo Do something here???
                            break;
                        case IB_STREAM_EOB:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": EOB");
#endif
                            /// @todo Do something here???
                            break;
                        case IB_STREAM_EOS:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": EOS");
#endif
                            ibucket = apr_bucket_eos_create(bb->bucket_alloc);
                            break;
                        default:
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": UNKNOWN stream data type %d", sdata->type);
                    }

                    if (ibucket != NULL) {
                        APR_BRIGADE_INSERT_TAIL(bb, ibucket);
                    }
                }

                /* Need to send any processed data to avoid deadlock. */
                if (!APR_BRIGADE_EMPTY(bb)) {
                    return APR_SUCCESS;
                }
            }
        }

        /* Fetch data from the next filter. */
        if (buffering) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         "FETCH BRIGADE (buffering)");

            /* Normally Apache will request the headers line-by-line, but
             * IronBee does not require this. So the request is fetched
             * here with READBYTES, and IronBee will break the data back
             * up into lines when it is injected back into the brigade
             * after processing.
             */
            rc = ap_get_brigade(f->next,
                                bb,
                                AP_MODE_READBYTES,
                                block,
                                HUGE_STRING_LEN);
        }
        else {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         "FETCH BRIGADE (non-buffering)");
            rc = ap_get_brigade(f->next, bb, mode, block, readbytes);
        }

        /* Check for any timeouts/disconnects/errors. */
        if (APR_STATUS_IS_TIMEUP(rc)) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         IB_PRODUCT_NAME ": %s server closed connection (%d)",
                         f->frec->name, rc);

            ap_remove_input_filter(f);
            return rc;
        }
        else if (APR_STATUS_IS_EOF(rc) || apr_get_os_error() == ECONNRESET) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         IB_PRODUCT_NAME ": %s client closed connection (%d)",
                         f->frec->name, rc);

            ap_remove_input_filter(f);
            return rc;
        }
        else if (rc != APR_SUCCESS) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         IB_PRODUCT_NAME ": %s returned %d (0x%08x) - %s",
                         f->frec->name, rc, rc, strerror(apr_get_os_error()));

            return rc;
        }

        /* Process data. */
        for (b = APR_BRIGADE_FIRST(bb);
             b != APR_BRIGADE_SENTINEL(bb);
             b = APR_BUCKET_NEXT(b))
        {
            if (buffering) {
                /// @todo setaside into our own pool to destroy later???
                apr_bucket_setaside(b, c->pool);
                process_bucket(f, b);
                APR_BUCKET_REMOVE(b);
            }
            else {
                process_bucket(f, b);
            }
        }
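        /* In buffering mode, loop again so that data processed by IronBee
         * can be drained and returned; in pass-through mode one pass is
         * enough. */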
    } while (buffering);

    return APR_SUCCESS;
}
Example #5
static apr_status_t line_edit_filter(ap_filter_t* f, apr_bucket_brigade* bb) {
  int i, j;
  unsigned int match ;
  unsigned int nmatch = 10 ;
  ap_regmatch_t pmatch[10] ;
  const char* bufp;
  const char* subs ;
  apr_size_t bytes ;
  apr_size_t fbytes ;
  apr_size_t offs ;
  const char* buf ;
  const char* le = NULL ;
  const char* le_n ;
  const char* le_r ;
  char* fbuf ;
  apr_bucket* b = APR_BRIGADE_FIRST(bb) ;
  apr_bucket* b1 ;
  int found = 0 ;
  apr_status_t rv ;

  apr_bucket_brigade* bbline ;
  line_edit_cfg* cfg
	= ap_get_module_config(f->r->per_dir_config, &line_edit_module) ;
  rewriterule* rules = (rewriterule*) cfg->rewriterules->elts ;
  rewriterule* newrule;

  line_edit_ctx* ctx = f->ctx ;
  if (ctx == NULL) {

    /* check env to see if we're wanted, to give basic control with 2.0 */
    buf = apr_table_get(f->r->subprocess_env, "LineEdit");
    if (buf && f->r->content_type) {
      char* lcbuf = apr_pstrdup(f->r->pool, buf) ;
      char* lctype = apr_pstrdup(f->r->pool, f->r->content_type) ;
      char* c ;

      for (c = lcbuf; *c; ++c)
	if (isupper(*c))
	  *c = tolower(*c) ;

      for (c = lctype; *c; ++c)
	if (isupper(*c))
	  *c = tolower(*c) ;
	else if (*c == ';') {
	  *c = 0 ;
	  break ;
	}

      if (!strstr(lcbuf, lctype)) {
	/* don't filter this content type */
	ap_filter_t* fnext = f->next ;
	ap_remove_output_filter(f) ;
	return ap_pass_brigade(fnext, bb) ;
      }
    }

    ctx = f->ctx = apr_palloc(f->r->pool, sizeof(line_edit_ctx)) ;
    ctx->bbsave = apr_brigade_create(f->r->pool, f->c->bucket_alloc) ;

    /* If we have any regex matches, we'll need to copy everything, so we
     * have null-terminated strings to parse.  That's a lot of memory if
     * we're streaming anything big.  So we'll use (and reuse) a local
     * subpool.  Fall back to the request pool if anything bad happens.
     */
    ctx->lpool = f->r->pool ;
    for (i = 0; i < cfg->rewriterules->nelts; ++i) {
      if ( rules[i].flags & M_REGEX ) {
        if (apr_pool_create(&ctx->lpool, f->r->pool) != APR_SUCCESS) {
	  ctx->lpool = f->r->pool ;
        }
        break ;
      }
    }
    /* If we have env interpolation, we'll need a private copy of
     * our rewrite rules with this requests env.  Otherwise we can
     * save processing time by using the original.
     *
     * If one ENV is found, we also have to copy all previous and
     * subsequent rules, even those with no interpolation.
     */
    ctx->rewriterules = cfg->rewriterules;
    for (i = 0; i < cfg->rewriterules->nelts; ++i) {
      found |= (rules[i].flags & M_ENV) ;
      if ( found ) {
	if (ctx->rewriterules == cfg->rewriterules) {
	  ctx->rewriterules = apr_array_make(f->r->pool,
		cfg->rewriterules->nelts, sizeof(rewriterule));
	  for (j = 0; j < i; ++j) {
            newrule = apr_array_push (((line_edit_ctx*)ctx)->rewriterules) ;
	    newrule->from = rules[j].from;
	    newrule->to = rules[j].to;
	    newrule->flags = rules[j].flags;
	    newrule->length = rules[j].length;
	  }
	}
	/* this rule needs to be interpolated */
        newrule = apr_array_push (((line_edit_ctx*)ctx)->rewriterules) ;
	newrule->from = rules[i].from;
	if (rules[i].flags & M_ENV) {
	  newrule->to = interpolate_env(f->r, rules[i].to);
	} else {
	  newrule->to = rules[i].to ;
	}
	newrule->flags = rules[i].flags;
	newrule->length = rules[i].length;
      }
    }
    /* for back-compatibility with Apache 2.0, set some protocol stuff */
    apr_table_unset(f->r->headers_out, "Content-Length") ;
    apr_table_unset(f->r->headers_out, "Content-MD5") ;
    apr_table_unset(f->r->headers_out, "Accept-Ranges") ;
  }
  /* by now our rules are in ctx->rewriterules */
  rules = (rewriterule*) ctx->rewriterules->elts ;

  /* bbline is what goes to the next filter,
   * so we (can) have a new one each time.
   */
  bbline = apr_brigade_create(f->r->pool, f->c->bucket_alloc) ;

  /* first ensure we have no mid-line breaks that might be in the
   * middle of a search string causing us to miss it!  At the same
   * time we split into lines to avoid pattern-matching over big
   * chunks of memory.
   */
  while ( b != APR_BRIGADE_SENTINEL(bb) ) {
    if ( !APR_BUCKET_IS_METADATA(b) ) {
      if ( apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS ) {
	if ( bytes == 0 ) {
	  APR_BUCKET_REMOVE(b) ;
	} else while ( bytes > 0 ) {
	  switch (cfg->lineend) {

	  case LINEEND_UNIX:
	    le = memchr(buf, '\n', bytes) ;
	    break ;

	  case LINEEND_MAC:
	    le = memchr(buf, '\r', bytes) ;
	    break ;

	  case LINEEND_DOS:
	    /* Edge-case issue: if a \r\n spans buckets it'll get missed.
	     * Not a problem for present purposes, but would be an issue
	     * if we claimed to support pattern matching on the lineends.
	     */
	    found = 0 ;
	    le = memchr(buf+1, '\n', bytes-1) ;
	    while ( le && !found ) {
	      if ( le[-1] == '\r' ) {
	        found = 1 ;
	      } else {
	        le = memchr(le+1, '\n', bytes-1 - (le+1 - buf)) ;
	      }
	    }
	    if ( !found )
	      le = 0 ;
	    break;

	  case LINEEND_ANY:
	  case LINEEND_UNSET:
	    /* Edge-case notabug: if a \r\n spans buckets it'll get seen as
	     * two line-ends.  It'll insert the \n as a one-byte bucket.
	     */
	    le_n = memchr(buf, '\n', bytes) ;
	    le_r = memchr(buf, '\r', bytes) ;
	    if ( le_n != NULL )
	      if ( le_n == le_r + sizeof(char))
	        le = le_n ;
	      else if ( (le_r < le_n) && (le_r != NULL) )
	        le = le_r ;
	      else
	        le = le_n ;
	    else
	      le = le_r ;
	    break;

	  case LINEEND_NONE:
	    le = 0 ;
	    break;

	  case LINEEND_CUSTOM:
	    le = memchr(buf, cfg->lechar, bytes) ;
	    break;
	  }
	  if ( le ) {
	    /* found a lineend in this bucket. */
	    offs = 1 + (apr_size_t)(le - buf) ;
	    apr_bucket_split(b, offs) ;
	    bytes -= offs ;
	    buf += offs ;
	    b1 = APR_BUCKET_NEXT(b) ;
	    APR_BUCKET_REMOVE(b);

	    /* Is there any previous unterminated content ? */
	    if ( !APR_BRIGADE_EMPTY(ctx->bbsave) ) {
	      /* append this to any content waiting for a lineend */
	      APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b) ;
	      rv = apr_brigade_pflatten(ctx->bbsave, &fbuf, &fbytes, f->r->pool) ;
	      /* make b a new bucket of the flattened stuff */
	      b = apr_bucket_pool_create(fbuf, fbytes, f->r->pool,
			f->r->connection->bucket_alloc) ;

	      /* bbsave has been consumed, so clear it */
	      apr_brigade_cleanup(ctx->bbsave) ;
	    }
	    /* b now contains exactly one line */
	    APR_BRIGADE_INSERT_TAIL(bbline, b);
	    b = b1 ;
	  } else {
	    /* no lineend found.  Remember the dangling content */
	    APR_BUCKET_REMOVE(b);
	    APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b);
	    bytes = 0 ;
	  }
	} /* while bytes > 0 */
      } else {
	/* bucket read failed - oops !  Let's remove it. */
	APR_BUCKET_REMOVE(b);
      }
    } else if ( APR_BUCKET_IS_EOS(b) ) {
      /* If there's data to pass, send it in one bucket */
      if ( !APR_BRIGADE_EMPTY(ctx->bbsave) ) {
        rv = apr_brigade_pflatten(ctx->bbsave, &fbuf, &fbytes, f->r->pool) ;
        b1 = apr_bucket_pool_create(fbuf, fbytes, f->r->pool,
		f->r->connection->bucket_alloc) ;
        APR_BRIGADE_INSERT_TAIL(bbline, b1);
      }
      apr_brigade_cleanup(ctx->bbsave) ;
      /* start again rather than segfault if a seriously buggy
       * filter in front of us sent a bogus EOS
       */
      f->ctx = NULL ;

      /* move the EOS to the new brigade */
      APR_BUCKET_REMOVE(b);
      APR_BRIGADE_INSERT_TAIL(bbline, b);
    } else {
      /* chop flush or unknown metadata bucket types */
      apr_bucket_delete(b);
    }
    /* OK, reset pointer to what's left (since we're not in a for-loop) */
    b = APR_BRIGADE_FIRST(bb) ;
  }

  /* OK, now we have a bunch of complete lines in bbline,
   * so we can apply our edit rules
   */

  /* When we get a match, we split the line into before+match+after.
   * To flatten that back into one buf every time would be inefficient.
   * So we treat it as three separate bufs to apply future rules.
   *
   * We can only reasonably do that by looping over buckets *inside*
   * the loop over rules.
   *
   * That means concepts like one-match-per-line or start-of-line-only
   * won't work, except for the first rule.  So we won't pretend.
   */
  for (i = 0; i < ctx->rewriterules->nelts; ++i) {
    for ( b = APR_BRIGADE_FIRST(bbline) ;
	b != APR_BRIGADE_SENTINEL(bbline) ;
	b = APR_BUCKET_NEXT(b) ) {
      if ( !APR_BUCKET_IS_METADATA(b)
	&& (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS)) {
	if ( rules[i].flags & M_REGEX ) {
	  bufp = apr_pstrmemdup(ctx->lpool, buf, bytes) ;
	  while ( ! ap_regexec(rules[i].from.r, bufp, nmatch, pmatch, 0) ) {
	    match = pmatch[0].rm_so ;
	    subs = ap_pregsub(f->r->pool, rules[i].to, bufp, nmatch, pmatch) ;
	    apr_bucket_split(b, match) ;
	    b1 = APR_BUCKET_NEXT(b) ;
	    apr_bucket_split(b1, pmatch[0].rm_eo - match) ;
	    b = APR_BUCKET_NEXT(b1) ;
	    apr_bucket_delete(b1) ;
	    b1 = apr_bucket_pool_create(subs, strlen(subs), f->r->pool,
		  f->r->connection->bucket_alloc) ;
	    APR_BUCKET_INSERT_BEFORE(b, b1) ;
	    bufp += pmatch[0].rm_eo ;
	  }
	} else {
	  bufp = buf ;
	  while (subs = apr_strmatch(rules[i].from.s, bufp, bytes),
			subs != NULL) {
	    match = (unsigned int)(subs - bufp) ;
	    bytes -= match ;
	    bufp += match ;
	    apr_bucket_split(b, match) ;
	    b1 = APR_BUCKET_NEXT(b) ;
	    apr_bucket_split(b1, rules[i].length) ;
	    b = APR_BUCKET_NEXT(b1) ;
	    apr_bucket_delete(b1) ;
	    bytes -= rules[i].length ;
	    bufp += rules[i].length ;
	    b1 = apr_bucket_immortal_create(rules[i].to, strlen(rules[i].to),
		f->r->connection->bucket_alloc) ;
	    APR_BUCKET_INSERT_BEFORE(b, b1) ;
	  }
	}
      }
    }
    /* If we used a local pool, clear it now */
    if ( (ctx->lpool != f->r->pool) && (rules[i].flags & M_REGEX) ) {
      apr_pool_clear(ctx->lpool) ;
    }
  }

  /* now pass it down the chain */
  rv = ap_pass_brigade(f->next, bbline) ;

  /* if we have leftover data, don't risk it going out of scope */
  for ( b = APR_BRIGADE_FIRST(ctx->bbsave) ;
	b != APR_BRIGADE_SENTINEL(ctx->bbsave) ;
	b = APR_BUCKET_NEXT(b)) {
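    /* Setting the leftover (unterminated) data aside into r->pool keeps it
     * valid until the next invocation of this filter. */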
    apr_bucket_setaside(b, f->r->pool) ;
  }

  return rv ;
}
Example #6
apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, 
                          apr_size_t maxlen, int *pfile_handles_allowed, 
                          const char *msg)
{
    apr_status_t status = APR_SUCCESS;
    int same_alloc;
    
    AP_DEBUG_ASSERT(to);
    AP_DEBUG_ASSERT(from);
    same_alloc = (to->bucket_alloc == from->bucket_alloc);
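    /* Buckets can only be moved between brigades directly when both use
     * the same allocator; otherwise they are copied or set aside below. */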

    if (!FILE_MOVE) {
        pfile_handles_allowed = NULL;
    }
    
    if (!APR_BRIGADE_EMPTY(from)) {
        apr_bucket *b, *end;
        
        status = last_not_included(from, maxlen, same_alloc,
                                   pfile_handles_allowed, &end);
        if (status != APR_SUCCESS) {
            return status;
        }
        
        while (!APR_BRIGADE_EMPTY(from) && status == APR_SUCCESS) {
            b = APR_BRIGADE_FIRST(from);
            if (b == end) {
                break;
            }
            
            if (same_alloc || (b->list == to->bucket_alloc)) {
                /* both brigades use the same bucket_alloc and auto-cleanups
                 * have the same life time. It's therefore safe to just move
                 * directly. */
                APR_BUCKET_REMOVE(b);
                APR_BRIGADE_INSERT_TAIL(to, b);
#if LOG_BUCKETS
                ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                              "h2_util_move: %s, passed bucket(same bucket_alloc) "
                              "%ld-%ld, type=%s",
                              msg, (long)b->start, (long)b->length, 
                              APR_BUCKET_IS_METADATA(b)? 
                              (APR_BUCKET_IS_EOS(b)? "EOS": 
                               (APR_BUCKET_IS_FLUSH(b)? "FLUSH" : "META")) : 
                              (APR_BUCKET_IS_FILE(b)? "FILE" : "DATA"));
#endif
            }
            else if (DEEP_COPY) {
                /* we have not managed the magic of passing buckets from
                 * one thread to another. Any attempts result in
                 * cleanup of pools scrambling memory.
                 */
                if (APR_BUCKET_IS_METADATA(b)) {
                    if (APR_BUCKET_IS_EOS(b)) {
                        APR_BRIGADE_INSERT_TAIL(to, apr_bucket_eos_create(to->bucket_alloc));
                    }
                    else if (APR_BUCKET_IS_FLUSH(b)) {
                        APR_BRIGADE_INSERT_TAIL(to, apr_bucket_flush_create(to->bucket_alloc));
                    }
                    else {
                        /* ignore */
                    }
                }
                else if (pfile_handles_allowed 
                         && *pfile_handles_allowed > 0 
                         && APR_BUCKET_IS_FILE(b)) {
                    /* We do not want to read files when passing buckets, if
                     * we can avoid it. However, what we've come up with so
                     * far is not working correctly, resulting either in
                     * crashes or too many open file descriptors.
                     */
                    apr_bucket_file *f = (apr_bucket_file *)b->data;
                    apr_file_t *fd = f->fd;
                    int setaside = (f->readpool != to->p);
#if LOG_BUCKETS
                    ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                                  "h2_util_move: %s, moving FILE bucket %ld-%ld "
                                  "from=%lx(p=%lx) to=%lx(p=%lx), setaside=%d",
                                  msg, (long)b->start, (long)b->length, 
                                  (long)from, (long)from->p, 
                                  (long)to, (long)to->p, setaside);
#endif
                    if (setaside) {
                        status = apr_file_setaside(&fd, fd, to->p);
                        if (status != APR_SUCCESS) {
                            ap_log_perror(APLOG_MARK, APLOG_ERR, status, to->p,
                                          APLOGNO(02947) "h2_util: %s, setaside FILE", 
                                          msg);
                            return status;
                        }
                    }
                    apr_brigade_insert_file(to, fd, b->start, b->length, 
                                            to->p);
                    --(*pfile_handles_allowed);
                }
                else {
                    const char *data;
                    apr_size_t len;
                    status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
                    if (status == APR_SUCCESS && len > 0) {
                        status = apr_brigade_write(to, NULL, NULL, data, len);
#if LOG_BUCKETS
                        ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                                      "h2_util_move: %s, copied bucket %ld-%ld "
                                      "from=%lx(p=%lx) to=%lx(p=%lx)",
                                      msg, (long)b->start, (long)b->length, 
                                      (long)from, (long)from->p, 
                                      (long)to, (long)to->p);
#endif
                    }
                }
                apr_bucket_delete(b);
            }
            else {
                apr_bucket_setaside(b, to->p);
                APR_BUCKET_REMOVE(b);
                APR_BRIGADE_INSERT_TAIL(to, b);
#if LOG_BUCKETS
                ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                              "h2_util_move: %s, passed setaside bucket %ld-%ld "
                              "from=%lx(p=%lx) to=%lx(p=%lx)",
                              msg, (long)b->start, (long)b->length, 
                              (long)from, (long)from->p, 
                              (long)to, (long)to->p);
#endif
            }
        }
    }
    
    return status;
}
Example #7
static apr_status_t dispatch(proxy_conn_rec *conn, proxy_dir_conf *conf,
                             request_rec *r, apr_pool_t *setaside_pool,
                             apr_uint16_t request_id, const char **err,
                             int *bad_request, int *has_responded)
{
    apr_bucket_brigade *ib, *ob;
    int seen_end_of_headers = 0, done = 0, ignore_body = 0;
    apr_status_t rv = APR_SUCCESS;
    int script_error_status = HTTP_OK;
    conn_rec *c = r->connection;
    struct iovec vec[2];
    ap_fcgi_header header;
    unsigned char farray[AP_FCGI_HEADER_LEN];
    apr_pollfd_t pfd;
    int header_state = HDR_STATE_READING_HEADERS;
    char stack_iobuf[AP_IOBUFSIZE];
    apr_size_t iobuf_size = AP_IOBUFSIZE;
    char *iobuf = stack_iobuf;

    *err = NULL;
    if (conn->worker->s->io_buffer_size_set) {
        iobuf_size = conn->worker->s->io_buffer_size;
        iobuf = apr_palloc(r->pool, iobuf_size);
    }

    pfd.desc_type = APR_POLL_SOCKET;
    pfd.desc.s = conn->sock;
    pfd.p = r->pool;
    pfd.reqevents = APR_POLLIN | APR_POLLOUT;
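    /* Poll for both readability and writability; once the entire request
     * body has been sent, reqevents is reduced to APR_POLLIN below. */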

    ib = apr_brigade_create(r->pool, c->bucket_alloc);
    ob = apr_brigade_create(r->pool, c->bucket_alloc);

    while (! done) {
        apr_interval_time_t timeout;
        apr_size_t len;
        int n;

        /* We need SOME kind of timeout here, or virtually anything will
         * cause timeout errors. */
        apr_socket_timeout_get(conn->sock, &timeout);

        rv = apr_poll(&pfd, 1, &n, timeout);
        if (rv != APR_SUCCESS) {
            if (APR_STATUS_IS_EINTR(rv)) {
                continue;
            }
            *err = "polling";
            break;
        }

        if (pfd.rtnevents & APR_POLLOUT) {
            apr_size_t to_send, writebuflen;
            int last_stdin = 0;
            char *iobuf_cursor;

            rv = ap_get_brigade(r->input_filters, ib,
                                AP_MODE_READBYTES, APR_BLOCK_READ,
                                iobuf_size);
            if (rv != APR_SUCCESS) {
                *err = "reading input brigade";
                *bad_request = 1;
                break;
            }

            if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(ib))) {
                last_stdin = 1;
            }

            writebuflen = iobuf_size;

            rv = apr_brigade_flatten(ib, iobuf, &writebuflen);

            apr_brigade_cleanup(ib);

            if (rv != APR_SUCCESS) {
                *err = "flattening brigade";
                break;
            }

            to_send = writebuflen;
            iobuf_cursor = iobuf;
            while (to_send > 0) {
                int nvec = 0;
                apr_size_t write_this_time;

                write_this_time =
                    to_send < AP_FCGI_MAX_CONTENT_LEN ? to_send : AP_FCGI_MAX_CONTENT_LEN;

                ap_fcgi_fill_in_header(&header, AP_FCGI_STDIN, request_id,
                                       (apr_uint16_t)write_this_time, 0);
                ap_fcgi_header_to_array(&header, farray);

                vec[nvec].iov_base = (void *)farray;
                vec[nvec].iov_len = sizeof(farray);
                ++nvec;
                if (writebuflen) {
                    vec[nvec].iov_base = iobuf_cursor;
                    vec[nvec].iov_len = write_this_time;
                    ++nvec;
                }

                rv = send_data(conn, vec, nvec, &len);
                if (rv != APR_SUCCESS) {
                    *err = "sending stdin";
                    break;
                }

                to_send -= write_this_time;
                iobuf_cursor += write_this_time;
            }
            if (rv != APR_SUCCESS) {
                break;
            }

            if (last_stdin) {
                pfd.reqevents = APR_POLLIN; /* Done with input data */

                /* signal EOF (empty FCGI_STDIN) */
                ap_fcgi_fill_in_header(&header, AP_FCGI_STDIN, request_id,
                                       0, 0);
                ap_fcgi_header_to_array(&header, farray);

                vec[0].iov_base = (void *)farray;
                vec[0].iov_len = sizeof(farray);

                rv = send_data(conn, vec, 1, &len);
                if (rv != APR_SUCCESS) {
                    *err = "sending empty stdin";
                    break;
                }
            }
        }

        if (pfd.rtnevents & APR_POLLIN) {
            apr_size_t readbuflen;
            apr_uint16_t clen, rid;
            apr_bucket *b;
            unsigned char plen;
            unsigned char type, version;

            /* First, we grab the header... */
            rv = get_data_full(conn, (char *) farray, AP_FCGI_HEADER_LEN);
            if (rv != APR_SUCCESS) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01067)
                              "Failed to read FastCGI header");
                break;
            }

            ap_log_rdata(APLOG_MARK, APLOG_TRACE8, r, "FastCGI header",
                         farray, AP_FCGI_HEADER_LEN, 0);

            ap_fcgi_header_fields_from_array(&version, &type, &rid,
                                             &clen, &plen, farray);

            if (version != AP_FCGI_VERSION_1) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01068)
                              "Got bogus version %d", (int)version);
                rv = APR_EINVAL;
                break;
            }

            if (rid != request_id) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01069)
                              "Got bogus rid %d, expected %d",
                              rid, request_id);
                rv = APR_EINVAL;
                break;
            }

recv_again:
            if (clen > iobuf_size) {
                readbuflen = iobuf_size;
            } else {
                readbuflen = clen;
            }

            /* Now get the actual data.  Yes it sucks to do this in a second
             * recv call, this will eventually change when we move to real
             * nonblocking recv calls. */
            if (readbuflen != 0) {
                rv = get_data(conn, iobuf, &readbuflen);
                if (rv != APR_SUCCESS) {
                    *err = "reading response body";
                    break;
                }
            }

            switch (type) {
            case AP_FCGI_STDOUT:
                if (clen != 0) {
                    b = apr_bucket_transient_create(iobuf,
                                                    readbuflen,
                                                    c->bucket_alloc);

                    APR_BRIGADE_INSERT_TAIL(ob, b);

                    if (! seen_end_of_headers) {
                        int st = handle_headers(r, &header_state,
                                                iobuf, readbuflen);

                        if (st == 1) {
                            int status;
                            seen_end_of_headers = 1;

                            status = ap_scan_script_header_err_brigade_ex(r, ob,
                                NULL, APLOG_MODULE_INDEX);
                            /* suck in all the rest */
                            if (status != OK) {
                                apr_bucket *tmp_b;
                                apr_brigade_cleanup(ob);
                                tmp_b = apr_bucket_eos_create(c->bucket_alloc);
                                APR_BRIGADE_INSERT_TAIL(ob, tmp_b);

                                *has_responded = 1;
                                r->status = status;
                                rv = ap_pass_brigade(r->output_filters, ob);
                                if (rv != APR_SUCCESS) {
                                    *err = "passing headers brigade to output filters";
                                }
                                else if (status == HTTP_NOT_MODIFIED) {
                                    /* The 304 response MUST NOT contain
                                     * a message-body, ignore it. */
                                    ignore_body = 1;
                                }
                                else {
                                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01070)
                                                    "Error parsing script headers");
                                    rv = APR_EINVAL;
                                }
                                break;
                            }

                            if (conf->error_override &&
                                ap_is_HTTP_ERROR(r->status)) {
                                /*
                                 * set script_error_status to discard
                                 * everything after the headers
                                 */
                                script_error_status = r->status;
                                /*
                                 * prevent ap_die() from treating this as a
                                 * recursive error, initially:
                                 */
                                r->status = HTTP_OK;
                            }

                            if (script_error_status == HTTP_OK
                                && !APR_BRIGADE_EMPTY(ob) && !ignore_body) {
                                /* Send the part of the body that we read while
                                 * reading the headers.
                                 */
                                *has_responded = 1;
                                rv = ap_pass_brigade(r->output_filters, ob);
                                if (rv != APR_SUCCESS) {
                                    *err = "passing brigade to output filters";
                                    break;
                                }
                            }
                            apr_brigade_cleanup(ob);

                            apr_pool_clear(setaside_pool);
                        }
                        else {
                            /* We're still looking for the end of the
                             * headers, so this part of the data will need
                             * to persist. */
                            apr_bucket_setaside(b, setaside_pool);
                        }
                    } else {
                        /* we've already passed along the headers, so now pass
                         * through the content.  we could simply continue to
                         * setaside the content and not pass until we see the
                         * 0 content-length (below, where we append the EOS),
                         * but that could be a huge amount of data; so we pass
                         * along smaller chunks
                         */
                        if (script_error_status == HTTP_OK && !ignore_body) {
                            *has_responded = 1;
                            rv = ap_pass_brigade(r->output_filters, ob);
                            if (rv != APR_SUCCESS) {
                                *err = "passing brigade to output filters";
                                break;
                            }
                        }
                        apr_brigade_cleanup(ob);
                    }

                    /* If we didn't read all the data, go back and get the
                     * rest of it. */
                    if (clen > readbuflen) {
                        clen -= readbuflen;
                        goto recv_again;
                    }
                } else {
                    /* XXX what if we haven't seen end of the headers yet? */

                    if (script_error_status == HTTP_OK) {
                        b = apr_bucket_eos_create(c->bucket_alloc);
                        APR_BRIGADE_INSERT_TAIL(ob, b);

                        *has_responded = 1;
                        rv = ap_pass_brigade(r->output_filters, ob);
                        if (rv != APR_SUCCESS) {
                            *err = "passing brigade to output filters";
                            break;
                        }
                    }

                    /* XXX Why don't we cleanup here?  (logic from AJP) */
                }
                break;

            case AP_FCGI_STDERR:
                /* TODO: Should probably clean up this logging a bit... */
                if (clen) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01071)
                                  "Got error '%.*s'", (int)readbuflen, iobuf);
                }

                if (clen > readbuflen) {
                    clen -= readbuflen;
                    goto recv_again;
                }
                break;

            case AP_FCGI_END_REQUEST:
                done = 1;
                break;

            default:
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01072)
                              "Got bogus record %d", type);
                break;
            }
            /* Leave on above switch's inner error. */
            if (rv != APR_SUCCESS) {
                break;
            }

            if (plen) {
                rv = get_data_full(conn, iobuf, plen);
                if (rv != APR_SUCCESS) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02537)
                                  "Error occurred reading padding");
                    break;
                }
            }
        }
    }

    apr_brigade_destroy(ib);
    apr_brigade_destroy(ob);

    if (script_error_status != HTTP_OK) {
        ap_die(script_error_status, r); /* send ErrorDocument */
        *has_responded = 1;
    }

    return rv;
}
Example #8
static apr_status_t append_bucket(h2_bucket_beam *beam, 
                                  apr_bucket *bred,
                                  apr_read_type_e block,
                                  apr_pool_t *pool,
                                  h2_beam_lock *pbl)
{
    const char *data;
    apr_size_t len;
    apr_off_t space_left = 0;
    apr_status_t status;
    
    if (APR_BUCKET_IS_METADATA(bred)) {
        if (APR_BUCKET_IS_EOS(bred)) {
            beam->closed = 1;
        }
        APR_BUCKET_REMOVE(bred);
        H2_BLIST_INSERT_TAIL(&beam->red, bred);
        return APR_SUCCESS;
    }
    else if (APR_BUCKET_IS_FILE(bred)) {
        /* file bucket lengths do not really count */
    }
    else {
        space_left = calc_space_left(beam);
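        /* A length of (apr_size_t)-1 means the bucket does not yet know
         * its size; reading it here resolves the actual length before it
         * is compared against the available space. */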
        if (space_left > 0 && bred->length == ((apr_size_t)-1)) {
            const char *data;
            status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ);
            if (status != APR_SUCCESS) {
                return status;
            }
        }
        
        if (space_left < bred->length) {
            status = r_wait_space(beam, block, pbl, &space_left);
            if (status != APR_SUCCESS) {
                return status;
            }
            if (space_left <= 0) {
                return APR_EAGAIN;
            }
        }
        /* space available, maybe need bucket split */
    }
    

    /* The fundamental problem is that reading a red bucket from
     * a green thread is a total NO GO, because the bucket might use
     * its pool/bucket_alloc from a foreign thread and that will
     * corrupt. */
    status = APR_ENOTIMPL;
    if (beam->closed && bred->length > 0) {
        status = APR_EOF;
    }
    else if (APR_BUCKET_IS_TRANSIENT(bred)) {
        /* this takes care of transient buckets and converts them
         * into heap ones. Other bucket types might or might not be
         * affected by this. */
        status = apr_bucket_setaside(bred, pool);
    }
    else if (APR_BUCKET_IS_HEAP(bred)) {
        /* For heap buckets read from a green thread is fine. The
         * data will be there and live until the bucket itself is
         * destroyed. */
        status = APR_SUCCESS;
    }
    else if (APR_BUCKET_IS_POOL(bred)) {
        /* pool buckets are bastards that register at pool cleanup
         * to morph themselves into heap buckets. That may happen anytime,
         * even after the bucket data pointer has been read. So at
         * any time inside the green thread, the pool bucket memory
         * may disappear. yikes. */
        status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ);
        if (status == APR_SUCCESS) {
            apr_bucket_heap_make(bred, data, len, NULL);
        }
    }
    else if (APR_BUCKET_IS_FILE(bred)) {
        /* For file buckets the problem is their internal readpool that
         * is used on the first read to allocate buffer/mmap.
         * Since setting aside a file bucket will de-register the
         * file cleanup function from the previous pool, we need to
         * call that from a red thread. 
         * Additionally, we allow callbacks to prevent beaming file
         * handles across. The use case for this is to limit the number 
         * of open file handles and rather use a less efficient beam
         * transport. */
        apr_file_t *fd = ((apr_bucket_file *)bred->data)->fd;
        int can_beam = 1;
        if (beam->last_beamed != fd && beam->can_beam_fn) {
            can_beam = beam->can_beam_fn(beam->can_beam_ctx, beam, fd);
        }
        if (can_beam) {
            beam->last_beamed = fd;
            status = apr_bucket_setaside(bred, pool);
        }
        /* else: enter ENOTIMPL case below */
    }
    
    if (status == APR_ENOTIMPL) {
        /* we have no knowledge about the internals of this bucket,
         * but hope that after read, its data stays immutable for the
         * lifetime of the bucket. (see pool bucket handling above for
         * a counter example).
         * We do the read while in a red thread, so that the bucket may
         * use pools/allocators safely. */
        if (space_left < APR_BUCKET_BUFF_SIZE) {
            space_left = APR_BUCKET_BUFF_SIZE;
        }
        if (space_left < bred->length) {
            apr_bucket_split(bred, space_left);
        }
        status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ);
        if (status == APR_SUCCESS) {
            status = apr_bucket_setaside(bred, pool);
        }
    }
    
    if (status != APR_SUCCESS && status != APR_ENOTIMPL) {
        return status;
    }
    
    APR_BUCKET_REMOVE(bred);
    H2_BLIST_INSERT_TAIL(&beam->red, bred);
    beam->sent_bytes += bred->length;
    
    return APR_SUCCESS;
}
Example #9
static apr_status_t google_analytics_out_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    request_rec *r = f->r;
    google_analytics_filter_ctx *ctx = f->ctx;
    google_analytics_filter_config *c;

    apr_bucket *b = APR_BRIGADE_FIRST(bb);

    apr_size_t bytes;
    apr_size_t fbytes;
    apr_size_t offs;
    const char *buf;
    const char *le = NULL;
    const char *le_n;
    const char *le_r;

    const char *bufp;
    const char *subs;
    unsigned int match;

    apr_bucket *b1;
    char *fbuf;
    int found = 0;
    apr_status_t rv;

    apr_bucket_brigade *bbline;
    
    // Do nothing for subrequests
    if (r->main) {
        ap_remove_output_filter(f);
        return ap_pass_brigade(f->next, bb);
    }

    c = ap_get_module_config(r->per_dir_config, &google_analytics_module);

    if (ctx == NULL) {
        ctx = f->ctx = apr_pcalloc(r->pool, sizeof(google_analytics_filter_ctx));
        ctx->bbsave = apr_brigade_create(r->pool, f->c->bucket_alloc);
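        /* bbsave carries any trailing partial line over to the next
         * invocation of the filter. */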
    }

    // The body length will change, so is simply unsetting these headers OK?
    apr_table_unset(r->headers_out, "Content-Length");
    apr_table_unset(r->headers_out, "Content-MD5");
    apr_table_unset(r->headers_out, "Accept-Ranges");
    apr_table_unset(r->headers_out, "ETag");

    bbline = apr_brigade_create(r->pool, f->c->bucket_alloc);
    
    // Re-organize the brigade into one bucket per line
    while ( b != APR_BRIGADE_SENTINEL(bb) ) {
        if ( !APR_BUCKET_IS_METADATA(b) ) {
            if ( apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS ) {
                if ( bytes == 0 ) {
                    APR_BUCKET_REMOVE(b);
                } else {
                    while ( bytes > 0 ) {
                        le_n = memchr(buf, '\n', bytes);
                        le_r = memchr(buf, '\r', bytes);
                        if ( le_n != NULL ) {
                            if ( le_n == le_r + sizeof(char)) {
                                le = le_n;
                            }
                            else if ( (le_r < le_n) && (le_r != NULL) ) {
                                le = le_r;
                            }
                            else {
                                le = le_n;
                            }
                        }
                        else {
                            le = le_r;
                        }

                        if ( le ) {
                            offs = 1 + (apr_size_t)(le - buf);
                            apr_bucket_split(b, offs);
                            bytes -= offs;
                            buf += offs;
                            b1 = APR_BUCKET_NEXT(b);
                            APR_BUCKET_REMOVE(b);

                            if ( !APR_BRIGADE_EMPTY(ctx->bbsave) ) {
                                APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b);
                                rv = apr_brigade_pflatten(ctx->bbsave, &fbuf, &fbytes, r->pool);
                                b = apr_bucket_pool_create(fbuf, fbytes, r->pool,
                                                           r->connection->bucket_alloc);
                                apr_brigade_cleanup(ctx->bbsave);
                            }
                            APR_BRIGADE_INSERT_TAIL(bbline, b);
                            b = b1;
                        } else {
                            APR_BUCKET_REMOVE(b);
                            APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b);
                            bytes = 0;
                        }
                    } /* while bytes > 0 */
                }
            } else {
                APR_BUCKET_REMOVE(b);
            }
        } else if ( APR_BUCKET_IS_EOS(b) ) {
            if ( !APR_BRIGADE_EMPTY(ctx->bbsave) ) {
                rv = apr_brigade_pflatten(ctx->bbsave, &fbuf, &fbytes, r->pool);
                b1 = apr_bucket_pool_create(fbuf, fbytes, r->pool,
                                            r->connection->bucket_alloc);
                APR_BRIGADE_INSERT_TAIL(bbline, b1);
            }
            apr_brigade_cleanup(ctx->bbsave);
            f->ctx = NULL;
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(bbline, b);
        } else {
            apr_bucket_delete(b);
        }
        b = APR_BRIGADE_FIRST(bb);
    }

    // Iterate over the per-line buckets
    for ( b = APR_BRIGADE_FIRST(bbline);
          b != APR_BRIGADE_SENTINEL(bbline);
          b = APR_BUCKET_NEXT(b) ) {
        if ( !APR_BUCKET_IS_METADATA(b)
             && (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS)) {

            bufp = buf;

            if (ap_regexec(regex_tag_exists, bufp, 0, NULL, 0) == 0) {
                break;
            }

            subs = apr_strmatch(pattern_body_end_tag, bufp, bytes);
            if (subs != NULL) {
                match = (unsigned int)(subs - bufp);
                bytes -= match;
                bufp += match;
                apr_bucket_split(b, match);
                b1 = APR_BUCKET_NEXT(b);
                apr_bucket_split(b1, body_end_tag_length);
                b = APR_BUCKET_NEXT(b1);
                apr_bucket_delete(b1);
                bytes -= body_end_tag_length;
                bufp += body_end_tag_length;
                b1 = apr_bucket_immortal_create(c->replace, strlen(c->replace),
                                                r->connection->bucket_alloc);
                APR_BUCKET_INSERT_BEFORE(b, b1);
            }
        }
    }
    rv = ap_pass_brigade(f->next, bbline);

    for ( b = APR_BRIGADE_FIRST(ctx->bbsave);
          b != APR_BRIGADE_SENTINEL(ctx->bbsave);
          b = APR_BUCKET_NEXT(b)) {
        apr_bucket_setaside(b, r->pool);
    }

    return rv;
}