Example #1
APU_DECLARE(apr_status_t) apr_brigade_split_line(apr_bucket_brigade *bbOut,
                                                 apr_bucket_brigade *bbIn,
                                                 apr_read_type_e block,
                                                 apr_off_t maxbytes)
{
    apr_off_t readbytes = 0;

    while (!APR_BRIGADE_EMPTY(bbIn)) {
        const char *pos;
        const char *str;
        apr_size_t len;
        apr_status_t rv;
        apr_bucket *e;

        e = APR_BRIGADE_FIRST(bbIn);
        rv = apr_bucket_read(e, &str, &len, block);

        if (rv != APR_SUCCESS) {
            return rv;
        }

        pos = memchr(str, APR_ASCII_LF, len);
        /* We found a match. */
        if (pos != NULL) {
            apr_bucket_split(e, pos - str + 1);
            APR_BUCKET_REMOVE(e);
            APR_BRIGADE_INSERT_TAIL(bbOut, e);
            return APR_SUCCESS;
        }
        APR_BUCKET_REMOVE(e);
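        /* Metadata buckets and large reads are moved into bbOut as whole
         * buckets; smaller chunks are copied into bbOut via
         * apr_brigade_write() and the original bucket is destroyed. */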
        if (APR_BUCKET_IS_METADATA(e) || len > APR_BUCKET_BUFF_SIZE/4) {
            APR_BRIGADE_INSERT_TAIL(bbOut, e);
        }
        else {
            if (len > 0) {
                rv = apr_brigade_write(bbOut, NULL, NULL, str, len);
                if (rv != APR_SUCCESS) {
                    return rv;
                }
            }
            apr_bucket_destroy(e);
        }
        readbytes += len;
        /* We didn't find an APR_ASCII_LF within the maximum line length. */
        if (readbytes >= maxbytes) {
            break;
        }
    }

    return APR_SUCCESS;
}
Example #2
static apr_status_t CaseFilterInFilter(ap_filter_t *f,
                                       apr_bucket_brigade *pbbOut,
                                       ap_input_mode_t eMode,
                                       apr_read_type_e eBlock,
                                       apr_off_t nBytes)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    CaseFilterInContext *pCtx;
    apr_status_t ret;

    if (!(pCtx = f->ctx)) {
        f->ctx = pCtx = apr_palloc(r->pool, sizeof *pCtx);
        pCtx->pbbTmp = apr_brigade_create(r->pool, c->bucket_alloc);
    }

    if (APR_BRIGADE_EMPTY(pCtx->pbbTmp)) {
        ret = ap_get_brigade(f->next, pCtx->pbbTmp, eMode, eBlock, nBytes);

        if (eMode == AP_MODE_EATCRLF || ret != APR_SUCCESS)
            return ret;
    }

    while (!APR_BRIGADE_EMPTY(pCtx->pbbTmp)) {
        apr_bucket *pbktIn = APR_BRIGADE_FIRST(pCtx->pbbTmp);
        apr_bucket *pbktOut;
        const char *data;
        apr_size_t len;
        char *buf;
        apr_size_t n;

        /* It is tempting to do this...
         * APR_BUCKET_REMOVE(pB);
         * APR_BRIGADE_INSERT_TAIL(pbbOut,pB);
         * and change the case of the bucket data, but that would be wrong
         * for a file or socket buffer, for example...
         */

        if (APR_BUCKET_IS_EOS(pbktIn)) {
            APR_BUCKET_REMOVE(pbktIn);
            APR_BRIGADE_INSERT_TAIL(pbbOut, pbktIn);
            break;
        }

        ret = apr_bucket_read(pbktIn, &data, &len, eBlock);
        if (ret != APR_SUCCESS)
            return ret;

        buf = ap_malloc(len);
        for (n=0 ; n < len ; ++n) {
            buf[n] = apr_toupper(data[n]);
        }
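        /* Hand the upper-cased copy to the output brigade as a heap bucket,
         * then delete the consumed input bucket. */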

        pbktOut = apr_bucket_heap_create(buf, len, 0, c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(pbbOut, pbktOut);
        apr_bucket_delete(pbktIn);
    }

    return APR_SUCCESS;
}
Example #3
static void remove_empty_buckets(apr_bucket_brigade *bb)
{
    apr_bucket *bucket;
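    /* Remove and destroy metadata and zero-length buckets from the head of
     * the brigade until a bucket carrying data is reached. */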
    while (((bucket = APR_BRIGADE_FIRST(bb)) != APR_BRIGADE_SENTINEL(bb)) &&
           (APR_BUCKET_IS_METADATA(bucket) || (bucket->length == 0))) {
        APR_BUCKET_REMOVE(bucket);
        apr_bucket_destroy(bucket);
    }
}
Example #4
apr_status_t jxr_append_brigade(request_rec *r, apr_bucket_brigade *dest, apr_bucket_brigade *bb, int *eos_seen)
{
	apr_size_t max_msglen = MAX_PACKET_SIZE - sizeof(Jaxer_Header);
	apr_status_t rv;

	while (!APR_BRIGADE_EMPTY(bb)) 
	{
		apr_size_t readlen;
		const char *buffer;
		
		apr_bucket *e = APR_BRIGADE_FIRST(bb);

		if (APR_BUCKET_IS_EOS(e) )
		{
			apr_bucket_delete(e);
			if (eos_seen)
				*eos_seen = 1;
			continue;
		}
		if (APR_BUCKET_IS_METADATA(e)) {
			apr_bucket_delete(e);
			continue;
		}

		
		/* Read the bucket now */
		if ((rv = apr_bucket_read(e, &buffer, &readlen, APR_BLOCK_READ)) != APR_SUCCESS) 
		{
			ap_log_perror(APLOG_MARK, APLOG_INFO, rv, r->pool, "mod_jaxer: can't read data from handler");
			return rv;
		}
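		/* Buckets larger than the maximum message size are split here; the
		 * leading piece is moved into dest on the next pass through the loop. */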
		
		if (readlen > max_msglen)
		{
			apr_bucket_split(e, max_msglen);
		}
		else
		{
			APR_BUCKET_REMOVE(e);
			APR_BRIGADE_INSERT_TAIL(dest, e);
		}
	}
	if ((rv=apr_brigade_destroy(bb)) != APR_SUCCESS)
	{
		ap_log_perror(APLOG_MARK, APLOG_INFO, rv, r->pool, "mod_jaxer: failed to destroy brigade.");
		return rv;
	}

	return APR_SUCCESS;
}
Example #5
static int cdn_html_filter(ap_filter_t * f, apr_bucket_brigade * bb)
{
  apr_bucket *b;
  const char *buf = 0;
  apr_size_t bytes = 0;

  /* now do HTML filtering if necessary, and pass the brigade onward */
  saxctxt *ctxt = check_html_filter_init(f);
  if (!ctxt)
    return ap_pass_brigade(f->next, bb);

  for(b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb); b = APR_BUCKET_NEXT(b)) {
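    /* EOS or FLUSH: finish the current buffer, move the bucket to the
     * filter's own brigade, and pass everything downstream. */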
    if(APR_BUCKET_IS_EOS(b) || APR_BUCKET_IS_FLUSH(b)) {
      consume_buffer(ctxt, buf, 0, 1);
      APR_BUCKET_REMOVE(b);
      APR_BRIGADE_INSERT_TAIL(ctxt->bb, b);
      ap_pass_brigade(ctxt->f->next, ctxt->bb);
      return APR_SUCCESS;
    }

    if(apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS && buf) {
      if(ctxt->parser == NULL) {

        /*
         * for now, always output utf-8; we could incorporate
         * mod_proxy_html's output transcoding with little problem if
         * necessary
         */
        ap_set_content_type(f->r, "text/html;charset=utf-8");

        if(!initialize_parser(f, ctxt, &buf, bytes)) {
          apr_status_t rv = ap_pass_brigade(ctxt->f->next, bb);
          ap_remove_output_filter(f);
          return rv;
        } else
          ap_fputs(f->next, ctxt->bb, ctxt->cfg->doctype);
      }
      consume_buffer(ctxt, buf, bytes, 0);
    }

  }

  /*ap_fflush(ctxt->f->next, ctxt->bb) ; */      /* uncomment for debug */
  apr_brigade_cleanup(bb);
  return APR_SUCCESS;
}
Example #6
static int getsfunc_BRIGADE(char *buf, int len, void *arg)
{
    apr_bucket_brigade *bb = (apr_bucket_brigade *)arg;
    const char *dst_end = buf + len - 1; /* leave room for terminating null */
    char *dst = buf;
    apr_bucket *e = APR_BRIGADE_FIRST(bb);
    apr_status_t rv;
    int done = 0;

    while ((dst < dst_end) && !done && !APR_BUCKET_IS_EOS(e)) {
        const char *bucket_data;
        apr_size_t bucket_data_len;
        const char *src;
        const char *src_end;
        apr_bucket * next;

        rv = apr_bucket_read(e, &bucket_data, &bucket_data_len,
                             APR_BLOCK_READ);
        if (rv != APR_SUCCESS || (bucket_data_len == 0)) {
            return APR_STATUS_IS_TIMEUP(rv) ? -1 : 0;
        }
        src = bucket_data;
        src_end = bucket_data + bucket_data_len;
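        /* Copy bytes into the caller's buffer, dropping CRs and stopping
         * at the first LF. */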
        while ((src < src_end) && (dst < dst_end) && !done) {
            if (*src == '\n') {
                done = 1;
            }
            else if (*src != '\r') {
                *dst++ = *src;
            }
            src++;
        }

        if (src < src_end) {
            apr_bucket_split(e, src - bucket_data);
        }
        next = APR_BUCKET_NEXT(e);
        APR_BUCKET_REMOVE(e);
        apr_bucket_destroy(e);
        e = next;
    }
    *dst = 0;
    return 1;
}
Example #7
/**
 * @internal
 *
 * "Sniffs" the output (response) data from the connection stream.
 */
static int ironbee_output_filter (ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_bucket *b;
#if 0
    conn_rec *c = f->c;
    ironbee_conn_context *ctx = f->ctx;
    ib_conn_t *iconn = ctx->iconn;
    ib_core_cfg_t *corecfg;
    int buffering = 0;

    /* Configure. */
    ib_context_module_config(iconn->ctx, ib_core_module(), (void *)&corecfg);
    if (corecfg != NULL) {
        buffering = (int)corecfg->buffer_res;
    }
#endif


    for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb); b = APR_BUCKET_NEXT(b)) {
#if 0
        /// @todo Should this be done?  Maybe only for proxy?
        if (APR_BUCKET_IS_EOS(b)) {
            /// @todo Do we need to do this? Maybe only for proxy.
            apr_bucket *flush = apr_bucket_flush_create(f->c->bucket_alloc);
            APR_BUCKET_INSERT_BEFORE(b, flush);
        }

        if (buffering) {
            /// @todo setaside into our own pool to destroy later???
            apr_bucket_setaside(b, c->pool);
            process_bucket(f, b);
            APR_BUCKET_REMOVE(b);
        }
        else {
#endif
            process_bucket(f, b);
#if 0
        }
#endif
    }

    return ap_pass_brigade(f->next, bb);
}
Example #8
// Called when Apache outputs data:
static apr_status_t firstbyte_out_filter(ap_filter_t *f,
                                     apr_bucket_brigade *bb) {
    firstbyte_config_t *cf = ap_get_module_config(f->c->conn_config, &log_firstbyte_module);

    apr_bucket *b = APR_BRIGADE_LAST(bb);
    /* End of data, make sure we flush */
    if (APR_BUCKET_IS_EOS(b)) {
        APR_BRIGADE_INSERT_TAIL(bb,
                                apr_bucket_flush_create(f->c->bucket_alloc));
        APR_BUCKET_REMOVE(b);
        apr_bucket_destroy(b);
    }
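    /* Remember when the first output brigade passes through on this
     * connection. */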

    if (cf->first_out == 1) {
        cf->first_out_time = apr_time_now();
        cf->first_out = 0;
    }

    return ap_pass_brigade(f->next, bb);
}
Example #9
apr_status_t ap_core_output_filter(ap_filter_t *f, apr_bucket_brigade *b)
{
    apr_status_t rv;
    apr_bucket_brigade *more;
    conn_rec *c = f->c;
    core_net_rec *net = f->ctx;
    core_output_filter_ctx_t *ctx = net->out_ctx;
    apr_read_type_e eblock = APR_NONBLOCK_READ;
    apr_pool_t *input_pool = b->p;

    /* Fail quickly if the connection has already been aborted. */
    if (c->aborted) {
        apr_brigade_cleanup(b);
        return APR_ECONNABORTED;
    }

    if (ctx == NULL) {
        ctx = apr_pcalloc(c->pool, sizeof(*ctx));
        net->out_ctx = ctx;
    }

    /* If we have a saved brigade, concatenate the new brigade to it */
    if (ctx->b) {
        APR_BRIGADE_CONCAT(ctx->b, b);
        b = ctx->b;
        ctx->b = NULL;
    }

    /* Perform multiple passes over the brigade, sending batches of output
       to the connection. */
    while (b && !APR_BRIGADE_EMPTY(b)) {
        apr_size_t nbytes = 0;
        apr_bucket *last_e = NULL; /* initialized for debugging */
        apr_bucket *e;

        /* one group of iovecs per pass over the brigade */
        apr_size_t nvec = 0;
        apr_size_t nvec_trailers = 0;
        struct iovec vec[MAX_IOVEC_TO_WRITE];
        struct iovec vec_trailers[MAX_IOVEC_TO_WRITE];

        /* one file per pass over the brigade */
        apr_file_t *fd = NULL;
        apr_size_t flen = 0;
        apr_off_t foffset = 0;

        /* keep track of buckets that we've concatenated
         * to avoid small writes
         */
        apr_bucket *last_merged_bucket = NULL;

        /* tail of brigade if we need another pass */
        more = NULL;

        /* Iterate over the brigade: collect iovecs and/or a file */
        for (e = APR_BRIGADE_FIRST(b);
                e != APR_BRIGADE_SENTINEL(b);
                e = APR_BUCKET_NEXT(e))
        {
            /* keep track of the last bucket processed */
            last_e = e;
            if (APR_BUCKET_IS_EOS(e) || AP_BUCKET_IS_EOC(e)) {
                break;
            }
            else if (APR_BUCKET_IS_FLUSH(e)) {
                if (e != APR_BRIGADE_LAST(b)) {
                    more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
                }
                break;
            }

            /* It doesn't make any sense to use sendfile for a file bucket
             * that represents 10 bytes.
             */
            else if (APR_BUCKET_IS_FILE(e)
                     && (e->length >= AP_MIN_SENDFILE_BYTES)) {
                apr_bucket_file *a = e->data;

                /* We can't handle more than one file bucket at a time
                 * so we split here and send the file we have already
                 * found.
                 */
                if (fd) {
                    more = apr_brigade_split(b, e);
                    break;
                }

                fd = a->fd;
                flen = e->length;
                foffset = e->start;
            }
            else {
                const char *str;
                apr_size_t n;

                rv = apr_bucket_read(e, &str, &n, eblock);
                if (APR_STATUS_IS_EAGAIN(rv)) {
                    /* send what we have so far since we shouldn't expect more
                     * output for a while...  next time we read, block
                     */
                    more = apr_brigade_split(b, e);
                    eblock = APR_BLOCK_READ;
                    break;
                }
                eblock = APR_NONBLOCK_READ;
                if (n) {
                    if (!fd) {
                        if (nvec == MAX_IOVEC_TO_WRITE) {
                            /* woah! too many. buffer them up, for use later. */
                            apr_bucket *temp, *next;
                            apr_bucket_brigade *temp_brig;

                            if (nbytes >= AP_MIN_BYTES_TO_WRITE) {
                                /* We have enough data in the iovec
                                 * to justify doing a writev
                                 */
                                more = apr_brigade_split(b, e);
                                break;
                            }

                            /* Create a temporary brigade as a means
                             * of concatenating a bunch of buckets together
                             */
                            temp_brig = apr_brigade_create(f->c->pool,
                                                           f->c->bucket_alloc);
                            if (last_merged_bucket) {
                                /* If we've concatenated together small
                                 * buckets already in a previous pass,
                                 * the initial buckets in this brigade
                                 * are heap buckets that may have extra
                                 * space left in them (because they
                                 * were created by apr_brigade_write()).
                                 * We can take advantage of this by
                                 * building the new temp brigade out of
                                 * these buckets, so that the content
                                 * in them doesn't have to be copied again.
                                 */
                                APR_BRIGADE_PREPEND(b, temp_brig);
                                brigade_move(temp_brig, b, APR_BUCKET_NEXT(last_merged_bucket));
                            }

                            temp = APR_BRIGADE_FIRST(b);
                            while (temp != e) {
                                apr_bucket *d;
                                rv = apr_bucket_read(temp, &str, &n, APR_BLOCK_READ);
                                apr_brigade_write(temp_brig, NULL, NULL, str, n);
                                d = temp;
                                temp = APR_BUCKET_NEXT(temp);
                                apr_bucket_delete(d);
                            }

                            nvec = 0;
                            nbytes = 0;
                            temp = APR_BRIGADE_FIRST(temp_brig);
                            APR_BUCKET_REMOVE(temp);
                            APR_BRIGADE_INSERT_HEAD(b, temp);
                            apr_bucket_read(temp, &str, &n, APR_BLOCK_READ);
                            vec[nvec].iov_base = (char*) str;
                            vec[nvec].iov_len = n;
                            nvec++;

                            /* Just in case the temporary brigade has
                             * multiple buckets, recover the rest of
                             * them and put them in the brigade that
                             * we're sending.
                             */
                            for (next = APR_BRIGADE_FIRST(temp_brig);
                                    next != APR_BRIGADE_SENTINEL(temp_brig);
                                    next = APR_BRIGADE_FIRST(temp_brig)) {
                                APR_BUCKET_REMOVE(next);
                                APR_BUCKET_INSERT_AFTER(temp, next);
                                temp = next;
                                apr_bucket_read(next, &str, &n,
                                                APR_BLOCK_READ);
                                vec[nvec].iov_base = (char*) str;
                                vec[nvec].iov_len = n;
                                nvec++;
                            }

                            apr_brigade_destroy(temp_brig);

                            last_merged_bucket = temp;
                            e = temp;
                            last_e = e;
                        }
                        else {
                            vec[nvec].iov_base = (char*) str;
                            vec[nvec].iov_len = n;
                            nvec++;
                        }
                    }
                    else {
                        /* The bucket is a trailer to a file bucket */

                        if (nvec_trailers == MAX_IOVEC_TO_WRITE) {
                            /* woah! too many. stop now. */
                            more = apr_brigade_split(b, e);
                            break;
                        }

                        vec_trailers[nvec_trailers].iov_base = (char*) str;
                        vec_trailers[nvec_trailers].iov_len = n;
                        nvec_trailers++;
                    }

                    nbytes += n;
                }
            }
        }


        /* Completed iterating over the brigade, now determine if we want
         * to buffer the brigade or send the brigade out on the network.
         *
         * Save if we haven't accumulated enough bytes to send, the connection
         * is not about to be closed, and:
         *
         *   1) we didn't see a file, we don't have more passes over the
         *      brigade to perform,  AND we didn't stop at a FLUSH bucket.
         *      (IOW, we will save plain old bytes such as HTTP headers)
         * or
         *   2) we hit the EOS and have a keep-alive connection
         *      (IOW, this response is a bit more complex, but we save it
         *       with the hope of concatenating with another response)
         */
        if (nbytes + flen < AP_MIN_BYTES_TO_WRITE
                && !AP_BUCKET_IS_EOC(last_e)
                && ((!fd && !more && !APR_BUCKET_IS_FLUSH(last_e))
                    || (APR_BUCKET_IS_EOS(last_e)
                        && c->keepalive == AP_CONN_KEEPALIVE))) {

            /* NEVER save an EOS in here.  If we are saving a brigade with
             * an EOS bucket, then we are doing keepalive connections, and
             * we want to process the second request fully.
             */
            if (APR_BUCKET_IS_EOS(last_e)) {
                apr_bucket *bucket;
                int file_bucket_saved = 0;
                apr_bucket_delete(last_e);
                for (bucket = APR_BRIGADE_FIRST(b);
                        bucket != APR_BRIGADE_SENTINEL(b);
                        bucket = APR_BUCKET_NEXT(bucket)) {

                    /* Do a read on each bucket to pull in the
                     * data from pipe and socket buckets, so
                     * that we don't leave their file descriptors
                     * open indefinitely.  Do the same for file
                     * buckets, with one exception: allow the
                     * first file bucket in the brigade to remain
                     * a file bucket, so that we don't end up
                     * doing an mmap+memcpy every time a client
                     * requests a <8KB file over a keepalive
                     * connection.
                     */
                    if (APR_BUCKET_IS_FILE(bucket) && !file_bucket_saved) {
                        file_bucket_saved = 1;
                    }
                    else {
                        const char *buf;
                        apr_size_t len = 0;
                        rv = apr_bucket_read(bucket, &buf, &len,
                                             APR_BLOCK_READ);
                        if (rv != APR_SUCCESS) {
                            ap_log_cerror(APLOG_MARK, APLOG_ERR, rv,
                                          c, "core_output_filter:"
                                          " Error reading from bucket.");
                            return HTTP_INTERNAL_SERVER_ERROR;
                        }
                    }
                }
            }
            if (!ctx->deferred_write_pool) {
                apr_pool_create(&ctx->deferred_write_pool, c->pool);
                apr_pool_tag(ctx->deferred_write_pool, "deferred_write");
            }
            ap_save_brigade(f, &ctx->b, &b, ctx->deferred_write_pool);

            return APR_SUCCESS;
        }

        if (fd) {
            apr_hdtr_t hdtr;
            apr_size_t bytes_sent;

#if APR_HAS_SENDFILE
            apr_int32_t flags = 0;
#endif

            memset(&hdtr, '\0', sizeof(hdtr));
            if (nvec) {
                hdtr.numheaders = nvec;
                hdtr.headers = vec;
            }

            if (nvec_trailers) {
                hdtr.numtrailers = nvec_trailers;
                hdtr.trailers = vec_trailers;
            }

#if APR_HAS_SENDFILE
            if (apr_file_flags_get(fd) & APR_SENDFILE_ENABLED) {

                if (c->keepalive == AP_CONN_CLOSE && APR_BUCKET_IS_EOS(last_e)) {
                    /* Prepare the socket to be reused */
                    flags |= APR_SENDFILE_DISCONNECT_SOCKET;
                }

                rv = sendfile_it_all(net,      /* the network information   */
                                     fd,       /* the file to send          */
                                     &hdtr,    /* header and trailer iovecs */
                                     foffset,  /* offset in the file to begin
                                                  sending from              */
                                     flen,     /* length of file            */
                                     nbytes + flen, /* total length including
                                                       headers              */
                                     &bytes_sent,   /* how many bytes were
                                                       sent                 */
                                     flags);   /* apr_sendfile flags        */
            }
            else
#endif
            {
                rv = emulate_sendfile(net, fd, &hdtr, foffset, flen,
                                      &bytes_sent);
            }

            if (logio_add_bytes_out && bytes_sent > 0)
                logio_add_bytes_out(c, bytes_sent);

            fd = NULL;
        }
        else {
            apr_size_t bytes_sent;

            rv = writev_it_all(net->client_socket,
                               vec, nvec,
                               nbytes, &bytes_sent);

            if (logio_add_bytes_out && bytes_sent > 0)
                logio_add_bytes_out(c, bytes_sent);
        }

        apr_brigade_cleanup(b);

        /* drive cleanups for resources which were set aside
         * this may occur before or after termination of the request which
         * created the resource
         */
        if (ctx->deferred_write_pool) {
            if (more && more->p == ctx->deferred_write_pool) {
                /* "more" belongs to the deferred_write_pool,
                 * which is about to be cleared.
                 */
                if (APR_BRIGADE_EMPTY(more)) {
                    more = NULL;
                }
                else {
                    /* uh oh... change more's lifetime
                     * to the input brigade's lifetime
                     */
                    apr_bucket_brigade *tmp_more = more;
                    more = NULL;
                    ap_save_brigade(f, &more, &tmp_more, input_pool);
                }
            }
            apr_pool_clear(ctx->deferred_write_pool);
        }

        if (rv != APR_SUCCESS) {
            ap_log_cerror(APLOG_MARK, APLOG_INFO, rv, c,
                          "core_output_filter: writing data to the network");

            if (more)
                apr_brigade_cleanup(more);

            /* No need to check for SUCCESS, we did that above. */
            if (!APR_STATUS_IS_EAGAIN(rv)) {
                c->aborted = 1;
                return APR_ECONNABORTED;
            }

            return APR_SUCCESS;
        }

        b = more;
        more = NULL;
    }  /* end while () */

    return APR_SUCCESS;
}
Example #10
static apr_status_t sendfile_nonblocking(apr_socket_t *s,
                                         apr_bucket *bucket,
                                         apr_size_t *cumulative_bytes_written,
                                         conn_rec *c)
{
    apr_status_t rv = APR_SUCCESS;
    apr_bucket_file *file_bucket;
    apr_file_t *fd;
    apr_size_t file_length;
    apr_off_t file_offset;
    apr_size_t bytes_written = 0;

    if (!APR_BUCKET_IS_FILE(bucket)) {
        ap_log_error(APLOG_MARK, APLOG_ERR, rv, c->base_server, APLOGNO(00006)
                     "core_filter: sendfile_nonblocking: "
                     "this should never happen");
        return APR_EGENERAL;
    }
    file_bucket = (apr_bucket_file *)(bucket->data);
    fd = file_bucket->fd;
    file_length = bucket->length;
    file_offset = bucket->start;

    if (bytes_written < file_length) {
        apr_size_t n = file_length - bytes_written;
        apr_status_t arv;
        apr_interval_time_t old_timeout;

        arv = apr_socket_timeout_get(s, &old_timeout);
        if (arv != APR_SUCCESS) {
            return arv;
        }
        arv = apr_socket_timeout_set(s, 0);
        if (arv != APR_SUCCESS) {
            return arv;
        }
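        /* The socket timeout is now zero, so the sendfile call below will
         * not block. */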
        rv = apr_socket_sendfile(s, fd, NULL, &file_offset, &n, 0);
        if (rv == APR_SUCCESS) {
            bytes_written += n;
            file_offset += n;
        }
        arv = apr_socket_timeout_set(s, old_timeout);
        if ((arv != APR_SUCCESS) && (rv == APR_SUCCESS)) {
            rv = arv;
        }
    }
    if ((ap__logio_add_bytes_out != NULL) && (bytes_written > 0)) {
        ap__logio_add_bytes_out(c, bytes_written);
    }
    *cumulative_bytes_written += bytes_written;
    if ((bytes_written < file_length) && (bytes_written > 0)) {
        apr_bucket_split(bucket, bytes_written);
        APR_BUCKET_REMOVE(bucket);
        apr_bucket_destroy(bucket);
    }
    else if (bytes_written == file_length) {
        APR_BUCKET_REMOVE(bucket);
        apr_bucket_destroy(bucket);
    }
    return rv;
}
Example #11
static apr_status_t writev_nonblocking(apr_socket_t *s,
                                       struct iovec *vec, apr_size_t nvec,
                                       apr_bucket_brigade *bb,
                                       apr_size_t *cumulative_bytes_written,
                                       conn_rec *c)
{
    apr_status_t rv = APR_SUCCESS, arv;
    apr_size_t bytes_written = 0, bytes_to_write = 0;
    apr_size_t i, offset;
    apr_interval_time_t old_timeout;

    arv = apr_socket_timeout_get(s, &old_timeout);
    if (arv != APR_SUCCESS) {
        return arv;
    }
    arv = apr_socket_timeout_set(s, 0);
    if (arv != APR_SUCCESS) {
        return arv;
    }

    for (i = 0; i < nvec; i++) {
        bytes_to_write += vec[i].iov_len;
    }
    offset = 0;
    while (bytes_written < bytes_to_write) {
        apr_size_t n = 0;
        rv = apr_socket_sendv(s, vec + offset, nvec - offset, &n);
        if (n > 0) {
            bytes_written += n;
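            /* Retire the buckets that were written in full; split the bucket
             * that was only partially written and adjust its iovec. */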
            for (i = offset; i < nvec; ) {
                apr_bucket *bucket = APR_BRIGADE_FIRST(bb);
                if (APR_BUCKET_IS_METADATA(bucket)) {
                    APR_BUCKET_REMOVE(bucket);
                    apr_bucket_destroy(bucket);
                }
                else if (n >= vec[i].iov_len) {
                    APR_BUCKET_REMOVE(bucket);
                    apr_bucket_destroy(bucket);
                    offset++;
                    n -= vec[i++].iov_len;
                }
                else {
                    apr_bucket_split(bucket, n);
                    APR_BUCKET_REMOVE(bucket);
                    apr_bucket_destroy(bucket);
                    vec[i].iov_len -= n;
                    vec[i].iov_base = (char *) vec[i].iov_base + n;
                    break;
                }
            }
        }
        if (rv != APR_SUCCESS) {
            break;
        }
    }
    if ((ap__logio_add_bytes_out != NULL) && (bytes_written > 0)) {
        ap__logio_add_bytes_out(c, bytes_written);
    }
    *cumulative_bytes_written += bytes_written;

    arv = apr_socket_timeout_set(s, old_timeout);
    if ((arv != APR_SUCCESS) && (rv == APR_SUCCESS)) {
        return arv;
    }
    else {
        return rv;
    }
}
Example #12
apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, 
                          apr_size_t maxlen, int *pfile_handles_allowed, 
                          const char *msg)
{
    apr_status_t status = APR_SUCCESS;
    int same_alloc;
    
    AP_DEBUG_ASSERT(to);
    AP_DEBUG_ASSERT(from);
    same_alloc = (to->bucket_alloc == from->bucket_alloc);

    if (!FILE_MOVE) {
        pfile_handles_allowed = NULL;
    }
    
    if (!APR_BRIGADE_EMPTY(from)) {
        apr_bucket *b, *end;
        
        status = last_not_included(from, maxlen, same_alloc,
                                   pfile_handles_allowed, &end);
        if (status != APR_SUCCESS) {
            return status;
        }
        
        while (!APR_BRIGADE_EMPTY(from) && status == APR_SUCCESS) {
            b = APR_BRIGADE_FIRST(from);
            if (b == end) {
                break;
            }
            
            if (same_alloc || (b->list == to->bucket_alloc)) {
                /* both brigades use the same bucket_alloc and auto-cleanups
                 * have the same life time. It's therefore safe to just move
                 * directly. */
                APR_BUCKET_REMOVE(b);
                APR_BRIGADE_INSERT_TAIL(to, b);
#if LOG_BUCKETS
                ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                              "h2_util_move: %s, passed bucket(same bucket_alloc) "
                              "%ld-%ld, type=%s",
                              msg, (long)b->start, (long)b->length, 
                              APR_BUCKET_IS_METADATA(b)? 
                              (APR_BUCKET_IS_EOS(b)? "EOS": 
                               (APR_BUCKET_IS_FLUSH(b)? "FLUSH" : "META")) : 
                              (APR_BUCKET_IS_FILE(b)? "FILE" : "DATA"));
#endif
            }
            else if (DEEP_COPY) {
                /* we have not managed the magic of passing buckets from
                 * one thread to another. Any attempts result in
                 * cleanup of pools scrambling memory.
                 */
                if (APR_BUCKET_IS_METADATA(b)) {
                    if (APR_BUCKET_IS_EOS(b)) {
                        APR_BRIGADE_INSERT_TAIL(to, apr_bucket_eos_create(to->bucket_alloc));
                    }
                    else if (APR_BUCKET_IS_FLUSH(b)) {
                        APR_BRIGADE_INSERT_TAIL(to, apr_bucket_flush_create(to->bucket_alloc));
                    }
                    else {
                        /* ignore */
                    }
                }
                else if (pfile_handles_allowed 
                         && *pfile_handles_allowed > 0 
                         && APR_BUCKET_IS_FILE(b)) {
                    /* We do not want to read files when passing buckets, if
                     * we can avoid it. However, what we've come up with so far
                     * is not working correctly, resulting either in crashes or
                     * too many open file descriptors.
                     */
                    apr_bucket_file *f = (apr_bucket_file *)b->data;
                    apr_file_t *fd = f->fd;
                    int setaside = (f->readpool != to->p);
#if LOG_BUCKETS
                    ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                                  "h2_util_move: %s, moving FILE bucket %ld-%ld "
                                  "from=%lx(p=%lx) to=%lx(p=%lx), setaside=%d",
                                  msg, (long)b->start, (long)b->length, 
                                  (long)from, (long)from->p, 
                                  (long)to, (long)to->p, setaside);
#endif
                    if (setaside) {
                        status = apr_file_setaside(&fd, fd, to->p);
                        if (status != APR_SUCCESS) {
                            ap_log_perror(APLOG_MARK, APLOG_ERR, status, to->p,
                                          APLOGNO(02947) "h2_util: %s, setaside FILE", 
                                          msg);
                            return status;
                        }
                    }
                    apr_brigade_insert_file(to, fd, b->start, b->length, 
                                            to->p);
                    --(*pfile_handles_allowed);
                }
                else {
                    const char *data;
                    apr_size_t len;
                    status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
                    if (status == APR_SUCCESS && len > 0) {
                        status = apr_brigade_write(to, NULL, NULL, data, len);
#if LOG_BUCKETS
                        ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                                      "h2_util_move: %s, copied bucket %ld-%ld "
                                      "from=%lx(p=%lx) to=%lx(p=%lx)",
                                      msg, (long)b->start, (long)b->length, 
                                      (long)from, (long)from->p, 
                                      (long)to, (long)to->p);
#endif
                    }
                }
                apr_bucket_delete(b);
            }
            else {
                apr_bucket_setaside(b, to->p);
                APR_BUCKET_REMOVE(b);
                APR_BRIGADE_INSERT_TAIL(to, b);
#if LOG_BUCKETS
                ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                              "h2_util_move: %s, passed setaside bucket %ld-%ld "
                              "from=%lx(p=%lx) to=%lx(p=%lx)",
                              msg, (long)b->start, (long)b->length, 
                              (long)from, (long)from->p, 
                              (long)to, (long)to->p);
#endif
            }
        }
    }
    
    return status;
}
Example #13
/**
 * @internal
 *
 * "Sniffs" the input (request) data from the connection stream and tries
 * to determine who closed a connection and why.
 */
static int ironbee_input_filter(ap_filter_t *f, apr_bucket_brigade *bb,
                                ap_input_mode_t mode, apr_read_type_e block,
                                apr_off_t readbytes)
{
    conn_rec *c = f->c;
    ironbee_conn_context *ctx = f->ctx;
    ib_conn_t *iconn = ctx->iconn;
    ib_core_cfg_t *corecfg;
    ib_stream_t *istream;
    apr_bucket *b;
    apr_status_t rc;
    int buffering = 0;

    /* Any mode not handled just gets passed through. */
    if ((mode != AP_MODE_GETLINE) && (mode != AP_MODE_READBYTES)) {
        return ap_get_brigade(f->next, bb, mode, block, readbytes);
    }

    /* Configure. */
    ib_context_module_config(iconn->ctx, ib_core_module(), (void *)&corecfg);
    if (corecfg != NULL) {
        buffering = (int)corecfg->buffer_req;
    }

    /* When buffering, data is removed from the brigade and handed
     * to IronBee. The filter must not return an empty brigade in this
     * case, so it keeps reading until processed data comes back
     * from IronBee.
     */
    do {
        ib_tx_t *itx = iconn->tx;

        /* If there is any processed data, then send it now. */
        if (buffering && (itx != NULL)) {
            ib_sdata_t *sdata;

            /* Take any data from the drain (processed data) and
             * inject it back into the filter brigade.
             */
            ib_fctl_drain(itx->fctl, &istream);
            if ((istream != NULL) && (istream->nelts > 0)) {
                int done = 0;

                while (!done) {
                    apr_bucket *ibucket = NULL;

                    /// @todo Handle multi-bucket lines
                    if (mode == AP_MODE_GETLINE) {
                        done = 1;
                    }

                    ib_stream_pull(istream, &sdata);
                    if (sdata == NULL) {
                        /* No more data left. */
                        break;
                    }

                    switch (sdata->type) {
                        case IB_STREAM_DATA:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": DATA[%d]: %.*s", (int)sdata->dlen, (int)sdata->dlen, (char *)sdata->data);
#endif
                            
                            /// @todo Is this creating a copy?  Just need a reference.
                            ibucket = apr_bucket_heap_create(sdata->data,
                                                             sdata->dlen,
                                                             NULL,
                                                             bb->bucket_alloc);
                            break;
                        case IB_STREAM_FLUSH:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": FLUSH");
#endif
                            ibucket = apr_bucket_flush_create(bb->bucket_alloc);
                            break;
                        case IB_STREAM_EOH:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": EOH");
#endif
                            /// @todo Do something here???
                            break;
                        case IB_STREAM_EOB:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": EOB");
#endif
                            /// @todo Do something here???
                            break;
                        case IB_STREAM_EOS:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": EOS");
#endif
                            ibucket = apr_bucket_eos_create(bb->bucket_alloc);
                            break;
                        default:
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": UNKNOWN stream data type %d", sdata->type);
                    }

                    if (ibucket != NULL) {
                        APR_BRIGADE_INSERT_TAIL(bb, ibucket);
                    }
                }

                /* Need to send any processed data to avoid deadlock. */
                if (!APR_BRIGADE_EMPTY(bb)) {
                    return APR_SUCCESS;
                }
            }
        }

        /* Fetch data from the next filter. */
        if (buffering) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         "FETCH BRIGADE (buffering)");

            /* Normally Apache will request the headers line-by-line, but
             * IronBee does not require this.  So, here the request is
             * fetched with READBYTES and IronBee will then break
             * it back up into lines when it is injected back into
             * the brigade after the data is processed.
             */
            rc = ap_get_brigade(f->next,
                                bb,
                                AP_MODE_READBYTES,
                                block,
                                HUGE_STRING_LEN);
        }
        else {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         "FETCH BRIGADE (non-buffering)");
            rc = ap_get_brigade(f->next, bb, mode, block, readbytes);
        }

        /* Check for any timeouts/disconnects/errors. */
        if (APR_STATUS_IS_TIMEUP(rc)) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         IB_PRODUCT_NAME ": %s server closed connection (%d)",
                         f->frec->name, rc);

            ap_remove_input_filter(f);
            return rc;
        }
        else if (APR_STATUS_IS_EOF(rc) || apr_get_os_error() == ECONNRESET) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         IB_PRODUCT_NAME ": %s client closed connection (%d)",
                         f->frec->name, rc);

            ap_remove_input_filter(f);
            return rc;
        }
        else if (rc != APR_SUCCESS) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         IB_PRODUCT_NAME ": %s returned %d (0x%08x) - %s",
                         f->frec->name, rc, rc, strerror(apr_get_os_error()));

            return rc;
        }

        /* Process data. */
        for (b = APR_BRIGADE_FIRST(bb);
             b != APR_BRIGADE_SENTINEL(bb);
             b = APR_BUCKET_NEXT(b))
        {
            if (buffering) {
                /// @todo setaside into our own pool to destroy later???
                apr_bucket_setaside(b, c->pool);
                process_bucket(f, b);
                APR_BUCKET_REMOVE(b);
            }
            else {
                process_bucket(f, b);
            }
        }
    } while (buffering);

    return APR_SUCCESS;
}
Example #14
/**
 * The output filter routine. This one gets called whenever a response is
 * generated that passes this filter. Returns APR_SUCCESS if everything works
 * out.
 *
 * @param f     The filter definition.
 * @param bb    The bucket brigade containing the data.
 */
static apr_status_t replace_output_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    replace_ctx_t *ctx = f->ctx;
    apr_bucket *b;
    apr_size_t len;
    const char *data;
    const char *header;
    apr_status_t rv;
    int re_vector[RE_VECTOR_SIZE];  // 3 elements per matched pattern
    replace_pattern_t *next;
    header_replace_pattern_t *next_header;
    int modified = 0;               // flag to determine if a replacement has
                                    // occurred.

    if (!ctx) {
        /* Initialize context */
        ctx = apr_pcalloc(f->r->pool, sizeof(replace_ctx_t));
        f->ctx = ctx;
        ctx->bb = apr_brigade_create(r->pool, c->bucket_alloc);
    }

    /* parse config settings */
    
    /* look for the user-defined filter */
    ctx->filter = find_filter_def(f->r->server, f->frec->name);
    if (!ctx->filter) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
                      "couldn't find definition of filter '%s'",
                      f->frec->name);
        return APR_EINVAL;
    }
    ctx->p = f->r->pool;
    if (ctx->filter->intype &&
        ctx->filter->intype != INTYPE_ALL) {
        if (!f->r->content_type) {
            ctx->noop = 1;
        }
        else {
            const char *ctypes = f->r->content_type;
            const char *ctype = ap_getword(f->r->pool, &ctypes, ';');

            if (strcasecmp(ctx->filter->intype, ctype)) {
                /* wrong IMT for us; don't mess with the output */
                ctx->noop = 1;
            }
        }
    }

    /* exit immediately if there are indications that the filter shouldn't be
     * executed.
     */
    if (ctx->noop == 1) {
        ap_pass_brigade(f->next, bb);
        return APR_SUCCESS;
    }

    /**
     * Loop through the configured header patterns.
     */
    for (next_header = ctx->filter->header_pattern;
         next_header != NULL;
         next_header = next_header->next) {

        // create a separate table with the requested HTTP header entries and
        // unset those headers in the original request.
        apr_table_t *header_table;
        header_table = apr_table_make(r->pool, 2);
        // create a data structure for the callback function
        header_replace_cb_t *hrcb;
        hrcb = apr_palloc(r->pool, sizeof(header_replace_cb_t));
        hrcb->header_table = header_table;
        hrcb->pattern = next_header->pattern;
        hrcb->extra = next_header->extra;
        hrcb->replacement = next_header->replacement;
        hrcb->r = r;
        // pass any header that is defined to be processed to the callback
        // function and unset those headers in the original outgoing record.
        apr_table_do(replace_header_cb, hrcb, r->headers_out, 
                     next_header->header, NULL);
        // only touch the header if the changed header table is not empty.
        if (!apr_is_empty_table(header_table)) {
            apr_table_unset(r->headers_out, next_header->header);
            // overlay the original header table with the new one to reintegrate
            // the changed headers.
            r->headers_out = apr_table_overlay(r->pool, r->headers_out, 
                                               header_table);
        }
    }

    /* Not nice but necessary: unset the ETag, because we do not know how to
     * adjust its value correctly.
     */
    apr_table_unset(f->r->headers_out, "ETag"); 

    int eos = 0;            // flag to check if an EOS bucket is in the brigade.
    apr_bucket *eos_bucket; // backup for the EOS bucket.

    /* Iterate through the available data. Stop if there is an EOS. */
    for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb); b = APR_BUCKET_NEXT(b)) {
        if (APR_BUCKET_IS_EOS(b)) {
            eos = 1;
            ap_save_brigade(f, &ctx->bb, &bb, ctx->p);
            APR_BUCKET_REMOVE(b);
            eos_bucket = b;
            break;
        }
    }


    /* If the iteration over the brigade hasn't found an EOS bucket, just save
     * the brigade and return.
     */
    if (eos != 1) {
        ap_save_brigade(f, &ctx->bb, &bb, ctx->p);
        return APR_SUCCESS;
    }

    if ((rv = apr_brigade_pflatten(ctx->bb, (char **)&data, &len, ctx->p)) 
        != APR_SUCCESS) { 
        /* Return if the flattening didn't work. */
        return rv;
    } else {
        /* Remove the original data from the bucket brigade. Otherwise it would
         * be passed twice (original data and the processed, flattened copy) to
         * the next filter.
         */
        apr_brigade_cleanup(ctx->bb);
    }

    /* Only process the flattened data if there is any. */
    if (len > 0) {

        /* start checking for the regex's. */
        for (next = ctx->filter->pattern; 
             next != NULL; 
             next = next->next)
        {
            int rc = 0;
            int offset = 0;

            /* loop through the configured patterns */
            do {
                rc = pcre_exec(next->pattern, next->extra, data, 
                               len, offset, 0,
                               re_vector, RE_VECTOR_SIZE);
                               
                if (rc < 0 && rc != PCRE_ERROR_NOMATCH) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r, 
                                  "Matching Error %d", rc);
                    return rc;
                }

                /* This shouldn't happen */
                if (rc == 0) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r,
                                  "PCRE output vector too small (%d)", 
                                  RE_VECTOR_SIZE/3-1);
                }

                /* If the result count is greater than 0 then there are
                 * matches in the data string. Thus we try to replace those
                 * strings with the user provided string.
                 */
                if (rc > 0) {
                    char *prefix;   // the string before the matching part.
                    char *postfix;  // the string after the matching part.
                    char *newdata;  // the concatenated string of prefix,
                                    // the replaced string and postfix.
                    char *replacement;
                                    // the string with the data to replace
                                    // (after the subpattern processing has
                                    // been done).
                    char *to_replace[10];
                                    // the string array containing the
                                    // strings that are to be replaced.
                    int match_diff; // the difference between the matching
                                    // string and its replacement.
                    int x;          // a simple counter.
                    char *pos;      // the starting position within the
                                    // replacement string, where there is a
                                    // subpattern to replace.

                    /* start with building the replacement string */
                    replacement = apr_pstrcat(ctx->p, next->replacement,
                                              NULL);

                    /* look for the subpatterns \0 to \9 */

                    for (x = 0; x < rc && x < 10; x++) {
                        /* extract the x'ths subpattern */
                        to_replace[x] = substr(data, re_vector[x*2],
                                               re_vector[x*2+1] -
                                               re_vector[x*2], r); 

                        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                                      "Found match: %s", to_replace[x]);
                        
                        /* the token ( \0 to \9) we are looking for */
                        char *token = apr_pstrcat(ctx->p, "\\",
                                                apr_itoa(ctx->p, x), NULL);
                        /* allocate memory for the replacement operation */
                        char *tmp;
                        if (!to_replace[x] || strlen(to_replace[x]) < 2) {
                            tmp = malloc(strlen(replacement) + 1);
                        } else {
                            tmp = malloc(strlen(replacement) - 1 +
                                         strlen(to_replace[x]));
                        }
                        /* copy the replacement string to the new
                         * location.
                         */
                        memcpy(tmp, replacement, strlen(replacement) + 1);
                        replacement = tmp;
                        /* try to replace each occurrence of the token with
                         * its matched subpattern. */
                        pos = ap_strstr(replacement, token);
                        while (pos) { 
                            if (!to_replace[x]) {
                                break;
                            }
                            substr_replace(pos, to_replace[x],
                                           strlen(pos), 
                                           strlen(to_replace[x]));
                            if (strlen(to_replace[x]) < 2) {
                                tmp = malloc(strlen(replacement) + 1);
                            } else {
                                tmp = malloc(strlen(replacement) - 1 + 
                                             strlen(to_replace[x]));
                            }
                            memcpy(tmp, replacement, 
                                   strlen(replacement) + 1);
                            /* clean up. */
                            free(replacement);
                            replacement = tmp; 
                            pos = ap_strstr(replacement, token);
                        }
                    }

                    match_diff = strlen(replacement) -
                                 (re_vector[1] - re_vector[0]);

                    /* Allocate memory for a buffer to copy the first part
                     * of the data string up to (but not including) the
                     * matching pattern.
                     */
                    prefix = apr_pcalloc(ctx->p, re_vector[0] + 1);
                    if (prefix == NULL) {
                        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                            "Unable to allocate memory for prefix",
                            NULL);
                        return -1;
                    }

                    /* Copy the string from the offset (beginning of
                     * pattern matching) to the first occurrence of the
                     * pattern and add a trailing \0.
                     */
                    memcpy(prefix, data, (size_t)re_vector[0]); 

                    /* Copy the string from the end of the pattern to the
                     * end of the data string itself.
                     */
                    postfix = apr_pcalloc(ctx->p, len);
                    if (postfix == NULL) {
                        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                            "Unable to allocate memory for postfix",
                            NULL);
                        return -1;
                    }
                    
                    memcpy(postfix, 
                           (data + re_vector[1]),
                           len - re_vector[1]);
                    
                    /* Create the new data string, replace the old one
                     * and clean up.
                     */
                    newdata = apr_pstrcat(ctx->p, prefix, 
                                          replacement, postfix, 
                                          NULL);
                    /* update the pointer to the data and free the allocated
                     * memory for the replacement string.
                     */
                    data = newdata;
                    free(replacement);

                    /* Calculate the new offset in the data string, where
                     * the new matching round is to begin.
                     */
                    offset = re_vector[1] + match_diff; 
                    len += match_diff;
                    modified = 1;
                }
            } while (rc > 0);
        }
        /* Adjust the real length of the processed data. */
        if (apr_table_get(f->r->headers_out, "Content-Length") != NULL) {
            apr_table_set(f->r->headers_out, "Content-Length",
                apr_itoa(ctx->p, len));
        }
        /* If an Entity Tag is set, change the mtime and generate a new ETag.*/
        if (apr_table_get(f->r->headers_out, "ETag") != NULL) {
           r->mtime = time(NULL);
           ap_set_etag(r);
        }
    }
    /* Create a new bucket with the processed data, insert that one into our
     * brigade, then insert the saved EOS bucket at the end of the brigade
     * and pass the brigade to the next filter.
     */
    APR_BRIGADE_INSERT_TAIL(ctx->bb, apr_bucket_transient_create(data, len, apr_bucket_alloc_create(ctx->p)));
    APR_BRIGADE_INSERT_TAIL(ctx->bb, eos_bucket);
    ap_pass_brigade(f->next, ctx->bb);

    return APR_SUCCESS;
}
Example #15
/* The handler.  Create a new parser and/or filter context where appropriate
 * and parse the chunks of data received from the brigade
 */
static int idlChunkHandler( ap_filter_t *f, apr_bucket_brigade *brigade ) {

	idlChunkContext* ctx = f->ctx;
	apr_bucket* currentBucket = NULL;
	apr_pool_t* pool = f->r->pool;
	const char* data;
	apr_size_t len;
	osrfStringArray* params = NULL;
	mparams = NULL;

	/* load the per-dir/location config */
	idlChunkConfig* config = ap_get_module_config( 
			f->r->per_dir_config, &idlchunk_module );

	ap_log_rerror(APLOG_MARK, APLOG_ERR, 
			0, f->r, "IDLCHUNK Config:\nContent Type = %s, "
			"Strip PI = %s, Strip Comments = %s, Doctype = %s", 
			config->contentType, 
			(config->stripPI) ? "yes" : "no", 
			(config->stripComments) ? "yes" : "no",
			config->doctype);

	/* set the content type based on the config */
	ap_set_content_type(f->r, config->contentType);

	//ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, "Set content type");

    params = apacheParseParms(f->r); /* free me */
    mparams = apacheGetParamValues( params, "class" ); /* free me */

    all = 1;

    if (mparams && mparams->size > 0) all = 0;

	//ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, "Parsed the params, if any");

	/* create the XML parser */
	int firstrun = 0;
	if( parser == NULL ) {
		firstrun = 1;
		parser = XML_ParserCreate("UTF-8");
		XML_SetUserData(parser, f);
		XML_SetElementHandler(parser, startElement, endElement);
		XML_SetCharacterDataHandler(parser, charHandler);
		if(!config->stripDoctype)
			XML_SetStartDoctypeDeclHandler( parser, doctypeHandler );
		if(!config->stripPI)
			XML_SetProcessingInstructionHandler(parser, handlePI);
		if(!config->stripComments)
			XML_SetCommentHandler(parser, handleComment);
	}

	/* create the filter context */
	if( ctx == NULL ) {
		f->ctx = ctx = apr_pcalloc( pool, sizeof(*ctx));
		ctx->brigade = apr_brigade_create( pool, f->c->bucket_alloc );
		ctx->parser = parser;
	}


	if(firstrun) { /* we haven't started writing the data to the stream yet */

		/* go ahead and write the doctype out if we have one defined */
		if(config->doctype) {
			ap_log_rerror( APLOG_MARK, APLOG_DEBUG, 
					0, f->r, "IDLCHUNK DOCTYPE => %s", config->doctype);
			_fwrite(f, "%s\n", config->doctype);
		}
	}


	/* cycle through the buckets in the brigade */
	while (!APR_BRIGADE_EMPTY(brigade)) {

		/* grab the next bucket */
		currentBucket = APR_BRIGADE_FIRST(brigade);

		/* clean up when we're done */
		if (APR_BUCKET_IS_EOS(currentBucket) || APR_BUCKET_IS_FLUSH(currentBucket)) {
    	  	APR_BUCKET_REMOVE(currentBucket);
			APR_BRIGADE_INSERT_TAIL(ctx->brigade, currentBucket);
			ap_pass_brigade(f->next, ctx->brigade);
			XML_ParserFree(parser);
            if (params) osrfStringArrayFree(params);
            if (mparams) osrfStringArrayFree(mparams);
			parser = NULL;
		  	return APR_SUCCESS;
    	}

		/* read the incoming data */
		int s = apr_bucket_read(currentBucket, &data, &len, APR_NONBLOCK_READ);
		if( s != APR_SUCCESS ) {
			ap_log_rerror( APLOG_MARK, APLOG_ERR, 0, f->r, 
					"IDLCHUNK error reading data from filter with status %d", s);
            if (params) osrfStringArrayFree(params);
            if (mparams) osrfStringArrayFree(mparams);
			return s;
		}

		if (len > 0) {

			ap_log_rerror( APLOG_MARK, APLOG_DEBUG, 
					0, f->r, "IDLCHUNK read %d bytes", (int)len);

			/* push data into the XML push parser */
			if ( XML_Parse(ctx->parser, data, len, 0) == XML_STATUS_ERROR ) {

                char tmp[len+1];
                memcpy(tmp, data, len);
                tmp[len] = '\0';

				/* log and die on XML errors */
				ap_log_rerror( APLOG_MARK, APLOG_ERR, 0, f->r, 
                    "IDLCHUNK XML Parse Error: %s at line %d: parsing %s: data %s",
					XML_ErrorString(XML_GetErrorCode(ctx->parser)), 
					(int) XML_GetCurrentLineNumber(ctx->parser), f->r->filename, tmp);

				XML_ParserFree(parser);
                if (params) osrfStringArrayFree(params);
                if (mparams) osrfStringArrayFree(mparams);
				parser = NULL;
				return HTTP_INTERNAL_SERVER_ERROR; 
			}
    	}

		/* so a subrequest doesn't re-read this bucket */
		apr_bucket_delete(currentBucket); 
  	}

	apr_brigade_destroy(brigade);
    if (params) osrfStringArrayFree(params);
    if (mparams) osrfStringArrayFree(mparams);
  	return APR_SUCCESS;	
}
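The handler above drives expat as a push parser: each bucket is fed to XML_Parse() with isFinal set to 0. A minimal standalone sketch of that pattern, independent of Apache (the chunk contents are invented for illustration):

#include <stdio.h>
#include <string.h>
#include <expat.h>

static void XMLCALL start_el(void *ud, const XML_Char *name, const XML_Char **atts)
{
    (void)ud; (void)atts;
    printf("start: %s\n", name);
}

static void XMLCALL end_el(void *ud, const XML_Char *name)
{
    (void)ud;
    printf("end: %s\n", name);
}

int main(void)
{
    /* Arbitrary split points: the parser keeps state between calls. */
    const char *chunks[] = { "<IDL><cla", "ss id=\"au\"/>", "</IDL>" };
    XML_Parser p = XML_ParserCreate("UTF-8");
    size_t i;

    XML_SetElementHandler(p, start_el, end_el);

    for (i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++) {
        if (XML_Parse(p, chunks[i], (int)strlen(chunks[i]), 0) == XML_STATUS_ERROR) {
            fprintf(stderr, "parse error: %s at line %lu\n",
                    XML_ErrorString(XML_GetErrorCode(p)),
                    (unsigned long)XML_GetCurrentLineNumber(p));
            XML_ParserFree(p);
            return 1;
        }
    }
    /* Signal end of input (the filter above relies on the EOS bucket instead). */
    XML_Parse(p, NULL, 0, 1);
    XML_ParserFree(p);
    return 0;
}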
Ejemplo n.º 16
0
static apr_status_t
 akismet_filter(ap_filter_t *f,
                apr_bucket_brigade *out_brigade,
                ap_input_mode_t input_mode,
                apr_read_type_e read_type,
                apr_off_t nbytes)
{
    akismet_config *conf = NULL;
    akismet_config *sconf =NULL;
    akismet_config *dconf =NULL;
    request_rec *r = f->r;
    AkismetFilterContext *pctx;
    apr_status_t ret;
    apr_table_t *params_table;
    char* query_string=NULL;
    int i=0;
    char *next, *last;
    /*
    * decide which configuration to use:
    * fall back to the server-level config if no directory-level config is defined
    */
    sconf =
        (akismet_config *)ap_get_module_config(r->server->module_config,&akismet_module);
    dconf =
        (akismet_config *)ap_get_module_config(r->per_dir_config, &akismet_module);
    conf = dconf;
    if ( !dconf
        || (!dconf->enabled && !dconf->apikey && !dconf->blogurl) ){
        conf = sconf;
    }
    /*
    * parse request parameters
    */
    params_table = apr_table_make(r->pool, PARAMS_TABLE_INIT_SIZE);

    if (!(pctx = f->ctx)) {
        f->ctx = pctx = apr_palloc(r->pool, sizeof *pctx);
        pctx->tmp_brigade = apr_brigade_create(r->pool, r->connection->bucket_alloc);
    }
    if (APR_BRIGADE_EMPTY(pctx->tmp_brigade)) {
        ret = ap_get_brigade(f->next, pctx->tmp_brigade, input_mode, read_type, nbytes);
        if (input_mode == AP_MODE_EATCRLF || ret != APR_SUCCESS) {
            return ret;
        }
    }
    while( !APR_BRIGADE_EMPTY(pctx->tmp_brigade) ) {
        apr_bucket *in_bucket = APR_BRIGADE_FIRST(pctx->tmp_brigade);
        apr_bucket *out_bucket;
        const char *data;
        apr_size_t len;
        char *buf;
        int n;
        if(APR_BUCKET_IS_EOS(in_bucket)) {
            APR_BUCKET_REMOVE(in_bucket);
            APR_BRIGADE_INSERT_TAIL(out_brigade, in_bucket);
            break;
        }
        ret=apr_bucket_read(in_bucket, &data, &len, read_type);
        if(ret != APR_SUCCESS){
            return ret;
        }
        if (query_string == NULL) {
            query_string = apr_pstrmemdup(r->pool, data, len);
        } else {
            query_string = apr_pstrcat(r->pool, query_string,
                                       apr_pstrmemdup(r->pool, data, len), NULL);
        }
        out_bucket = apr_bucket_heap_create(data, len, 0, r->connection->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(out_brigade, out_bucket);
        apr_bucket_delete(in_bucket);
    }
    if (!query_string) {
        return APR_SUCCESS;
    }
    /*
    * split query_string and set params tables
    */
    next =  (char*)apr_strtok( query_string, "&", &last);
    while (next) {
        apr_collapse_spaces (next, next);
        char* k, *v;
        k =  (char*)apr_strtok( next, "=", &v);
        if (k) {
            if ( ( conf->comment_param_key
                    && strcasecmp( k, conf->comment_param_key)==0)
                || ( conf->comment_author_param_key
                    && strcasecmp( k, conf->comment_author_param_key)==0)
                || (conf->comment_author_email_param_key
                    && strcasecmp( k, conf->comment_author_email_param_key)==0)
                || (conf->comment_author_url_param_key
                    && strcasecmp( k, conf->comment_author_url_param_key)==0)
                || (conf->comment_permalink_param_key
                    && strcasecmp( k, conf->comment_permalink_param_key)==0)
             ) {
                apr_table_set(params_table, k, v);
            }
        }
        next = (char*)apr_strtok(NULL, "&", &last);
    }

    /*
    * comment spam check by akismet api
    */
    return akismet_api_execute(r,conf,params_table);
}
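The query-string handling above relies on apr_strtok(), which modifies the string in place; that is why the filter works on a pool copy of the request body. A small standalone sketch of the same splitting (the sample data is invented, and note that the values remain URL-encoded, as they do in the filter):

#include <stdio.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_strings.h>

int main(void)
{
    apr_pool_t *pool;
    char *query, *pair, *last;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    query = apr_pstrdup(pool,
        "comment=hello&author=alice&url=http%3A%2F%2Fexample.com");

    pair = apr_strtok(query, "&", &last);
    while (pair) {
        char *value;
        char *key = apr_strtok(pair, "=", &value);
        if (key) {
            /* Values are still URL-encoded here; a real module would
             * unescape them before use. */
            printf("%s -> %s\n", key, value ? value : "");
        }
        pair = apr_strtok(NULL, "&", &last);
    }

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}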
Ejemplo n.º 17
0
static apr_status_t bucketeer_out_filter(ap_filter_t *f,
                                         apr_bucket_brigade *bb)
{
    apr_bucket *e;
    request_rec *r = f->r;
    bucketeer_ctx_t *ctx = f->ctx;
    bucketeer_filter_config_t *c;

    c = ap_get_module_config(r->server->module_config, &bucketeer_module);

    /* If we have a context, it means we've done this before successfully. */
    if (!ctx) {
        if (!r->content_type || strncmp(r->content_type, "text/", 5)) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* We're cool with filtering this. */
        ctx = f->ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
        ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        apr_table_unset(f->r->headers_out, "Content-Length");
    }

    for (e = APR_BRIGADE_FIRST(bb);
         e != APR_BRIGADE_SENTINEL(bb);
         e = APR_BUCKET_NEXT(e))
    {
        const char *data;
        apr_size_t len, i, lastpos;

        if (APR_BUCKET_IS_EOS(e)) {
            APR_BUCKET_REMOVE(e);
            APR_BRIGADE_INSERT_TAIL(ctx->bb, e);

            /* Okay, we've seen the EOS.
             * Time to pass it along down the chain.
             */
            return ap_pass_brigade(f->next, ctx->bb);
        }

        if (APR_BUCKET_IS_FLUSH(e)) {
            /*
             * Ignore flush buckets for the moment..
             * we decide what to stream
             */
            continue;
        }

        if (APR_BUCKET_IS_METADATA(e)) {
            /* metadata bucket */
            apr_bucket *cpy;
            apr_bucket_copy(e, &cpy);
            APR_BRIGADE_INSERT_TAIL(ctx->bb, cpy);
            continue;
        }

        /* read */
        apr_bucket_read(e, &data, &len, APR_BLOCK_READ);

        if (len > 0) {
            lastpos = 0;
            for (i = 0; i < len; i++) {
                if (data[i] == c->flushdelimiter ||
                    data[i] == c->bucketdelimiter ||
                    data[i] == c->passdelimiter) {
                    apr_bucket *p;
                    if (i - lastpos > 0) {
                        p = apr_bucket_pool_create(apr_pmemdup(f->r->pool,
                                                               &data[lastpos],
                                                               i - lastpos),
                                                    i - lastpos,
                                                    f->r->pool,
                                                    f->c->bucket_alloc);
                        APR_BRIGADE_INSERT_TAIL(ctx->bb, p);
                    }
                    lastpos = i + 1;
                    if (data[i] == c->flushdelimiter) {
                        p = apr_bucket_flush_create(f->c->bucket_alloc);
                        APR_BRIGADE_INSERT_TAIL(ctx->bb, p);
                    }
                    if (data[i] == c->passdelimiter) {
                        apr_status_t rv;

                        rv = ap_pass_brigade(f->next, ctx->bb);
                        if (rv) {
                            return rv;
                        }
                    }
                }
            }
            /* XXX: really should append this to the next 'real' bucket */
            if (lastpos < i) {
                apr_bucket *p;
                p = apr_bucket_pool_create(apr_pmemdup(f->r->pool,
                                                       &data[lastpos],
                                                       i - lastpos),
                                           i - lastpos,
                                           f->r->pool,
                                           f->c->bucket_alloc);
                lastpos = i;
                APR_BRIGADE_INSERT_TAIL(ctx->bb, p);
            }
        }
    }

    return APR_SUCCESS;
}
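Each delimited segment above is duplicated with apr_pmemdup() and wrapped in a pool bucket, so it no longer aliases the bucket it was read from. A minimal helper sketch of that pattern (the helper name is hypothetical, not part of the module above):

#include <apr_pools.h>
#include <apr_strings.h>
#include <apr_buckets.h>

static apr_bucket *slice_to_pool_bucket(const char *data, apr_size_t start,
                                        apr_size_t len, apr_pool_t *pool,
                                        apr_bucket_alloc_t *ba)
{
    /* Copy the slice into the pool so its lifetime no longer depends on
     * the bucket the data was originally read from. */
    void *copy = apr_pmemdup(pool, data + start, len);
    return apr_bucket_pool_create(copy, len, pool, ba);
}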
Ejemplo n.º 18
0
static apr_status_t xlate_in_filter(ap_filter_t *f, apr_bucket_brigade *bb,
                                    ap_input_mode_t mode, apr_read_type_e block,
                                    apr_off_t readbytes)
{
    apr_status_t rv;
    charset_req_t *reqinfo = ap_get_module_config(f->r->request_config,
                                                  &charset_lite_module);
    charset_dir_t *dc = ap_get_module_config(f->r->per_dir_config,
                                             &charset_lite_module);
    charset_filter_ctx_t *ctx = f->ctx;
    apr_size_t buffer_size;
    int hit_eos;

    if (!ctx) {
        /* this is SetInputFilter path; grab the preallocated context,
         * if any; note that if we decided not to do anything in an earlier
         * handler, we won't even have a reqinfo
         */
        if (reqinfo) {
            ctx = f->ctx = reqinfo->input_ctx;
            reqinfo->input_ctx = NULL; /* prevent SNAFU if user coded us twice
                                        * in the filter chain; we can't have two
                                        * instances using the same context
                                        */
        }
        if (!ctx) {                   /* no idea how to translate; don't do anything */
            ctx = f->ctx = apr_pcalloc(f->r->pool, sizeof(charset_filter_ctx_t));
            ctx->dc = dc;
            ctx->noop = 1;
        }
    }

    ap_log_rerror(APLOG_MARK, APLOG_TRACE6, 0, f->r,
                 "xlate_in_filter() - "
                 "charset_source: %s charset_default: %s",
                 dc && dc->charset_source ? dc->charset_source : "(none)",
                 dc && dc->charset_default ? dc->charset_default : "(none)");

    if (!ctx->ran) {  /* filter never ran before */
        chk_filter_chain(f);
        ctx->ran = 1;
        if (!ctx->noop && !ctx->is_sb
            && apr_table_get(f->r->headers_in, "Content-Length")) {
            /* A Content-Length header is present, but it won't be valid after
             * conversion because we're not converting between two single-byte
             * charsets.  This will affect most CGI scripts and may affect
             * some modules.
             * Content-Length can't be unset here because that would break
             * being able to read the request body.
             * Processing of chunked request bodies is not impacted by this
             * filter since the length was not declared anyway.
             */
            ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, f->r,
                          "Request body length may change, resulting in "
                          "misprocessing by some modules or scripts");
        }
    }

    if (ctx->noop) {
        return ap_get_brigade(f->next, bb, mode, block, readbytes);
    }

    if (APR_BRIGADE_EMPTY(ctx->bb)) {
        if ((rv = ap_get_brigade(f->next, bb, mode, block,
                                 readbytes)) != APR_SUCCESS) {
            return rv;
        }
    }
    else {
        APR_BRIGADE_PREPEND(bb, ctx->bb); /* first use the leftovers */
    }

    buffer_size = INPUT_XLATE_BUF_SIZE;
    rv = xlate_brigade(ctx, bb, ctx->tmp, &buffer_size, &hit_eos);
    if (rv == APR_SUCCESS) {
        if (!hit_eos) {
            /* move anything leftover into our context for next time;
             * we don't currently "set aside" since the data came from
             * down below, but I suspect that for long-term we need to
             * do that
             */
            APR_BRIGADE_CONCAT(ctx->bb, bb);
        }
        if (buffer_size < INPUT_XLATE_BUF_SIZE) { /* do we have output? */
            apr_bucket *e;

            e = apr_bucket_heap_create(ctx->tmp,
                                       INPUT_XLATE_BUF_SIZE - buffer_size,
                                       NULL, f->r->connection->bucket_alloc);
            /* make sure we insert at the head, because there may be
             * an eos bucket already there, and the eos bucket should
             * come after the data
             */
            APR_BRIGADE_INSERT_HEAD(bb, e);
        }
        else {
            /* XXX need to get some more data... what if the last brigade
             * we got had only the first byte of a multibyte char?  we need
             * to grab more data from the network instead of returning an
             * empty brigade
             */
        }
        /* If we have any metadata at the head of ctx->bb, go ahead and move it
         * onto the end of bb to be returned to our caller.
         */
        if (!APR_BRIGADE_EMPTY(ctx->bb)) {
            apr_bucket *b = APR_BRIGADE_FIRST(ctx->bb);
            while (b != APR_BRIGADE_SENTINEL(ctx->bb)
                   && APR_BUCKET_IS_METADATA(b)) {
                APR_BUCKET_REMOVE(b);
                APR_BRIGADE_INSERT_TAIL(bb, b);
                b = APR_BRIGADE_FIRST(ctx->bb);
            }
        }
    }
    else {
        log_xlate_error(f, rv);
    }

    return rv;
}
Ejemplo n.º 19
0
/* xlate_out_filter() handles (almost) arbitrary conversions from one charset
 * to another...
 * translation is determined in the fixup hook (find_code_page), which is
 * where the filter's context data is set up... the context data gives us
 * the translation handle
 */
static apr_status_t xlate_out_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    charset_req_t *reqinfo = ap_get_module_config(f->r->request_config,
                                                  &charset_lite_module);
    charset_dir_t *dc = ap_get_module_config(f->r->per_dir_config,
                                             &charset_lite_module);
    charset_filter_ctx_t *ctx = f->ctx;
    apr_bucket *dptr, *consumed_bucket;
    const char *cur_str;
    apr_size_t cur_len, cur_avail;
    char tmp[OUTPUT_XLATE_BUF_SIZE];
    apr_size_t space_avail;
    int done;
    apr_status_t rv = APR_SUCCESS;

    if (!ctx) {
        /* this is SetOutputFilter path; grab the preallocated context,
         * if any; note that if we decided not to do anything in an earlier
         * handler, we won't even have a reqinfo
         */
        if (reqinfo) {
            ctx = f->ctx = reqinfo->output_ctx;
            reqinfo->output_ctx = NULL; /* prevent SNAFU if user coded us twice
                                         * in the filter chain; we can't have two
                                         * instances using the same context
                                         */
        }
        if (!ctx) {                   /* no idea how to translate; don't do anything */
            ctx = f->ctx = apr_pcalloc(f->r->pool, sizeof(charset_filter_ctx_t));
            ctx->dc = dc;
            ctx->noop = 1;
        }
    }

    /* Check the mime type to see if translation should be performed.
     */
    if (!ctx->noop && ctx->xlate == NULL) {
        const char *mime_type = f->r->content_type;

        if (mime_type && (strncasecmp(mime_type, "text/", 5) == 0 ||
#if APR_CHARSET_EBCDIC
        /* On an EBCDIC machine, be willing to translate mod_autoindex-
         * generated output.  Otherwise, it doesn't look too cool.
         *
         * XXX This isn't a perfect fix because this doesn't trigger us
         * to convert from the charset of the source code to ASCII.  The
         * general solution seems to be to allow a generator to set an
         * indicator in the r specifying that the body is coded in the
         * implementation character set (i.e., the charset of the source
         * code).  This would get several different types of documents
         * translated properly: mod_autoindex output, mod_status output,
         * mod_info output, hard-coded error documents, etc.
         */
            strcmp(mime_type, DIR_MAGIC_TYPE) == 0 ||
#endif
            strncasecmp(mime_type, "message/", 8) == 0 ||
            dc->force_xlate == FX_FORCE)) {

            rv = apr_xlate_open(&ctx->xlate,
                                dc->charset_default, dc->charset_source, f->r->pool);
            if (rv != APR_SUCCESS) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r, APLOGNO(01453)
                              "can't open translation %s->%s",
                              dc->charset_source, dc->charset_default);
                ctx->noop = 1;
            }
            else {
                if (apr_xlate_sb_get(ctx->xlate, &ctx->is_sb) != APR_SUCCESS) {
                    ctx->is_sb = 0;
                }
            }
        }
        else {
            ctx->noop = 1;
            if (mime_type) {
                ap_log_rerror(APLOG_MARK, APLOG_TRACE6, 0, f->r,
                              "mime type is %s; no translation selected",
                              mime_type);
            }
        }
    }

    ap_log_rerror(APLOG_MARK, APLOG_TRACE6, 0, f->r,
                  "xlate_out_filter() - "
                  "charset_source: %s charset_default: %s",
                  dc && dc->charset_source ? dc->charset_source : "(none)",
                  dc && dc->charset_default ? dc->charset_default : "(none)");

    if (!ctx->ran) {  /* filter never ran before */
        chk_filter_chain(f);
        ctx->ran = 1;
        if (!ctx->noop && !ctx->is_sb) {
            /* We're not converting between two single-byte charsets, so unset
             * Content-Length since it is unlikely to remain the same.
             */
            apr_table_unset(f->r->headers_out, "Content-Length");
        }
    }

    if (ctx->noop) {
        return ap_pass_brigade(f->next, bb);
    }

    dptr = APR_BRIGADE_FIRST(bb);
    done = 0;
    cur_len = 0;
    space_avail = sizeof(tmp);
    consumed_bucket = NULL;
    while (!done) {
        if (!cur_len) { /* no bytes left to process in the current bucket... */
            if (consumed_bucket) {
                apr_bucket_delete(consumed_bucket);
                consumed_bucket = NULL;
            }
            if (dptr == APR_BRIGADE_SENTINEL(bb)) {
                break;
            }
            if (APR_BUCKET_IS_EOS(dptr)) {
                cur_len = -1; /* XXX yuck, but that tells us to send
                                 * eos down; when we minimize our bb construction
                                 * we'll fix this crap */
                if (ctx->saved) {
                    /* Oops... we have a partial char from the previous bucket
                     * that won't be completed because there's no more data.
                     */
                    rv = APR_INCOMPLETE;
                    ctx->ees = EES_INCOMPLETE_CHAR;
                }
                break;
            }
            if (APR_BUCKET_IS_METADATA(dptr)) {
                apr_bucket *metadata_bucket;
                metadata_bucket = dptr;
                dptr = APR_BUCKET_NEXT(dptr);
                APR_BUCKET_REMOVE(metadata_bucket);
                rv = send_bucket_downstream(f, metadata_bucket);
                if (rv != APR_SUCCESS) {
                    done = 1;
                }
                continue;
            }
            rv = apr_bucket_read(dptr, &cur_str, &cur_len, APR_BLOCK_READ);
            if (rv != APR_SUCCESS) {
                ctx->ees = EES_BUCKET_READ;
                break;
            }
            consumed_bucket = dptr; /* for axing when we're done reading it */
            dptr = APR_BUCKET_NEXT(dptr); /* get ready for when we access the
                                          * next bucket */
        }
        /* Try to fill up our tmp buffer with translated data. */
        cur_avail = cur_len;

        if (cur_len) { /* maybe we just hit the end of a pipe (len = 0) ? */
            if (ctx->saved) {
                /* Rats... we need to finish a partial character from the previous
                 * bucket.
                 */
                char *tmp_tmp;

                tmp_tmp = tmp + sizeof(tmp) - space_avail;
                rv = finish_partial_char(ctx,
                                         &cur_str, &cur_len,
                                         &tmp_tmp, &space_avail);
            }
            else {
                rv = apr_xlate_conv_buffer(ctx->xlate,
                                           cur_str, &cur_avail,
                                           tmp + sizeof(tmp) - space_avail, &space_avail);

                /* Update input ptr and len after consuming some bytes */
                cur_str += cur_len - cur_avail;
                cur_len = cur_avail;

                if (rv == APR_INCOMPLETE) { /* partial character at end of input */
                    /* We need to save the final byte(s) for next time; we can't
                     * convert it until we look at the next bucket.
                     */
                    rv = set_aside_partial_char(ctx, cur_str, cur_len);
                    cur_len = 0;
                }
            }
        }

        if (rv != APR_SUCCESS) {
            /* bad input byte or partial char too big to store */
            done = 1;
        }

        if (space_avail < XLATE_MIN_BUFF_LEFT) {
            /* It is time to flush, as there is not enough space left in the
             * current output buffer to bother with converting more data.
             */
            rv = send_downstream(f, tmp, sizeof(tmp) - space_avail);
            if (rv != APR_SUCCESS) {
                done = 1;
            }

            /* tmp is now empty */
            space_avail = sizeof(tmp);
        }
    }

    if (rv == APR_SUCCESS) {
        if (space_avail < sizeof(tmp)) { /* gotta write out what we converted */
            rv = send_downstream(f, tmp, sizeof(tmp) - space_avail);
        }
    }
    if (rv == APR_SUCCESS) {
        if (cur_len == -1) {
            rv = send_eos(f);
        }
    }
    else {
        log_xlate_error(f, rv);
    }

    return rv;
}
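Both xlate filters are thin drivers around the apr_xlate API. A standalone sketch of a single-buffer conversion (the charset names are chosen arbitrarily); as in the filters above, the output length is the buffer size minus the remaining space reported through the last argument:

#include <stdio.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_xlate.h>

int main(void)
{
    apr_pool_t *pool;
    apr_xlate_t *convset;
    const char in[] = "hello, world";
    char out[256];
    apr_size_t inbytes = sizeof(in) - 1;
    apr_size_t outbytes = sizeof(out);
    apr_status_t rv;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    /* to-charset first, from-charset second, as in apr_xlate_open(). */
    rv = apr_xlate_open(&convset, "UTF-8", "ISO-8859-1", pool);
    if (rv == APR_SUCCESS) {
        rv = apr_xlate_conv_buffer(convset, in, &inbytes, out, &outbytes);
        if (rv == APR_SUCCESS) {
            /* outbytes now holds the space left over, so the converted
             * length is sizeof(out) - outbytes, mirroring the filters. */
            printf("converted %d bytes\n", (int)(sizeof(out) - outbytes));
        }
        apr_xlate_close(convset);
    }

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}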
Ejemplo n.º 20
0
/* Bring the data from the brigade (which represents the result of the
 * request_rec out filter chain) into the h2_mplx for further sending
 * on the master connection. 
 */
static apr_status_t slave_out(h2_task *task, ap_filter_t* f, 
                              apr_bucket_brigade* bb)
{
    apr_bucket *b;
    apr_status_t status = APR_SUCCESS;
    int flush = 0, blocking;
    
    if (task->frozen) {
        h2_util_bb_log(task->c, task->stream_id, APLOG_TRACE2,
                       "frozen task output write, ignored", bb);
        while (!APR_BRIGADE_EMPTY(bb)) {
            b = APR_BRIGADE_FIRST(bb);
            if (AP_BUCKET_IS_EOR(b)) {
                APR_BUCKET_REMOVE(b);
                task->eor = b;
            }
            else {
                apr_bucket_delete(b);
            }
        }
        return APR_SUCCESS;
    }

    /* we send blocking only once the output has been opened (someone is
     * there reading it) *and* the task is not assigned to an h2_req_engine */
    blocking = (!task->assigned && task->output.opened);
    if (!task->output.opened) {
        for (b = APR_BRIGADE_FIRST(bb);
             b != APR_BRIGADE_SENTINEL(bb);
             b = APR_BUCKET_NEXT(b)) {
            if (APR_BUCKET_IS_FLUSH(b)) {
                flush = 1;
                break;
            }
        }
    }
    
    if (task->output.bb && !APR_BRIGADE_EMPTY(task->output.bb)) {
        /* still have data buffered from previous attempt.
         * setaside and append new data and try to pass the complete data */
        if (!APR_BRIGADE_EMPTY(bb)) {
            status = ap_save_brigade(f, &task->output.bb, &bb, task->pool);
        }
        if (status == APR_SUCCESS) {
            status = send_out(task, task->output.bb, blocking);
        } 
    }
    else {
        /* no data buffered here, try to pass the brigade directly */
        status = send_out(task, bb, blocking); 
        if (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
            /* could not write all, buffer the rest */
            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c, APLOGNO(03405)
                          "h2_slave_out(%s): saving brigade", 
                          task->id);
            status = ap_save_brigade(f, &task->output.bb, &bb, task->pool);
            flush = 1;
        }
    }
    
    if (status == APR_SUCCESS && !task->output.opened && flush) {
        /* got a flush or could not write all, time to tell someone to read */
        status = open_output(task);
    }
    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, task->c, 
                  "h2_slave_out(%s): slave_out leave", task->id);    
    return status;
}
Ejemplo n.º 21
0
static apr_status_t copy_brigade_range(apr_bucket_brigade *bb,
                                       apr_bucket_brigade *bbout,
                                       apr_off_t start,
                                       apr_off_t end)
{
    apr_bucket *first = NULL, *last = NULL, *out_first = NULL, *e;
    apr_uint64_t pos = 0, off_first = 0, off_last = 0;
    apr_status_t rv;
    apr_uint64_t start64, end64;
    apr_off_t pofft = 0;

    /*
     * Once we know that start and end are >= 0 convert everything to apr_uint64_t.
     * See the comments in apr_brigade_partition why.
     * In short apr_off_t (for values >= 0) and apr_size_t fit into apr_uint64_t.
     */
    start64 = (apr_uint64_t)start;
    end64 = (apr_uint64_t)end;

    if (start < 0 || end < 0 || start64 > end64)
        return APR_EINVAL;

    for (e = APR_BRIGADE_FIRST(bb);
         e != APR_BRIGADE_SENTINEL(bb);
         e = APR_BUCKET_NEXT(e))
    {
        apr_uint64_t elen64;
        /* we know that no bucket has undefined length (-1) */
        AP_DEBUG_ASSERT(e->length != (apr_size_t)(-1));
        elen64 = (apr_uint64_t)e->length;
        if (!first && (elen64 + pos > start64)) {
            first = e;
            off_first = pos;
        }
        if (elen64 + pos > end64) {
            last = e;
            off_last = pos;
            break;
        }
        pos += elen64;
    }
    if (!first || !last)
        return APR_EINVAL;

    e = first;
    while (1)
    {
        apr_bucket *copy;
        AP_DEBUG_ASSERT(e != APR_BRIGADE_SENTINEL(bb));
        rv = apr_bucket_copy(e, &copy);
        if (rv != APR_SUCCESS) {
            apr_brigade_cleanup(bbout);
            return rv;
        }

        APR_BRIGADE_INSERT_TAIL(bbout, copy);
        if (e == first) {
            if (off_first != start64) {
                rv = apr_bucket_split(copy, (apr_size_t)(start64 - off_first));
                if (rv != APR_SUCCESS) {
                    apr_brigade_cleanup(bbout);
                    return rv;
                }
                out_first = APR_BUCKET_NEXT(copy);
                APR_BUCKET_REMOVE(copy);
                apr_bucket_destroy(copy);
            }
            else {
                out_first = copy;
            }
        }
        if (e == last) {
            if (e == first) {
                off_last += start64 - off_first;
                copy = out_first;
            }
            if (end64 - off_last != (apr_uint64_t)e->length) {
                rv = apr_bucket_split(copy, (apr_size_t)(end64 + 1 - off_last));
                if (rv != APR_SUCCESS) {
                    apr_brigade_cleanup(bbout);
                    return rv;
                }
                copy = APR_BUCKET_NEXT(copy);
                if (copy != APR_BRIGADE_SENTINEL(bbout)) {
                    APR_BUCKET_REMOVE(copy);
                    apr_bucket_destroy(copy);
                }
            }
            break;
        }
        e = APR_BUCKET_NEXT(e);
    }

    AP_DEBUG_ASSERT(APR_SUCCESS == apr_brigade_length(bbout, 1, &pofft));
    pos = (apr_uint64_t)pofft;
    AP_DEBUG_ASSERT(pos == end64 - start64 + 1);
    return APR_SUCCESS;
}
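A hedged usage sketch for copy_brigade_range() above, assuming it is called from the same translation unit (the function is static): copy bytes 5 through 9, inclusive, of a two-bucket brigade into a fresh brigade.

static apr_status_t copy_example(conn_rec *c, apr_pool_t *pool)
{
    apr_bucket_alloc_t *ba = c->bucket_alloc;
    apr_bucket_brigade *bb = apr_brigade_create(pool, ba);
    apr_bucket_brigade *range = apr_brigade_create(pool, ba);
    apr_off_t len = 0;
    apr_status_t rv;

    /* Immortal buckets have a known length, which copy_brigade_range()
     * requires. */
    APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_immortal_create("Hello, ", 7, ba));
    APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_immortal_create("brigade!", 8, ba));

    rv = copy_brigade_range(bb, range, 5, 9);    /* copies ", bri" */
    if (rv == APR_SUCCESS) {
        rv = apr_brigade_length(range, 1, &len); /* len == 5 */
    }
    return rv;
}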
Ejemplo n.º 22
0
static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy)
{
    h2_beam_lock bl;
    apr_bucket *b, *next;

    if (enter_yellow(beam, &bl) == APR_SUCCESS) {
        /* even when beam buckets are split, only the one where
         * refcount drops to 0 will call us */
        H2_BPROXY_REMOVE(proxy);
        /* invoked from green thread, the last beam bucket for the red
         * bucket bred is about to be destroyed.
         * remove it from the hold, where it should be now */
        if (proxy->bred) {
            for (b = H2_BLIST_FIRST(&beam->hold); 
                 b != H2_BLIST_SENTINEL(&beam->hold);
                 b = APR_BUCKET_NEXT(b)) {
                 if (b == proxy->bred) {
                    break;
                 }
            }
            if (b != H2_BLIST_SENTINEL(&beam->hold)) {
                /* bucket is in hold as it should be, mark this one
                 * and all before it for purging. We might have placed meta
                 * buckets without a green proxy into the hold before it 
                 * and schedule them for purging now */
                for (b = H2_BLIST_FIRST(&beam->hold); 
                     b != H2_BLIST_SENTINEL(&beam->hold);
                     b = next) {
                    next = APR_BUCKET_NEXT(b);
                    if (b == proxy->bred) {
                        APR_BUCKET_REMOVE(b);
                        H2_BLIST_INSERT_TAIL(&beam->purge, b);
                        break;
                    }
                    else if (APR_BUCKET_IS_METADATA(b)) {
                        APR_BUCKET_REMOVE(b);
                        H2_BLIST_INSERT_TAIL(&beam->purge, b);
                    }
                    else {
                        /* another data bucket before this one in hold. this
                         * is normal since DATA buckets need not be destroyed
                         * in order */
                    }
                }
                
                proxy->bred = NULL;
            }
            else {
                /* it should be there unless we screwed up */
                ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, beam->red_pool, 
                              APLOGNO(03384) "h2_beam(%d-%s): emitted bucket not "
                              "in hold, n=%d", beam->id, beam->tag, 
                              (int)proxy->n);
                AP_DEBUG_ASSERT(!proxy->bred);
            }
        }
        /* notify anyone waiting on space to become available */
        if (!bl.mutex) {
            r_purge_reds(beam);
        }
        else if (beam->m_cond) {
            apr_thread_cond_broadcast(beam->m_cond);
        }
        leave_yellow(beam, &bl);
    }
}
Ejemplo n.º 23
0
static apr_status_t append_bucket(h2_bucket_beam *beam, 
                                  apr_bucket *bred,
                                  apr_read_type_e block,
                                  apr_pool_t *pool,
                                  h2_beam_lock *pbl)
{
    const char *data;
    apr_size_t len;
    apr_off_t space_left = 0;
    apr_status_t status;
    
    if (APR_BUCKET_IS_METADATA(bred)) {
        if (APR_BUCKET_IS_EOS(bred)) {
            beam->closed = 1;
        }
        APR_BUCKET_REMOVE(bred);
        H2_BLIST_INSERT_TAIL(&beam->red, bred);
        return APR_SUCCESS;
    }
    else if (APR_BUCKET_IS_FILE(bred)) {
        /* file bucket lengths do not really count */
    }
    else {
        space_left = calc_space_left(beam);
        if (space_left > 0 && bred->length == ((apr_size_t)-1)) {
            const char *data;
            status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ);
            if (status != APR_SUCCESS) {
                return status;
            }
        }
        
        if (space_left < bred->length) {
            status = r_wait_space(beam, block, pbl, &space_left);
            if (status != APR_SUCCESS) {
                return status;
            }
            if (space_left <= 0) {
                return APR_EAGAIN;
            }
        }
        /* space available, maybe need bucket split */
    }
    

    /* The fundamental problem is that reading a red bucket from
     * a green thread is a total NO GO, because the bucket might use
     * its pool/bucket_alloc from a foreign thread and that will
     * corrupt. */
    status = APR_ENOTIMPL;
    if (beam->closed && bred->length > 0) {
        status = APR_EOF;
    }
    else if (APR_BUCKET_IS_TRANSIENT(bred)) {
        /* this takes care of transient buckets and converts them
         * into heap ones. Other bucket types might or might not be
         * affected by this. */
        status = apr_bucket_setaside(bred, pool);
    }
    else if (APR_BUCKET_IS_HEAP(bred)) {
        /* For heap buckets read from a green thread is fine. The
         * data will be there and live until the bucket itself is
         * destroyed. */
        status = APR_SUCCESS;
    }
    else if (APR_BUCKET_IS_POOL(bred)) {
        /* pool buckets are bastards that register at pool cleanup
         * to morph themselves into heap buckets. That may happen anytime,
         * even after the bucket data pointer has been read. So at
         * any time inside the green thread, the pool bucket memory
         * may disappear. yikes. */
        status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ);
        if (status == APR_SUCCESS) {
            apr_bucket_heap_make(bred, data, len, NULL);
        }
    }
    else if (APR_BUCKET_IS_FILE(bred)) {
        /* For file buckets the problem is their internal readpool that
         * is used on the first read to allocate buffer/mmap.
         * Since setting aside a file bucket will de-register the
         * file cleanup function from the previous pool, we need to
         * call that from a red thread. 
         * Additionally, we allow callbacks to prevent beaming file
         * handles across. The use case for this is to limit the number 
         * of open file handles and rather use a less efficient beam
         * transport. */
        apr_file_t *fd = ((apr_bucket_file *)bred->data)->fd;
        int can_beam = 1;
        if (beam->last_beamed != fd && beam->can_beam_fn) {
            can_beam = beam->can_beam_fn(beam->can_beam_ctx, beam, fd);
        }
        if (can_beam) {
            beam->last_beamed = fd;
            status = apr_bucket_setaside(bred, pool);
        }
        /* else: enter ENOTIMPL case below */
    }
    
    if (status == APR_ENOTIMPL) {
        /* we have no knowledge about the internals of this bucket,
         * but hope that after read, its data stays immutable for the
         * lifetime of the bucket. (see pool bucket handling above for
         * a counter example).
         * We do the read while in a red thread, so that the bucket may
         * use pools/allocators safely. */
        if (space_left < APR_BUCKET_BUFF_SIZE) {
            space_left = APR_BUCKET_BUFF_SIZE;
        }
        if (space_left < bred->length) {
            apr_bucket_split(bred, space_left);
        }
        status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ);
        if (status == APR_SUCCESS) {
            status = apr_bucket_setaside(bred, pool);
        }
    }
    
    if (status != APR_SUCCESS && status != APR_ENOTIMPL) {
        return status;
    }
    
    APR_BUCKET_REMOVE(bred);
    H2_BLIST_INSERT_TAIL(&beam->red, bred);
    beam->sent_bytes += bred->length;
    
    return APR_SUCCESS;
}
Ejemplo n.º 24
0
static apr_status_t line_edit_filter(ap_filter_t* f, apr_bucket_brigade* bb) {
  int i, j;
  unsigned int match ;
  unsigned int nmatch = 10 ;
  ap_regmatch_t pmatch[10] ;
  const char* bufp;
  const char* subs ;
  apr_size_t bytes ;
  apr_size_t fbytes ;
  apr_size_t offs ;
  const char* buf ;
  const char* le = NULL ;
  const char* le_n ;
  const char* le_r ;
  char* fbuf ;
  apr_bucket* b = APR_BRIGADE_FIRST(bb) ;
  apr_bucket* b1 ;
  int found = 0 ;
  apr_status_t rv ;

  apr_bucket_brigade* bbline ;
  line_edit_cfg* cfg
	= ap_get_module_config(f->r->per_dir_config, &line_edit_module) ;
  rewriterule* rules = (rewriterule*) cfg->rewriterules->elts ;
  rewriterule* newrule;

  line_edit_ctx* ctx = f->ctx ;
  if (ctx == NULL) {

    /* check env to see if we're wanted, to give basic control with 2.0 */
    buf = apr_table_get(f->r->subprocess_env, "LineEdit");
    if (buf && f->r->content_type) {
      char* lcbuf = apr_pstrdup(f->r->pool, buf) ;
      char* lctype = apr_pstrdup(f->r->pool, f->r->content_type) ;
      char* c ;

      for (c = lcbuf; *c; ++c)
	if (isupper(*c))
	  *c = tolower(*c) ;

      for (c = lctype; *c; ++c)
	if (isupper(*c))
	  *c = tolower(*c) ;
	else if (*c == ';') {
	  *c = 0 ;
	  break ;
	}

      if (!strstr(lcbuf, lctype)) {
	/* don't filter this content type */
	ap_filter_t* fnext = f->next ;
	ap_remove_output_filter(f) ;
	return ap_pass_brigade(fnext, bb) ;
      }
    }

    ctx = f->ctx = apr_palloc(f->r->pool, sizeof(line_edit_ctx)) ;
    ctx->bbsave = apr_brigade_create(f->r->pool, f->c->bucket_alloc) ;

    /* If we have any regex matches, we'll need to copy everything, so we
     * have null-terminated strings to parse.  That's a lot of memory if
     * we're streaming anything big.  So we'll use (and reuse) a local
     * subpool.  Fall back to the request pool if anything bad happens.
     */
    ctx->lpool = f->r->pool ;
    for (i = 0; i < cfg->rewriterules->nelts; ++i) {
      if ( rules[i].flags & M_REGEX ) {
        if (apr_pool_create(&ctx->lpool, f->r->pool) != APR_SUCCESS) {
	  ctx->lpool = f->r->pool ;
        }
        break ;
      }
    }
    /* If we have env interpolation, we'll need a private copy of
     * our rewrite rules with this requests env.  Otherwise we can
     * save processing time by using the original.
     *
     * If one ENV is found, we also have to copy all previous and
     * subsequent rules, even those with no interpolation.
     */
    ctx->rewriterules = cfg->rewriterules;
    for (i = 0; i < cfg->rewriterules->nelts; ++i) {
      found |= (rules[i].flags & M_ENV) ;
      if ( found ) {
	if (ctx->rewriterules == cfg->rewriterules) {
	  ctx->rewriterules = apr_array_make(f->r->pool,
		cfg->rewriterules->nelts, sizeof(rewriterule));
	  for (j = 0; j < i; ++j) {
            newrule = apr_array_push (((line_edit_ctx*)ctx)->rewriterules) ;
	    newrule->from = rules[j].from;
	    newrule->to = rules[j].to;
	    newrule->flags = rules[j].flags;
	    newrule->length = rules[j].length;
	  }
	}
	/* this rule needs to be interpolated */
        newrule = apr_array_push (((line_edit_ctx*)ctx)->rewriterules) ;
	newrule->from = rules[i].from;
	if (rules[i].flags & M_ENV) {
	  newrule->to = interpolate_env(f->r, rules[i].to);
	} else {
	  newrule->to = rules[i].to ;
	}
	newrule->flags = rules[i].flags;
	newrule->length = rules[i].length;
      }
    }
    /* for back-compatibility with Apache 2.0, set some protocol stuff */
    apr_table_unset(f->r->headers_out, "Content-Length") ;
    apr_table_unset(f->r->headers_out, "Content-MD5") ;
    apr_table_unset(f->r->headers_out, "Accept-Ranges") ;
  }
  /* by now our rules are in ctx->rewriterules */
  rules = (rewriterule*) ctx->rewriterules->elts ;

  /* bbline is what goes to the next filter,
   * so we (can) have a new one each time.
   */
  bbline = apr_brigade_create(f->r->pool, f->c->bucket_alloc) ;

  /* first ensure we have no mid-line breaks that might be in the
   * middle of a search string causing us to miss it!  At the same
   * time we split into lines to avoid pattern-matching over big
   * chunks of memory.
   */
  while ( b != APR_BRIGADE_SENTINEL(bb) ) {
    if ( !APR_BUCKET_IS_METADATA(b) ) {
      if ( apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS ) {
	if ( bytes == 0 ) {
	  APR_BUCKET_REMOVE(b) ;
	} else while ( bytes > 0 ) {
	  switch (cfg->lineend) {

	  case LINEEND_UNIX:
	    le = memchr(buf, '\n', bytes) ;
	    break ;

	  case LINEEND_MAC:
	    le = memchr(buf, '\r', bytes) ;
	    break ;

	  case LINEEND_DOS:
	    /* Edge-case issue: if a \r\n spans buckets it'll get missed.
	     * Not a problem for present purposes, but would be an issue
	     * if we claimed to support pattern matching on the lineends.
	     */
	    found = 0 ;
	    le = memchr(buf+1, '\n', bytes-1) ;
	    while ( le && !found ) {
	      if ( le[-1] == '\r' ) {
	        found = 1 ;
	      } else {
	        le = memchr(le+1, '\n', bytes-1 - (le+1 - buf)) ;
	      }
	    }
	    if ( !found )
	      le = 0 ;
	    break;

	  case LINEEND_ANY:
	  case LINEEND_UNSET:
	    /* Edge-case notabug: if a \r\n spans buckets it'll get seen as
	     * two line-ends.  It'll insert the \n as a one-byte bucket.
	     */
	    le_n = memchr(buf, '\n', bytes) ;
	    le_r = memchr(buf, '\r', bytes) ;
	    if ( le_n != NULL )
	      if ( le_n == le_r + sizeof(char))
	        le = le_n ;
	      else if ( (le_r < le_n) && (le_r != NULL) )
	        le = le_r ;
	      else
	        le = le_n ;
	    else
	      le = le_r ;
	    break;

	  case LINEEND_NONE:
	    le = 0 ;
	    break;

	  case LINEEND_CUSTOM:
	    le = memchr(buf, cfg->lechar, bytes) ;
	    break;
	  }
	  if ( le ) {
	    /* found a lineend in this bucket. */
	    offs = 1 + (apr_size_t)(le - buf) ;
	    apr_bucket_split(b, offs) ;
	    bytes -= offs ;
	    buf += offs ;
	    b1 = APR_BUCKET_NEXT(b) ;
	    APR_BUCKET_REMOVE(b);

	    /* Is there any previous unterminated content ? */
	    if ( !APR_BRIGADE_EMPTY(ctx->bbsave) ) {
	      /* append this to any content waiting for a lineend */
	      APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b) ;
	      rv = apr_brigade_pflatten(ctx->bbsave, &fbuf, &fbytes, f->r->pool) ;
	      /* make b a new bucket of the flattened stuff */
	      b = apr_bucket_pool_create(fbuf, fbytes, f->r->pool,
			f->r->connection->bucket_alloc) ;

	      /* bbsave has been consumed, so clear it */
	      apr_brigade_cleanup(ctx->bbsave) ;
	    }
	    /* b now contains exactly one line */
	    APR_BRIGADE_INSERT_TAIL(bbline, b);
	    b = b1 ;
	  } else {
	    /* no lineend found.  Remember the dangling content */
	    APR_BUCKET_REMOVE(b);
	    APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b);
	    bytes = 0 ;
	  }
	} /* while bytes > 0 */
      } else {
	/* bucket read failed - oops !  Let's remove it. */
	APR_BUCKET_REMOVE(b);
      }
    } else if ( APR_BUCKET_IS_EOS(b) ) {
      /* If there's data to pass, send it in one bucket */
      if ( !APR_BRIGADE_EMPTY(ctx->bbsave) ) {
        rv = apr_brigade_pflatten(ctx->bbsave, &fbuf, &fbytes, f->r->pool) ;
        b1 = apr_bucket_pool_create(fbuf, fbytes, f->r->pool,
		f->r->connection->bucket_alloc) ;
        APR_BRIGADE_INSERT_TAIL(bbline, b1);
      }
      apr_brigade_cleanup(ctx->bbsave) ;
      /* start again rather than segfault if a seriously buggy
       * filter in front of us sent a bogus EOS
       */
      f->ctx = NULL ;

      /* move the EOS to the new brigade */
      APR_BUCKET_REMOVE(b);
      APR_BRIGADE_INSERT_TAIL(bbline, b);
    } else {
      /* chop flush or unknown metadata bucket types */
      apr_bucket_delete(b);
    }
    /* OK, reset pointer to what's left (since we're not in a for-loop) */
    b = APR_BRIGADE_FIRST(bb) ;
  }

  /* OK, now we have a bunch of complete lines in bbline,
   * so we can apply our edit rules
   */

  /* When we get a match, we split the line into before+match+after.
   * To flatten that back into one buf every time would be inefficient.
   * So we treat it as three separate bufs to apply future rules.
   *
   * We can only reasonably do that by looping over buckets *inside*
   * the loop over rules.
   *
   * That means concepts like one-match-per-line or start-of-line-only
   * won't work, except for the first rule.  So we won't pretend.
   */
  for (i = 0; i < ctx->rewriterules->nelts; ++i) {
    for ( b = APR_BRIGADE_FIRST(bbline) ;
	b != APR_BRIGADE_SENTINEL(bbline) ;
	b = APR_BUCKET_NEXT(b) ) {
      if ( !APR_BUCKET_IS_METADATA(b)
	&& (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS)) {
	if ( rules[i].flags & M_REGEX ) {
	  bufp = apr_pstrmemdup(ctx->lpool, buf, bytes) ;
	  while ( ! ap_regexec(rules[i].from.r, bufp, nmatch, pmatch, 0) ) {
	    match = pmatch[0].rm_so ;
	    subs = ap_pregsub(f->r->pool, rules[i].to, bufp, nmatch, pmatch) ;
	    apr_bucket_split(b, match) ;
	    b1 = APR_BUCKET_NEXT(b) ;
	    apr_bucket_split(b1, pmatch[0].rm_eo - match) ;
	    b = APR_BUCKET_NEXT(b1) ;
	    apr_bucket_delete(b1) ;
	    b1 = apr_bucket_pool_create(subs, strlen(subs), f->r->pool,
		  f->r->connection->bucket_alloc) ;
	    APR_BUCKET_INSERT_BEFORE(b, b1) ;
	    bufp += pmatch[0].rm_eo ;
	  }
	} else {
	  bufp = buf ;
	  while (subs = apr_strmatch(rules[i].from.s, bufp, bytes),
			subs != NULL) {
	    match = (unsigned int)(subs - bufp) ;
	    bytes -= match ;
	    bufp += match ;
	    apr_bucket_split(b, match) ;
	    b1 = APR_BUCKET_NEXT(b) ;
	    apr_bucket_split(b1, rules[i].length) ;
	    b = APR_BUCKET_NEXT(b1) ;
	    apr_bucket_delete(b1) ;
	    bytes -= rules[i].length ;
	    bufp += rules[i].length ;
	    b1 = apr_bucket_immortal_create(rules[i].to, strlen(rules[i].to),
		f->r->connection->bucket_alloc) ;
	    APR_BUCKET_INSERT_BEFORE(b, b1) ;
	  }
	}
      }
    }
    /* If we used a local pool, clear it now */
    if ( (ctx->lpool != f->r->pool) && (rules[i].flags & M_REGEX) ) {
      apr_pool_clear(ctx->lpool) ;
    }
  }

  /* now pass it down the chain */
  rv = ap_pass_brigade(f->next, bbline) ;

  /* if we have leftover data, don't risk it going out of scope */
  for ( b = APR_BRIGADE_FIRST(ctx->bbsave) ;
	b != APR_BRIGADE_SENTINEL(ctx->bbsave) ;
	b = APR_BUCKET_NEXT(b)) {
    apr_bucket_setaside(b, f->r->pool) ;
  }

  return rv ;
}
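The non-regex branch above scans each line with a precompiled apr_strmatch pattern (rules[i].from.s). A standalone sketch of that API with invented sample data:

#include <stdio.h>
#include <string.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_strmatch.h>

int main(void)
{
    apr_pool_t *pool;
    const apr_strmatch_pattern *pat;
    const char *buf = "one needle two needle three";
    const char *hit;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    /* Precompile once (1 = case-sensitive), then reuse for every buffer. */
    pat = apr_strmatch_precompile(pool, "needle", 1);

    hit = apr_strmatch(pat, buf, strlen(buf));
    while (hit) {
        printf("match at offset %d\n", (int)(hit - buf));
        hit += strlen("needle");
        hit = apr_strmatch(pat, hit, strlen(hit));
    }

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}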
Ejemplo n.º 25
0
static apr_status_t fauth_output_filter(ap_filter_t *f, apr_bucket_brigade *pbbIn) {
	request_rec *r = f->r;
	conn_rec *c = r->connection;
	apr_bucket *pbktOut, *pbktIn;
	apr_bucket_brigade *pbbOut;
	char *uri, *buf, *hash, *req;
	int ig=0;
	int ie=0;

	req = malloc(sizeof(char)*(strlen(r->the_request)+1));
	strncpy(req,r->the_request,strlen(r->the_request));
	req[strlen(r->the_request)]='\0';
	uri = strtok(req, " ");
	if(uri) uri = strtok(NULL, " ");
	if(!uri) {
		free(req);
		return ap_pass_brigade(f->next,pbbIn);
	}
	uri[strlen(uri)]='\0';

	ig = is_gs_req(uri);
	ie = is_ee_req(uri);

	if( !(r->status==HTTP_NOT_FOUND||r->status==HTTP_FORBIDDEN) ||
		r->method_number!=M_GET || ( !is_sym_req(uri) && !ig && !ie ) ) {
		free(req);
		return ap_pass_brigade(f->next,pbbIn);
	}

	if(ig||ie) {
		uri = malloc(sizeof(char)*(strlen(r->hostname)+2));
		sprintf(uri,"/%s",r->hostname);
	}
	hash=malloc(sizeof(char)*(HASH_MAXLENGTH+1));
	strncpy(hash,dbapi_lookup(uri),HASH_MAXLENGTH);
	hash[HASH_MAXLENGTH]='\0';

	if(strncmp(hash,"404 Not Found",13)==0||hash[0]=='{') {
		free(hash); free(req);
		return ap_pass_brigade(f->next,pbbIn);
	}

	pbbOut=apr_brigade_create(r->pool, c->bucket_alloc);
	for (pbktIn = APR_BRIGADE_FIRST(pbbIn);
		pbktIn != APR_BRIGADE_SENTINEL(pbbIn);
		pbktIn = APR_BUCKET_NEXT(pbktIn)) {
		APR_BUCKET_REMOVE(pbktIn);
	}

	if(ig) {
		char *hasho=malloc(sizeof(char)*(HASH_MAXLENGTH+1));
		memcpy(hasho,hash,strlen(hash)+1);
		snprintf(hash,HASH_MAXLENGTH,"<html><head><meta name=\"_globalsign-domain-verification\" content=\"%s\" /></head></html>",hasho);
		hash[HASH_MAXLENGTH]='\0';
		free(hasho);
	} else if (ie) {
		char *hasho=malloc(sizeof(char)*(HASH_MAXLENGTH+1));
		memcpy(hasho,hash,strlen(hash)+1);
		snprintf(hash,HASH_MAXLENGTH,"%s",hasho);
		hash[HASH_MAXLENGTH]='\0';
		free(hasho);
	}

	buf = apr_bucket_alloc(strlen(hash)+1, c->bucket_alloc);
	strcpy(buf,hash);
	buf[strlen(hash)]='\0';
	free(hash);
	free(req);
	pbktOut = apr_bucket_heap_create(buf, strlen(buf), apr_bucket_free, c->bucket_alloc);
	APR_BRIGADE_INSERT_TAIL(pbbOut,pbktOut);
	apr_brigade_cleanup(pbbIn);
	f->r->status=200;
	apr_bucket *pbktEOS=apr_bucket_eos_create(c->bucket_alloc);
	APR_BRIGADE_INSERT_TAIL(pbbOut,pbktEOS);
	return ap_pass_brigade(f->next,pbbOut);
}
Ejemplo n.º 26
0
/* TODO: cleanup ctx */
static apr_status_t
zlibdict_output_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_bucket *b;
    zlibdict_ctx_t *ctx = f->ctx;
    request_rec *r = f->r;
    const char *client_accepts;
    apr_status_t status = APR_SUCCESS;
    apr_pool_t *subpool;
    int zerr;

    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
                  "triggered zlibdict_output_filter");

    /* Do nothing if asked to filter nothing. */
    if (APR_BRIGADE_EMPTY(bb)) {
        return APR_SUCCESS;
    }

    /* First time we are called for this response? */
    if (!ctx) {
        client_accepts = apr_table_get(r->headers_in, "Accept-Encoding");
        if (client_accepts == NULL ||
            !zlibdict__header_contains(r->pool, client_accepts)) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                          "Not compressing (no Accept-Encoding: zlibdict)");
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        ctx = f->ctx = apr_pcalloc(r->pool, sizeof(*ctx));
        ctx->bb = apr_brigade_create(r->pool, f->c->bucket_alloc);
        ctx->buf = apr_palloc(r->pool, DEFAULT_BUFFERSIZE);

        /* zstream must be NULL'd out. */
        memset(&ctx->zstr, 0, sizeof(z_stream));
        zerr = deflateInit2(&ctx->zstr, DEFAULT_COMPRESSION,
                            Z_DEFLATED,
                            DEFAULT_WINDOWSIZE, DEFAULT_MEMLEVEL,
                            Z_DEFAULT_STRATEGY);

        deflateSetDictionary(&ctx->zstr, (Bytef *)propfind_dictionary,
                             strlen(propfind_dictionary));

        /* Set Content-Encoding header so our client knows how to handle 
           this data. */
        apr_table_mergen(r->headers_out, "Content-Encoding", "zlibdict");
    }

    /* Read the data from the handler and compress it with a dictionary. */
    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b)) {

        const char *data;
        void *write_buf;
        size_t len;
        size_t buf_size, write_len;

        if (APR_BUCKET_IS_EOS(b)) {
            deflateEnd(&ctx->zstr);

            /* Remove EOS from the old list, and insert into the new. */
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(ctx->bb, b);

            return ap_pass_brigade(f->next, ctx->bb);
        }

        if (APR_BUCKET_IS_METADATA(b))
            continue;

        status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
        if (status != APR_SUCCESS)
            break;

        /* The largest buffer we should need is 0.1% larger than the
         uncompressed data, + 12 bytes. This info comes from zlib.h.  */
        buf_size = len + (len / 1000) + 13;
        apr_pool_create(&subpool, r->pool);
        write_buf = apr_palloc(subpool, buf_size);
        
        ctx->zstr.next_in = (Bytef *)data;  /* Casting away const! */
        ctx->zstr.avail_in = (uInt) len;

        zerr = Z_OK;
        while (ctx->zstr.avail_in > 0 && zerr != Z_STREAM_END)
        {
            ctx->zstr.next_out = write_buf;
            ctx->zstr.avail_out = (uInt) buf_size;

            zerr = deflate(&ctx->zstr, Z_FINISH);
            if (zerr < 0)
                return -1; /* TODO: fix error */
            write_len = buf_size - ctx->zstr.avail_out;
            if (write_len > 0) {
                apr_bucket *b_out;

                b_out = apr_bucket_heap_create(write_buf, write_len,
                                               NULL, f->c->bucket_alloc);
                APR_BRIGADE_INSERT_TAIL(ctx->bb, b_out);
                /* Send what we have right now to the next filter. */
                status = ap_pass_brigade(f->next, ctx->bb);
                if (status != APR_SUCCESS) {
                    apr_pool_destroy(subpool);
                    return status;
                }
            }
        }

        apr_pool_destroy(subpool);
    }
    
    return status;
}
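The filter above compresses with a preset zlib dictionary via deflateSetDictionary(); a client can only inflate the result if it supplies the same dictionary when inflate() reports Z_NEED_DICT. A standalone sketch of the compression side (the dictionary and input here are placeholders, not the module's propfind_dictionary):

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
    static const char dict[] = "<D:propfind xmlns:D=\"DAV:\">";
    static const char input[] =
        "<D:propfind xmlns:D=\"DAV:\"><D:prop/></D:propfind>";
    unsigned char out[256];
    z_stream strm;
    int zerr;

    /* zalloc/zfree/opaque must be zeroed before deflateInit2(). */
    memset(&strm, 0, sizeof(strm));
    zerr = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                        15, 8, Z_DEFAULT_STRATEGY);
    if (zerr != Z_OK)
        return 1;
    deflateSetDictionary(&strm, (const Bytef *)dict, (uInt)(sizeof(dict) - 1));

    strm.next_in = (Bytef *)input;
    strm.avail_in = (uInt)(sizeof(input) - 1);
    strm.next_out = out;
    strm.avail_out = (uInt)sizeof(out);

    /* One-shot compression of a small buffer; Z_FINISH is fine here
     * because everything fits in a single output buffer. */
    zerr = deflate(&strm, Z_FINISH);
    if (zerr == Z_STREAM_END)
        printf("compressed to %lu bytes\n",
               (unsigned long)(sizeof(out) - strm.avail_out));

    deflateEnd(&strm);
    return 0;
}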
Ejemplo n.º 27
0
static apr_status_t substitute_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_size_t bytes;
    apr_size_t len;
    apr_size_t fbytes;
    const char *buff;
    const char *nl = NULL;
    char *bflat;
    apr_bucket *b;
    apr_bucket *tmp_b;
    apr_bucket_brigade *tmp_bb = NULL;
    apr_status_t rv;
    subst_dir_conf *cfg =
    (subst_dir_conf *) ap_get_module_config(f->r->per_dir_config,
                                             &substitute_module);

    substitute_module_ctx *ctx = f->ctx;

    /*
     * First time around? Create the saved bb that we used for each pass
     * through. Note that we can also get here when we explicitly clear ctx,
     * for error handling
     */
    if (!ctx) {
        f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
        /*
         * Create all the temporary brigades we need and reuse them to avoid
         * creating them over and over again from r->pool which would cost a
         * lot of memory in some cases.
         */
        ctx->linebb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        ctx->linesbb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        ctx->pattbb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        /*
         * Everything to be passed to the next filter goes in
         * here, our pass brigade.
         */
        ctx->passbb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        /* Create our temporary pool only once */
        apr_pool_create(&(ctx->tpool), f->r->pool);
        apr_table_unset(f->r->headers_out, "Content-Length");
    }

    /*
     * Shortcircuit processing
     */
    if (APR_BRIGADE_EMPTY(bb))
        return APR_SUCCESS;

    /*
     * Here's the concept:
     *  Read in the data and look for newlines. Once we
     *  find a full "line", add it to our working brigade.
     *  If we've finished reading the brigade and we have
     *  any left over data (not a "full" line), store that
     *  for the next pass.
     *
     * Note: anything stored in ctx->linebb is guaranteed not to contain
     * a newline char, so we don't concat that bb with the
     * new bb, since we would spend time searching for a newline
     * in data where we know none exists. Instead, we simply scan
     * our current bb and, if we see a newline, prepend ctx->linebb
     * to the front of it. This makes the code less straight-
     * forward (otherwise we could APR_BRIGADE_CONCAT(ctx->linebb, bb)
     * and just scan for newlines and not bother with needing to know
     * when ctx->linebb needs to be reset), but it is also faster. We'll take
     * the speed.
     *
     * Note: apr_brigade_split_line would be nice here, but we
     * really can't use it since we need more control and we want
     * to re-use already read bucket data.
     *
     * See mod_include if still confused :)
     */

    while ((b = APR_BRIGADE_FIRST(bb)) && (b != APR_BRIGADE_SENTINEL(bb))) {
        if (APR_BUCKET_IS_EOS(b)) {
            /*
             * if we see the EOS, then we need to pass along everything we
             * have. But if the ctx->linebb isn't empty, then we need to add
             * that to the end of what we'll be passing.
             */
            if (!APR_BRIGADE_EMPTY(ctx->linebb)) {
                rv = apr_brigade_pflatten(ctx->linebb, &bflat,
                                          &fbytes, ctx->tpool);
                if (rv != APR_SUCCESS)
                    goto err;
                if (fbytes > cfg->max_line_length) {
                    rv = APR_ENOMEM;
                    goto err;
                }
                tmp_b = apr_bucket_transient_create(bflat, fbytes,
                                                f->r->connection->bucket_alloc);
                rv = do_pattmatch(f, tmp_b, ctx->pattbb, ctx->tpool);
                if (rv != APR_SUCCESS)
                    goto err;
                APR_BRIGADE_CONCAT(ctx->passbb, ctx->pattbb);
                apr_brigade_cleanup(ctx->linebb);
            }
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(ctx->passbb, b);
        }
        /*
         * No need to handle FLUSH buckets separately as we call
         * ap_pass_brigade anyway at the end of the loop.
         */
        else if (APR_BUCKET_IS_METADATA(b)) {
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(ctx->passbb, b);
        }
        else {
            /*
             * We have actual "data" so read in as much as we can and start
             * scanning and splitting from our read buffer
             */
            rv = apr_bucket_read(b, &buff, &bytes, APR_BLOCK_READ);
            if (rv != APR_SUCCESS || bytes == 0) {
                apr_bucket_delete(b);
            }
            else {
                int num = 0;
                while (bytes > 0) {
                    nl = memchr(buff, APR_ASCII_LF, bytes);
                    if (nl) {
                        len = (apr_size_t) (nl - buff) + 1;
                        /* split *after* the newline */
                        apr_bucket_split(b, len);
                        /*
                         * We've likely read more data, so bypass rereading
                         * bucket data and continue scanning through this
                         * buffer
                         */
                        bytes -= len;
                        buff += len;
                        /*
                         * we need b to be updated for future potential
                         * splitting
                         */
                        tmp_b = APR_BUCKET_NEXT(b);
                        APR_BUCKET_REMOVE(b);
                        /*
                         * Hey, we found a newline! Don't forget the old
                         * stuff that needs to be added to the front. So we
                         * add the split bucket to the end, flatten the whole
                         * bb, morph the whole shebang into a bucket which is
                         * then added to the tail of the newline bb.
                         */
                        if (!APR_BRIGADE_EMPTY(ctx->linebb)) {
                            APR_BRIGADE_INSERT_TAIL(ctx->linebb, b);
                            rv = apr_brigade_pflatten(ctx->linebb, &bflat,
                                                      &fbytes, ctx->tpool);
                            if (rv != APR_SUCCESS)
                                goto err;
                            if (fbytes > cfg->max_line_length) {
                                /* Avoid pflattening further lines, we will
                                 * abort later on anyway.
                                 */
                                rv = APR_ENOMEM;
                                goto err;
                            }
                            b = apr_bucket_transient_create(bflat, fbytes,
                                            f->r->connection->bucket_alloc);
                            apr_brigade_cleanup(ctx->linebb);
                        }
                        rv = do_pattmatch(f, b, ctx->pattbb, ctx->tpool);
                        if (rv != APR_SUCCESS)
                            goto err;
                        /*
                         * Count how many buckets we have in ctx->passbb
                         * so far. Yes, this is correct: we are effectively
                         * counting ctx->passbb and not ctx->pattbb, since
                         * num is not reset on every iteration.
                         */
                        for (b = APR_BRIGADE_FIRST(ctx->pattbb);
                             b != APR_BRIGADE_SENTINEL(ctx->pattbb);
                             b = APR_BUCKET_NEXT(b)) {
                            num++;
                        }
                        APR_BRIGADE_CONCAT(ctx->passbb, ctx->pattbb);
                        /*
                         * If the number of buckets in ctx->passbb reaches an
                         * "insane" level, we consume a lot of memory just for
                         * the bucket structures. So let's flush them down the
                         * chain in this case, which clears ctx->passbb and
                         * frees the buckets' memory for further processing.
                         * Usually this condition should not become true, but
                         * it is a safety measure for edge cases.
                         */
                        if (num > AP_MAX_BUCKETS) {
                            b = apr_bucket_flush_create(
                                                f->r->connection->bucket_alloc);
                            APR_BRIGADE_INSERT_TAIL(ctx->passbb, b);
                            rv = ap_pass_brigade(f->next, ctx->passbb);
                            apr_brigade_cleanup(ctx->passbb);
                            num = 0;
                            apr_pool_clear(ctx->tpool);
                            if (rv != APR_SUCCESS)
                                goto err;
                        }
                        b = tmp_b;
                    }
                    else {
                        /*
                         * no newline in whatever is left of this buffer so
                         * tuck data away and get next bucket
                         */
                        APR_BUCKET_REMOVE(b);
                        APR_BRIGADE_INSERT_TAIL(ctx->linebb, b);
                        bytes = 0;
                    }
                }
            }
        }
        if (!APR_BRIGADE_EMPTY(ctx->passbb)) {
            rv = ap_pass_brigade(f->next, ctx->passbb);
            apr_brigade_cleanup(ctx->passbb);
            if (rv != APR_SUCCESS)
                goto err;
        }
        apr_pool_clear(ctx->tpool);
    }

    /* Anything left we want to save/setaside for the next go-around */
    if (!APR_BRIGADE_EMPTY(ctx->linebb)) {
        /*
         * Provide ap_save_brigade with an existing empty brigade
         * (ctx->linesbb) to avoid creating a new one.
         */
        ap_save_brigade(f, &(ctx->linesbb), &(ctx->linebb), f->r->pool);
        tmp_bb = ctx->linebb;
        ctx->linebb = ctx->linesbb;
        ctx->linesbb = tmp_bb;
    }

    return APR_SUCCESS;
err:
    if (rv == APR_ENOMEM)
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, APLOGNO(01328) "Line too long, URI %s",
                      f->r->uri);
    apr_pool_clear(ctx->tpool);
    return rv;
}
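For completeness, a filter like this is registered once in the module's hook function and then enabled per request via configuration. A minimal sketch of the registration, assuming the filter callback above and the name mod_substitute uses ("SUBSTITUTE"):

static void register_hooks(apr_pool_t *pool)
{
    /* Resource-level output filter: runs after the content generator,
     * before connection-level filters such as chunking or SSL. */
    ap_register_output_filter("SUBSTITUTE", substitute_filter, NULL,
                              AP_FTYPE_RESOURCE);
}

The filter is then typically activated with a directive such as AddOutputFilterByType SUBSTITUTE text/html together with one or more Substitute s/pattern/replacement/ directives, which populate the subst_dir_conf read at the top of the function.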
Ejemplo n.º 28
0
apr_status_t h2_beam_receive(h2_bucket_beam *beam, 
                             apr_bucket_brigade *bb, 
                             apr_read_type_e block,
                             apr_off_t readbytes)
{
    h2_beam_lock bl;
    apr_bucket *bred, *bgreen, *ng;
    int transferred = 0;
    apr_status_t status = APR_SUCCESS;
    apr_off_t remain = readbytes;
    
    /* Called from the green thread to take buckets from the beam */
    if (enter_yellow(beam, &bl) == APR_SUCCESS) {
transfer:
        if (beam->aborted) {
            if (beam->green && !APR_BRIGADE_EMPTY(beam->green)) {
                apr_brigade_cleanup(beam->green);
            }
            status = APR_ECONNABORTED;
            goto leave;
        }

        /* transfer enough buckets from our green brigade, if we have one */
        while (beam->green
               && !APR_BRIGADE_EMPTY(beam->green)
               && (readbytes <= 0 || remain >= 0)) {
            bgreen = APR_BRIGADE_FIRST(beam->green);
            if (readbytes > 0 && bgreen->length > 0 && remain <= 0) {
                break;
            }            
            APR_BUCKET_REMOVE(bgreen);
            APR_BRIGADE_INSERT_TAIL(bb, bgreen);
            remain -= bgreen->length;
            ++transferred;
        }

        /* transfer from our red brigade, transforming red buckets to
         * green ones until we have enough */
        while (!H2_BLIST_EMPTY(&beam->red) && (readbytes <= 0 || remain >= 0)) {
            bred = H2_BLIST_FIRST(&beam->red);
            bgreen = NULL;
            
            if (readbytes > 0 && bred->length > 0 && remain <= 0) {
                break;
            }
                        
            if (APR_BUCKET_IS_METADATA(bred)) {
                if (APR_BUCKET_IS_EOS(bred)) {
                    bgreen = apr_bucket_eos_create(bb->bucket_alloc);
                    beam->close_sent = 1;
                }
                else if (APR_BUCKET_IS_FLUSH(bred)) {
                    bgreen = apr_bucket_flush_create(bb->bucket_alloc);
                }
                else {
                    /* put red into hold, no green sent out */
                }
            }
            else if (APR_BUCKET_IS_FILE(bred)) {
                /* This is set aside into the target brigade pool so that 
                 * any read operation messes with that pool and not 
                 * the red one. */
                apr_bucket_file *f = (apr_bucket_file *)bred->data;
                apr_file_t *fd = f->fd;
                int setaside = (f->readpool != bb->p);
                
                if (setaside) {
                    status = apr_file_setaside(&fd, fd, bb->p);
                    if (status != APR_SUCCESS) {
                        goto leave;
                    }
                    ++beam->files_beamed;
                }
                ng = apr_brigade_insert_file(bb, fd, bred->start, bred->length, 
                                             bb->p);
#if APR_HAS_MMAP
                /* disable mmap handling as this leads to segfaults when
                 * the underlying file is changed while memory pointer has
                 * been handed out. See also PR 59348 */
                apr_bucket_file_enable_mmap(ng, 0);
#endif
                remain -= bred->length;
                ++transferred;
                APR_BUCKET_REMOVE(bred);
                H2_BLIST_INSERT_TAIL(&beam->hold, bred);
                continue;
            }
            else {
                /* create a "green" standin bucket. we took care about the
                 * underlying red bucket and its data when we placed it into
                 * the red brigade.
                 * the beam bucket will notify us on destruction that bred is
                 * no longer needed. */
                bgreen = h2_beam_bucket_create(beam, bred, bb->bucket_alloc,
                                               beam->buckets_sent++);
            }
            
            /* Place the red bucket into our hold, to be destroyed when no
             * green bucket references it any more. */
            APR_BUCKET_REMOVE(bred);
            H2_BLIST_INSERT_TAIL(&beam->hold, bred);
            beam->received_bytes += bred->length;
            if (bgreen) {
                APR_BRIGADE_INSERT_TAIL(bb, bgreen);
                remain -= bgreen->length;
                ++transferred;
            }
        }

        if (readbytes > 0 && remain < 0) {
            /* too much, put some back */
            remain = readbytes;
            for (bgreen = APR_BRIGADE_FIRST(bb);
                 bgreen != APR_BRIGADE_SENTINEL(bb);
                 bgreen = APR_BUCKET_NEXT(bgreen)) {
                 remain -= bgreen->length;
                 if (remain < 0) {
                     apr_bucket_split(bgreen, bgreen->length+remain);
                     beam->green = apr_brigade_split_ex(bb, 
                                                        APR_BUCKET_NEXT(bgreen), 
                                                        beam->green);
                     break;
                 }
            }
        }

        if (beam->closed 
            && (!beam->green || APR_BRIGADE_EMPTY(beam->green))
            && H2_BLIST_EMPTY(&beam->red)) {
            /* beam is closed and we have nothing more to receive */ 
            if (!beam->close_sent) {
                apr_bucket *b = apr_bucket_eos_create(bb->bucket_alloc);
                APR_BRIGADE_INSERT_TAIL(bb, b);
                beam->close_sent = 1;
                ++transferred;
                status = APR_SUCCESS;
            }
        }
        
        if (transferred) {
            status = APR_SUCCESS;
        }
        else if (beam->closed) {
            status = APR_EOF;
        }
        else if (block == APR_BLOCK_READ && bl.mutex && beam->m_cond) {
            status = wait_cond(beam, bl.mutex);
            if (status != APR_SUCCESS) {
                goto leave;
            }
            goto transfer;
        }
        else {
            status = APR_EAGAIN;
        }
leave:        
        leave_yellow(beam, &bl);
    }
    return status;
}
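The file-bucket branch above is worth isolating: the descriptor is set aside into the receiving brigade's pool and re-exposed there as a file bucket with mmap disabled. A minimal sketch of just that hand-over (hypothetical helper name, error handling reduced to returning the status):

static apr_status_t pass_file_bucket(apr_bucket *fb, apr_bucket_brigade *bb)
{
    apr_bucket_file *f = (apr_bucket_file *)fb->data;
    apr_file_t *fd = f->fd;
    apr_bucket *nb;
    apr_status_t rv;

    if (f->readpool != bb->p) {
        /* Tie the descriptor's lifetime to the receiver's pool. */
        rv = apr_file_setaside(&fd, fd, bb->p);
        if (rv != APR_SUCCESS)
            return rv;
    }
    nb = apr_brigade_insert_file(bb, fd, fb->start, fb->length, bb->p);
#if APR_HAS_MMAP
    /* See PR 59348: avoid mmap so a file changed underneath us cannot
     * invalidate memory already handed out to the receiver. */
    apr_bucket_file_enable_mmap(nb, 0);
#endif
    return APR_SUCCESS;
}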
Ejemplo n.º 29
0
/* Read the POST body. Code adapted from "The Apache Modules Book" by Nick Kew. */
static void read_post_body(mapcache_context_apache_request *ctx, mapcache_request_proxy *p) {
  request_rec *r = ctx->request;
  mapcache_context *mctx = (mapcache_context*)ctx;
  int bytes, eos = 0;
  apr_bucket_brigade *bb, *bbin;
  apr_bucket *b, *next;
  apr_status_t rv;
  const char *clen = apr_table_get(r->headers_in, "Content-Length");
  if(clen) {
    bytes = strtol(clen, NULL, 0);
    if(bytes >= p->rule->max_post_len) {
      mctx->set_error(mctx, HTTP_REQUEST_ENTITY_TOO_LARGE, "post request too big");
      return;
    }
  } else {
    bytes = p->rule->max_post_len;
  } 

  bb = apr_brigade_create(mctx->pool, r->connection->bucket_alloc);
  bbin = apr_brigade_create(mctx->pool, r->connection->bucket_alloc);
  p->post_len = 0;

  do {
    rv = ap_get_brigade(r->input_filters, bbin, AP_MODE_READBYTES, APR_BLOCK_READ, bytes);
    if(rv != APR_SUCCESS) {
      mctx->set_error(mctx, 500, "failed to read form input");
      return;
    }
    for(b = APR_BRIGADE_FIRST(bbin); b != APR_BRIGADE_SENTINEL(bbin); b = next) {
      /* grab the successor now: b may be moved to bb or deleted below */
      next = APR_BUCKET_NEXT(b);
      if(APR_BUCKET_IS_EOS(b)) {
        eos = 1;
      }
      if(!APR_BUCKET_IS_METADATA(b)) {
        if(b->length != (apr_size_t)(-1)) {
          p->post_len += b->length;
          if(p->post_len > p->rule->max_post_len) {
            /* over the limit: discard the data but keep draining the input */
            apr_bucket_delete(b);
            continue;
          }
        }
      }
      if(p->post_len <= p->rule->max_post_len) {
        APR_BUCKET_REMOVE(b);
        APR_BRIGADE_INSERT_TAIL(bb, b);
      }
    }
  } while (!eos);

  if(p->post_len > p->rule->max_post_len) {
    mctx->set_error(mctx, HTTP_REQUEST_ENTITY_TOO_LARGE, "request too big");
    return;
  }

  p->post_buf = apr_palloc(mctx->pool, p->post_len+1);

  rv = apr_brigade_flatten(bb, p->post_buf, &(p->post_len));
  if(rv != APR_SUCCESS) {
    mctx->set_error(mctx, 500, "error (flatten) reading form data");
    return;
  }
  p->post_buf[p->post_len] = 0;
}
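Both this example and the next one walk a brigade while removing or deleting buckets, which is only safe if the successor is fetched before the bucket is touched. A minimal sketch of that idiom, using a hypothetical helper that drains one brigade into another:

static void drain_brigade(apr_bucket_brigade *in, apr_bucket_brigade *out)
{
    apr_bucket *b, *next;

    for (b = APR_BRIGADE_FIRST(in);
         b != APR_BRIGADE_SENTINEL(in);
         b = next) {
        /* b may be unlinked or destroyed below, so save its successor now. */
        next = APR_BUCKET_NEXT(b);
        if (APR_BUCKET_IS_METADATA(b) && !APR_BUCKET_IS_EOS(b)) {
            apr_bucket_delete(b);       /* example policy: drop plain metadata */
        }
        else {
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(out, b);
        }
    }
}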
Ejemplo n.º 30
0
static int parse_form_from_POST(request_rec* r, apr_hash_t** form) {
  int bytes, eos = 0;
  apr_size_t count;
  apr_status_t rv;
  apr_bucket_brigade *bb;
  apr_bucket_brigade *bbin;
  char *buf;
  apr_bucket *b;
  apr_bucket *nextb;
  const char *clen = apr_table_get(r->headers_in, "Content-Length");
  if(clen != NULL) {
    bytes = strtol(clen, NULL, 0);
    if(bytes >= MAX_SIZE) {
      ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
		    "Request too big (%d bytes; limit %d)",
		    bytes, MAX_SIZE);
      return HTTP_REQUEST_ENTITY_TOO_LARGE;
    }
  } else {
    bytes = MAX_SIZE;
  }

  bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
  bbin = apr_brigade_create(r->pool, r->connection->bucket_alloc);
  count = 0;

  do {
    rv = ap_get_brigade(r->input_filters, bbin, AP_MODE_READBYTES,
			APR_BLOCK_READ, bytes);
    if(rv != APR_SUCCESS) {
      ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
		    "failed to read from input");
      return HTTP_INTERNAL_SERVER_ERROR;
    }
    for (b = APR_BRIGADE_FIRST(bbin);
	 b != APR_BRIGADE_SENTINEL(bbin);
	 b = nextb ) {
      nextb = APR_BUCKET_NEXT(b);
      if(APR_BUCKET_IS_EOS(b) ) {
	eos = 1;
      }
      if (!APR_BUCKET_IS_METADATA(b)) {
	if(b->length != (apr_size_t)(-1)) {
	  count += b->length;
	  if(count > MAX_SIZE) {
	    /* This is more data than we accept, so we're
	     * going to kill the request. But we have to
	     * mop it up first.
	     */
	    apr_bucket_delete(b);
	  }
	}
      }
      if(count <= MAX_SIZE) {
	APR_BUCKET_REMOVE(b);
	APR_BRIGADE_INSERT_TAIL(bb, b);
      }
    }
  } while(!eos);

  /* OK, done with the data. Kill the request if we got too much data. */
  if(count > MAX_SIZE) {
    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
		  "Request too big (%d bytes; limit %d)",
		  bytes, MAX_SIZE);
    return HTTP_REQUEST_ENTITY_TOO_LARGE;
  }

  /* We've got all the data. Now put it in a buffer and parse it. */
  buf = apr_palloc(r->pool, count+1);
  rv = apr_brigade_flatten(bb, buf, &count);
  if(rv != APR_SUCCESS) {
    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
		  "Error (flatten) reading from data");
    return HTTP_INTERNAL_SERVER_ERROR;
  }
  buf[count] = '\0';
  *form = parse_form_from_string(r, buf);
  
  return OK;

}
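A handler would typically drive the function above along these lines; this is only an illustrative sketch (the handler name is hypothetical, and parse_form_from_string() as well as the exact hash layout are defined elsewhere in the module):

static int example_form_handler(request_rec *r)
{
    apr_hash_t *form = NULL;
    int ret;

    if (r->method_number != M_POST)
        return DECLINED;

    ret = parse_form_from_POST(r, &form);
    if (ret != OK)
        return ret;     /* 413 or 500, already logged above */

    /* form now maps field names to submitted values for this request */
    ap_set_content_type(r, "text/plain");
    ap_rprintf(r, "received %u form fields\n",
               form ? apr_hash_count(form) : 0);
    return OK;
}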