Code example #1
File: mod_triger.c  Project: dreamsxin/mod_triger
static triger_bucket_t *get_data_at_tail(ap_filter_t * f,
					 apr_bucket_brigade * bb)
{
    const char *data;
    apr_bucket *b = APR_BRIGADE_LAST(bb);
    apr_size_t len = 0;
    triger_module_ctx_t *ctx = f->ctx;
    triger_bucket_t *rv = ctx->triger_bucket;

    rv->len = 0;
    rv->data = NULL;
    rv->b = NULL;
    rv->body_end_tag_pos = rv->body_start_tag_pos =
	rv->html_start_tag_pos = rv->head_start_tag_pos =
	rv->body_start_tag_pos = -1;


    /* Check for the brigade sentinel before testing the bucket type. */
    while (b != APR_BRIGADE_SENTINEL(bb) && APR_BUCKET_IS_METADATA(b))
	b = APR_BUCKET_PREV(b);

    if (b == APR_BRIGADE_SENTINEL(bb) || APR_BUCKET_IS_METADATA(b))
	return rv;

    if (apr_bucket_read(b, &data, &len, APR_BLOCK_READ) != APR_SUCCESS)
	return rv;

    rv->len = len;
    rv->data = data;
    rv->b = b;
    return rv;
}
Code example #2
File: core_filters.c  Project: pexip/os-apache2
static void remove_empty_buckets(apr_bucket_brigade *bb)
{
    apr_bucket *bucket;
    while (((bucket = APR_BRIGADE_FIRST(bb)) != APR_BRIGADE_SENTINEL(bb)) &&
           (APR_BUCKET_IS_METADATA(bucket) || (bucket->length == 0))) {
        apr_bucket_delete(bucket);
    }
}
Code example #3
File: mod_sslhaf.c  Project: Ath103/sslhaf
/**
 * This input filter will basically sniff on a connection and analyse
 * the packets when it detects SSL.
 */
static apr_status_t mod_sslhaf_in_filter(ap_filter_t *f,
                                         apr_bucket_brigade *bb,
                                         ap_input_mode_t mode,
                                         apr_read_type_e block,
                                         apr_off_t readbytes)
{
    sslhaf_cfg_t *cfg = ap_get_module_config(f->c->conn_config,
        &sslhaf_module);
    apr_status_t status;
    apr_bucket *bucket;

    // Return straight away if there's no configuration
    if (cfg == NULL) {
        return ap_get_brigade(f->next, bb, mode, block, readbytes);
    }

    // Sanity check first
    if (cfg->state == SSLHAF_STATE_GOAWAY) {
        return ap_get_brigade(f->next, bb, mode, block, readbytes);
    }

    // Get brigade
    status = ap_get_brigade(f->next, bb, mode, block, readbytes);
    if (status != APR_SUCCESS) {
        // Do not log, since we're passing the status anyway
        cfg->state = SSLHAF_STATE_GOAWAY;

        return status;
    }

    // Loop through the buckets
    for(bucket = APR_BRIGADE_FIRST(bb);
        bucket != APR_BRIGADE_SENTINEL(bb);
        bucket = APR_BUCKET_NEXT(bucket))
    {
        const char *buf = NULL;
        apr_size_t buflen = 0;

        if (!(APR_BUCKET_IS_METADATA(bucket))) {
            // Get bucket data
            status = apr_bucket_read(bucket, &buf, &buflen, APR_BLOCK_READ);
            if (status != APR_SUCCESS) {
                ap_log_error(APLOG_MARK, APLOG_ERR, status, f->c->base_server,
                    "mod_sslhaf [%s]: Error while reading input bucket",
                    SSLHAF_AP_CONN_REMOTE_IP(f->c));
                return status;
            }

            // Look into the bucket
            if (sslhaf_decode_buffer(cfg,
                    (const unsigned char *)buf, buflen) <= 0) {
                cfg->state = SSLHAF_STATE_GOAWAY;
            }
        }
    }

    return APR_SUCCESS;
}
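
A connection-level input filter like this only runs if the module registers it and attaches it to each connection. The sketch below shows the usual registration pattern; it is not taken from mod_sslhaf, and the filter name and hook function names are invented for illustration (the usual httpd module headers such as httpd.h, http_connection.h and util_filter.h are assumed to be included).

/* Illustrative registration sketch (not from mod_sslhaf). */
static int example_pre_connection(conn_rec *c, void *csd)
{
    /* Attach the sniffing input filter to every new connection. */
    ap_add_input_filter("EXAMPLE_SNIFF_IN", NULL, NULL, c);
    return OK;
}

static void example_register_hooks(apr_pool_t *p)
{
    ap_register_input_filter("EXAMPLE_SNIFF_IN", mod_sslhaf_in_filter,
                             NULL, AP_FTYPE_CONNECTION);
    ap_hook_pre_connection(example_pre_connection, NULL, NULL, APR_HOOK_MIDDLE);
}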
Code example #4
File: h2_util.c  Project: stevedien/mod_h2
static apr_status_t last_not_included(apr_bucket_brigade *bb, 
                                      apr_size_t maxlen, 
                                      int same_alloc,
                                      int *pfile_buckets_allowed,
                                      apr_bucket **pend)
{
    apr_bucket *b;
    apr_status_t status = APR_SUCCESS;
    int files_allowed = pfile_buckets_allowed? *pfile_buckets_allowed : 0;
    
    if (maxlen > 0) {
        /* Find the bucket, up to which we reach maxlen/mem bytes */
        for (b = APR_BRIGADE_FIRST(bb); 
             (b != APR_BRIGADE_SENTINEL(bb));
             b = APR_BUCKET_NEXT(b)) {
            
            if (APR_BUCKET_IS_METADATA(b)) {
                /* included */
            }
            else {
                if (maxlen == 0) {
                    *pend = b;
                    return status;
                }
                
                if (b->length == ((apr_size_t)-1)) {
                    const char *ign;
                    apr_size_t ilen;
                    status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ);
                    if (status != APR_SUCCESS) {
                        return status;
                    }
                }
                
                if (same_alloc && APR_BUCKET_IS_FILE(b)) {
                    /* we like it move it, always */
                }
                else if (files_allowed > 0 && APR_BUCKET_IS_FILE(b)) {
                    /* this has no memory footprint really unless
                     * it is read, disregard it in length count,
                     * unless we do not move the file buckets */
                    --files_allowed;
                }
                else if (maxlen < b->length) {
                    apr_bucket_split(b, maxlen);
                    maxlen = 0;
                }
                else {
                    maxlen -= b->length;
                }
            }
        }
    }
    *pend = APR_BRIGADE_SENTINEL(bb);
    return status;
}
Code example #5
File: h2_util.c  Project: stevedien/mod_h2
int h2_util_bb_has_data(apr_bucket_brigade *bb)
{
    apr_bucket *b;
    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b))
    {
        if (!APR_BUCKET_IS_METADATA(b)) {
            return 1;
        }
    }
    return 0;
}
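
For reference, here is a minimal standalone sketch (not part of mod_h2) that builds a brigade with one data bucket plus FLUSH and EOS metadata buckets and classifies each one the same way h2_util_bb_has_data() does. It needs only apr and apr-util.

#include <stdio.h>
#include <apr_general.h>
#include <apr_buckets.h>

int main(void)
{
    apr_pool_t *pool;
    apr_bucket_alloc_t *ba;
    apr_bucket_brigade *bb;
    apr_bucket *b;

    apr_initialize();
    apr_pool_create(&pool, NULL);
    ba = apr_bucket_alloc_create(pool);
    bb = apr_brigade_create(pool, ba);

    /* One data bucket followed by two metadata buckets. */
    APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_immortal_create("hello", 5, ba));
    APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_flush_create(ba));
    APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(ba));

    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b)) {
        printf("%-9s bucket: %s\n", b->type->name,
               APR_BUCKET_IS_METADATA(b) ? "metadata" : "data");
    }

    apr_brigade_destroy(bb);
    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}

The IMMORTAL bucket prints as data, while FLUSH and EOS print as metadata, so a has-data check like the one above returns 1 for this brigade.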
Code example #6
File: h2_conn_io.c  Project: NeedfulThings/mod_h2
static apr_status_t h2_conn_io_bucket_read(h2_conn_io *io,
                                           apr_read_type_e block,
                                           h2_conn_io_on_read_cb on_read_cb,
                                           void *puser, int *pdone)
{
    apr_status_t status = APR_SUCCESS;
    apr_size_t readlen = 0;
    *pdone = 0;
    
    while (status == APR_SUCCESS && !*pdone
           && !APR_BRIGADE_EMPTY(io->input)) {
        
        apr_bucket* bucket = APR_BRIGADE_FIRST(io->input);
        if (APR_BUCKET_IS_METADATA(bucket)) {
            /* we do nothing regarding any meta here */
        }
        else {
            const char *bucket_data = NULL;
            apr_size_t bucket_length = 0;
            status = apr_bucket_read(bucket, &bucket_data,
                                     &bucket_length, block);
            
            if (status == APR_SUCCESS && bucket_length > 0) {
                if (APLOGctrace2(io->connection)) {
                    char buffer[32];
                    h2_util_hex_dump(buffer, sizeof(buffer)/sizeof(buffer[0]),
                                     bucket_data, bucket_length);
                    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, io->connection,
                                  "h2_conn_io(%ld): read %d bytes: %s",
                                  io->connection->id, (int)bucket_length, buffer);
                }
                
                if (bucket_length > 0) {
                    apr_size_t consumed = 0;
                    status = on_read_cb(bucket_data, bucket_length,
                                        &consumed, pdone, puser);
                    if (status == APR_SUCCESS && bucket_length > consumed) {
                        /* We have data left in the bucket. Split it. */
                        status = apr_bucket_split(bucket, consumed);
                    }
                    readlen += consumed;
                }
            }
        }
        apr_bucket_delete(bucket);
    }
    if (readlen == 0 && status == APR_SUCCESS && block == APR_NONBLOCK_READ) {
        return APR_EAGAIN;
    }
    return status;
}
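
The loop above delegates the actual consumption to the on_read callback; when the callback reports that it consumed fewer bytes than it was offered, the bucket is split at `consumed`, so only the consumed part is removed by the apr_bucket_delete() at the bottom. A hypothetical callback illustrating that contract is sketched below; the signature is inferred from the call site and the context struct is invented for the example, neither is taken from mod_h2.

/* Hypothetical consumer: accept at most `limit` bytes per invocation. */
typedef struct {
    apr_size_t limit;   /* maximum bytes to accept per call */
    apr_size_t total;   /* bytes accepted so far */
} example_read_ctx;

static apr_status_t example_on_read(const char *data, apr_size_t len,
                                    apr_size_t *consumed, int *pdone,
                                    void *puser)
{
    example_read_ctx *ctx = puser;
    apr_size_t take = (len > ctx->limit) ? ctx->limit : len;

    /* ... process data[0 .. take-1] here ... */
    ctx->total += take;
    *consumed = take;
    *pdone = (take < len);   /* ask the outer loop to stop early */
    return APR_SUCCESS;
}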
Code example #7
File: apr_brigade.c  Project: 0jpq0/kbengine
APU_DECLARE(apr_status_t) apr_brigade_split_line(apr_bucket_brigade *bbOut,
                                                 apr_bucket_brigade *bbIn,
                                                 apr_read_type_e block,
                                                 apr_off_t maxbytes)
{
    apr_off_t readbytes = 0;

    while (!APR_BRIGADE_EMPTY(bbIn)) {
        const char *pos;
        const char *str;
        apr_size_t len;
        apr_status_t rv;
        apr_bucket *e;

        e = APR_BRIGADE_FIRST(bbIn);
        rv = apr_bucket_read(e, &str, &len, block);

        if (rv != APR_SUCCESS) {
            return rv;
        }

        pos = memchr(str, APR_ASCII_LF, len);
        /* We found a match. */
        if (pos != NULL) {
            apr_bucket_split(e, pos - str + 1);
            APR_BUCKET_REMOVE(e);
            APR_BRIGADE_INSERT_TAIL(bbOut, e);
            return APR_SUCCESS;
        }
        APR_BUCKET_REMOVE(e);
        if (APR_BUCKET_IS_METADATA(e) || len > APR_BUCKET_BUFF_SIZE/4) {
            APR_BRIGADE_INSERT_TAIL(bbOut, e);
        }
        else {
            if (len > 0) {
                rv = apr_brigade_write(bbOut, NULL, NULL, str, len);
                if (rv != APR_SUCCESS) {
                    return rv;
                }
            }
            apr_bucket_destroy(e);
        }
        readbytes += len;
        /* We didn't find an APR_ASCII_LF within the maximum line length. */
        if (readbytes >= maxbytes) {
            break;
        }
    }

    return APR_SUCCESS;
}
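
A short usage sketch for apr_brigade_split_line() follows; `pool`, `ba` and the populated input brigade `bbin` are placeholder names assumed to exist in the caller.

apr_bucket_brigade *bbline = apr_brigade_create(pool, ba);
char *line;
apr_size_t line_len;
apr_status_t rv;

/* Move up to one LF-terminated line (at most 8192 bytes) into bbline. */
rv = apr_brigade_split_line(bbline, bbin, APR_BLOCK_READ, 8192);
if (rv == APR_SUCCESS) {
    /* The line keeps its trailing LF if one was found; metadata buckets
     * encountered along the way are moved into bbline as well. */
    rv = apr_brigade_pflatten(bbline, &line, &line_len, pool);
}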
Code example #8
File: mod_dumpio.c  Project: kheradmand/Break
/*
 * Workhorse function: simply log to the current error_log
 * info about the data in the bucket as well as the data itself
 */
static void dumpit(ap_filter_t *f, apr_bucket *b)
{
    conn_rec *c = f->c;
    
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
	"mod_dumpio:  %s (%s-%s): %" APR_SIZE_T_FMT " bytes",
                f->frec->name,
                (APR_BUCKET_IS_METADATA(b)) ? "metadata" : "data",
                b->type->name,
                b->length) ;

    if (!(APR_BUCKET_IS_METADATA(b))) {
        const char *buf;
        apr_size_t nbytes;
        char *obuf;
        if (apr_bucket_read(b, &buf, &nbytes, APR_BLOCK_READ) == APR_SUCCESS) {
            if (nbytes) {
                obuf = malloc(nbytes+1);    /* use pool? */
                memcpy(obuf, buf, nbytes);
                obuf[nbytes] = '\0';
                ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                     "mod_dumpio:  %s (%s-%s): %s",
                     f->frec->name,
                     (APR_BUCKET_IS_METADATA(b)) ? "metadata" : "data",
                     b->type->name,
                     obuf);
                free(obuf);
            }
        } else {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                 "mod_dumpio:  %s (%s-%s): %s",
                 f->frec->name,
                 (APR_BUCKET_IS_METADATA(b)) ? "metadata" : "data",
                 b->type->name,
                 "error reading data");
        }
    }
}
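
The "/* use pool? */" note above points at an alternative: copy the bucket data into a pool-allocated, NUL-terminated string instead of using malloc()/free(). A hedged sketch of that variant for the success branch (not the actual mod_dumpio code; apr_pstrmemdup() is declared in apr_strings.h):

if (nbytes) {
    /* apr_pstrmemdup() copies nbytes and appends a terminating NUL;
     * the copy lives until the connection pool is destroyed. */
    char *obuf = apr_pstrmemdup(c->pool, buf, nbytes);
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                 "mod_dumpio:  %s (%s-%s): %s",
                 f->frec->name,
                 (APR_BUCKET_IS_METADATA(b)) ? "metadata" : "data",
                 b->type->name, obuf);
}

One trade-off: pool allocations are released only when the pool is cleared, so on a long-lived connection that dumps a lot of data the original malloc()/free() keeps peak memory lower.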
Code example #9
File: mod_jaxer_proc.c  Project: absynce/Jaxer
apr_status_t jxr_append_brigade(request_rec *r, apr_bucket_brigade *dest, apr_bucket_brigade *bb, int *eos_seen)
{
	apr_size_t max_msglen = MAX_PACKET_SIZE - sizeof(Jaxer_Header);
	apr_status_t rv;

	while (!APR_BRIGADE_EMPTY(bb)) 
	{
		apr_size_t readlen;
		const char *buffer;
		
		apr_bucket *e = APR_BRIGADE_FIRST(bb);

		if (APR_BUCKET_IS_EOS(e) )
		{
			apr_bucket_delete(e);
			if (eos_seen)
				*eos_seen = 1;
			continue;
		}
		if (APR_BUCKET_IS_METADATA(e)) {
			apr_bucket_delete(e);
			continue;
		}

		
		/* Read the bucket now */
		if ((rv = apr_bucket_read(e, &buffer, &readlen, APR_BLOCK_READ)) != APR_SUCCESS) 
		{
			ap_log_perror(APLOG_MARK, APLOG_INFO, rv, r->pool, "mod_jaxer: can't read data from handler");
			return rv;
		}
		
		if (readlen > max_msglen)
		{
			apr_bucket_split(e, max_msglen);
		}
		else
		{
			APR_BUCKET_REMOVE(e);
			APR_BRIGADE_INSERT_TAIL(dest, e);
		}
	}
	if ((rv=apr_brigade_destroy(bb)) != APR_SUCCESS)
	{
		ap_log_perror(APLOG_MARK, APLOG_INFO, rv, r->pool, "mod_jaxer: failed to destroy brigade.");
		return rv;
	}

	return APR_SUCCESS;
}
Code example #10
File: mod_ironbee.c  Project: BillTheBest/ironbee
/**
 * Sends bucket data to ironbee for processing.
 */
static void process_bucket(ap_filter_t *f, apr_bucket *b)
{
    conn_rec *c = f->c;
    ironbee_conn_context *ctx = f->ctx;
    ib_conndata_t icdata;
    const char *bdata;
    apr_size_t nbytes;
    apr_status_t rc;

    if (APR_BUCKET_IS_METADATA(b)) {
        return;
    }

    /* Translate a bucket to a ib_conndata_t structure to be passed
     * to IronBee. */
    rc = apr_bucket_read(b, &bdata, &nbytes, APR_BLOCK_READ);
    if (rc != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, c->base_server,
                     IB_PRODUCT_NAME ": %s (%s): error reading %s data",
                     f->frec->name, b->type->name,
                     ((ctx->direction == IRONBEE_REQUEST) ? "request" : "response"));
        return;
    }
    icdata.conn = ctx->iconn;
    icdata.dlen = nbytes;
    icdata.data = (uint8_t *)bdata;


    if (ctx->direction == IRONBEE_REQUEST) {
        ctx->status = ib_state_notify_conn_data_in(ironbee, &icdata);
    }
    else {
        ctx->status = ib_state_notify_conn_data_out(ironbee, &icdata);
    }
    if (ctx->status != IB_OK) {
        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c,
                     IB_PRODUCT_NAME ": signaled error in %s",
                     ((ctx->direction == IRONBEE_REQUEST) ? "request" : "response"));
    }
}
Code example #11
File: h2_filter.c  Project: Karm/mod_h2
static apr_status_t consume_brigade(h2_filter_cin *cin, 
                                    apr_bucket_brigade *bb, 
                                    apr_read_type_e block)
{
    apr_status_t status = APR_SUCCESS;
    apr_size_t readlen = 0;
    
    while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
        
        apr_bucket* bucket = APR_BRIGADE_FIRST(bb);
        if (APR_BUCKET_IS_METADATA(bucket)) {
            /* we do nothing regarding any meta here */
        }
        else {
            const char *bucket_data = NULL;
            apr_size_t bucket_length = 0;
            status = apr_bucket_read(bucket, &bucket_data,
                                     &bucket_length, block);
            
            if (status == APR_SUCCESS && bucket_length > 0) {
                apr_size_t consumed = 0;

                status = cin->cb(cin->cb_ctx, bucket_data, bucket_length, &consumed);
                if (status == APR_SUCCESS && bucket_length > consumed) {
                    /* We have data left in the bucket. Split it. */
                    status = apr_bucket_split(bucket, consumed);
                }
                readlen += consumed;
                cin->start_read = apr_time_now();
            }
        }
        apr_bucket_delete(bucket);
    }
    
    if (readlen == 0 && status == APR_SUCCESS && block == APR_NONBLOCK_READ) {
        return APR_EAGAIN;
    }
    return status;
}
Code example #12
File: mod_sslhaf.c  Project: Ath103/sslhaf
/**
 * Monitor outbound data and count buckets. This will help us determine
 * if input data is fragmented (we see more than one inbound bucket before
 * we see one outbound bucket).
 */
static apr_status_t mod_sslhaf_out_filter(ap_filter_t *f, apr_bucket_brigade *bb) {
    sslhaf_cfg_t *cfg = ap_get_module_config(f->c->conn_config,
        &sslhaf_module);
    apr_status_t status;
    apr_bucket *bucket;

    // Return straight away if there's no configuration
    if (cfg == NULL) {
        return ap_pass_brigade(f->next, bb);
    }

    // Loop through the buckets
    for (bucket = APR_BRIGADE_FIRST(bb);
        bucket != APR_BRIGADE_SENTINEL(bb);
        bucket = APR_BUCKET_NEXT(bucket))
    {
        const char *buf = NULL;
        apr_size_t buflen = 0;

        if (!(APR_BUCKET_IS_METADATA(bucket))) {
            // Get bucket data
            status = apr_bucket_read(bucket, &buf, &buflen, APR_BLOCK_READ);
            if (status != APR_SUCCESS) {
                ap_log_error(APLOG_MARK, APLOG_ERR, status, f->c->base_server,
                    "mod_sslhaf [%s]: Error while reading output bucket",
                    SSLHAF_AP_CONN_REMOTE_IP(f->c));
                return status;
            }

            // Count output buckets
            cfg->out_bucket_count++;
        }
    }

    return ap_pass_brigade(f->next, bb);
}
Code example #13
File: mod_ratelimit.c  Project: pexip/os-apache2
static apr_status_t
rate_limit_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_status_t rv = APR_SUCCESS;
    rl_ctx_t *ctx = f->ctx;
    apr_bucket_alloc_t *ba = f->r->connection->bucket_alloc;

    /* Set up our rl_ctx_t on first use */
    if (ctx == NULL) {
        const char *rl = NULL;
        int ratelimit;
        int burst = 0;

        /* no subrequests. */
        if (f->r->main != NULL) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* Configuration: rate limit */
        rl = apr_table_get(f->r->subprocess_env, "rate-limit");

        if (rl == NULL) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }
        
        /* rl is in kilo bytes / second  */
        ratelimit = atoi(rl) * 1024;
        if (ratelimit <= 0) {
            /* remove ourselves */
            ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r,
                          APLOGNO(03488) "rl: disabling: rate-limit = %s (too high?)", rl);
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* Configuration: optional initial burst */
        rl = apr_table_get(f->r->subprocess_env, "rate-initial-burst");
        if (rl != NULL) {
            burst = atoi(rl) * 1024;
            if (burst <= 0) {
               ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r,
                             APLOGNO(03489) "rl: disabling burst: rate-initial-burst = %s (too high?)", rl);
               burst = 0;
            }
        }

        /* Set up our context */
        ctx = apr_palloc(f->r->pool, sizeof(rl_ctx_t));
        f->ctx = ctx;
        ctx->state = RATE_LIMIT;
        ctx->speed = ratelimit;
        ctx->burst = burst;
        ctx->do_sleep = 0;

        /* calculate how many bytes / interval we want to send */
        /* speed is bytes / second, so, how many  (speed / 1000 % interval) */
        ctx->chunk_size = (ctx->speed / (1000 / RATE_INTERVAL_MS));
        ctx->tmpbb = apr_brigade_create(f->r->pool, ba);
        ctx->holdingbb = apr_brigade_create(f->r->pool, ba);
    }
    else {
        APR_BRIGADE_PREPEND(bb, ctx->holdingbb);
    }

    while (!APR_BRIGADE_EMPTY(bb)) {
        apr_bucket *e;

        if (ctx->state == RATE_FULLSPEED) {
            /* Find where we 'stop' going full speed. */
            for (e = APR_BRIGADE_FIRST(bb);
                 e != APR_BRIGADE_SENTINEL(bb); e = APR_BUCKET_NEXT(e)) {
                if (AP_RL_BUCKET_IS_END(e)) {
                    apr_brigade_split_ex(bb, e, ctx->holdingbb);
                    ctx->state = RATE_LIMIT;
                    break;
                }
            }

            e = apr_bucket_flush_create(ba);
            APR_BRIGADE_INSERT_TAIL(bb, e);
            rv = ap_pass_brigade(f->next, bb);
            apr_brigade_cleanup(bb);

            if (rv != APR_SUCCESS) {
                ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, f->r, APLOGNO(01455)
                              "rl: full speed brigade pass failed.");
                return rv;
            }
        }
        else {
            for (e = APR_BRIGADE_FIRST(bb);
                 e != APR_BRIGADE_SENTINEL(bb); e = APR_BUCKET_NEXT(e)) {
                if (AP_RL_BUCKET_IS_START(e)) {
                    apr_brigade_split_ex(bb, e, ctx->holdingbb);
                    ctx->state = RATE_FULLSPEED;
                    break;
                }
            }

            while (!APR_BRIGADE_EMPTY(bb)) {
                apr_off_t len = ctx->chunk_size + ctx->burst;

                APR_BRIGADE_CONCAT(ctx->tmpbb, bb);

                /*
                 * Pull next chunk of data; the initial amount is our
                 * burst allotment (if any) plus a chunk.  All subsequent
                 * iterations are just chunks with whatever remaining
                 * burst amounts we have left (in case not done in the
                 * first bucket).
                 */
                rv = apr_brigade_partition(ctx->tmpbb, len, &e);
                if (rv != APR_SUCCESS && rv != APR_INCOMPLETE) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r, APLOGNO(01456)
                                  "rl: partition failed.");
                    return rv;
                }
                /* Send next metadata now if any */
                while (e != APR_BRIGADE_SENTINEL(ctx->tmpbb)
                       && APR_BUCKET_IS_METADATA(e)) {
                    e = APR_BUCKET_NEXT(e);
                }
                if (e != APR_BRIGADE_SENTINEL(ctx->tmpbb)) {
                    apr_brigade_split_ex(ctx->tmpbb, e, bb);
                }
                else {
                    apr_brigade_length(ctx->tmpbb, 1, &len);
                }

                /*
                 * Adjust the burst amount depending on how much
                 * we've done up to now.
                 */
                if (ctx->burst) {
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
                        APLOGNO(03485) "rl: burst %d; len %"APR_OFF_T_FMT, ctx->burst, len);
                    if (len < ctx->burst) {
                        ctx->burst -= len;
                    }
                    else {
                        ctx->burst = 0;
                    }
                }

                e = APR_BRIGADE_LAST(ctx->tmpbb);
                if (APR_BUCKET_IS_EOS(e)) {
                    ap_remove_output_filter(f);
                }
                else if (!APR_BUCKET_IS_FLUSH(e)) {
                    if (APR_BRIGADE_EMPTY(bb)) {
                        /* Wait for more (or next call) */
                        break;
                    }
                    e = apr_bucket_flush_create(ba);
                    APR_BRIGADE_INSERT_TAIL(ctx->tmpbb, e);
                }

#if defined(RLFDEBUG)
                brigade_dump(f->r, ctx->tmpbb);
                brigade_dump(f->r, bb);
#endif /* RLFDEBUG */

                if (ctx->do_sleep) {
                    apr_sleep(RATE_INTERVAL_MS * 1000);
                }
                else {
                    ctx->do_sleep = 1;
                }

                rv = ap_pass_brigade(f->next, ctx->tmpbb);
                apr_brigade_cleanup(ctx->tmpbb);

                if (rv != APR_SUCCESS) {
                    /* Most often, user disconnects from stream */
                    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, f->r, APLOGNO(01457)
                                  "rl: brigade pass failed.");
                    return rv;
                }
            }
        }

        if (!APR_BRIGADE_EMPTY(ctx->holdingbb)) {
            /* Any rate-limited data in tmpbb is sent unlimited along
             * with the rest.
             */
            APR_BRIGADE_CONCAT(bb, ctx->tmpbb);
            APR_BRIGADE_CONCAT(bb, ctx->holdingbb);
        }
    }

#if defined(RLFDEBUG)
    brigade_dump(f->r, ctx->tmpbb);
#endif /* RLFDEBUG */

    /* Save remaining tmpbb with the correct lifetime for the next call */
    return ap_save_brigade(f, &ctx->holdingbb, &ctx->tmpbb, f->r->pool);
}
Code example #14
File: core_filters.c  Project: pexip/os-apache2
static apr_status_t writev_nonblocking(apr_socket_t *s,
                                       struct iovec *vec, apr_size_t nvec,
                                       apr_bucket_brigade *bb,
                                       apr_size_t *cumulative_bytes_written,
                                       conn_rec *c)
{
    apr_status_t rv = APR_SUCCESS, arv;
    apr_size_t bytes_written = 0, bytes_to_write = 0;
    apr_size_t i, offset;
    apr_interval_time_t old_timeout;

    arv = apr_socket_timeout_get(s, &old_timeout);
    if (arv != APR_SUCCESS) {
        return arv;
    }
    arv = apr_socket_timeout_set(s, 0);
    if (arv != APR_SUCCESS) {
        return arv;
    }

    for (i = 0; i < nvec; i++) {
        bytes_to_write += vec[i].iov_len;
    }
    offset = 0;
    while (bytes_written < bytes_to_write) {
        apr_size_t n = 0;
        rv = apr_socket_sendv(s, vec + offset, nvec - offset, &n);
        if (n > 0) {
            bytes_written += n;
            for (i = offset; i < nvec; ) {
                apr_bucket *bucket = APR_BRIGADE_FIRST(bb);
                if (APR_BUCKET_IS_METADATA(bucket)) {
                    apr_bucket_delete(bucket);
                }
                else if (n >= vec[i].iov_len) {
                    apr_bucket_delete(bucket);
                    offset++;
                    n -= vec[i++].iov_len;
                }
                else {
                    apr_bucket_split(bucket, n);
                    apr_bucket_delete(bucket);
                    vec[i].iov_len -= n;
                    vec[i].iov_base = (char *) vec[i].iov_base + n;
                    break;
                }
            }
        }
        if (rv != APR_SUCCESS) {
            break;
        }
    }
    if ((ap__logio_add_bytes_out != NULL) && (bytes_written > 0)) {
        ap__logio_add_bytes_out(c, bytes_written);
    }
    *cumulative_bytes_written += bytes_written;

    arv = apr_socket_timeout_set(s, old_timeout);
    if ((arv != APR_SUCCESS) && (rv == APR_SUCCESS)) {
        return arv;
    }
    else {
        return rv;
    }
}
Code example #15
File: mod_bucketeer.c  Project: Aimbot2/apache2
static apr_status_t bucketeer_out_filter(ap_filter_t *f,
                                         apr_bucket_brigade *bb)
{
    apr_bucket *e;
    request_rec *r = f->r;
    bucketeer_ctx_t *ctx = f->ctx;
    bucketeer_filter_config_t *c;

    c = ap_get_module_config(r->server->module_config, &bucketeer_module);

    /* If we have a context, it means we've done this before successfully. */
    if (!ctx) {
        if (!r->content_type || strncmp(r->content_type, "text/", 5)) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* We're cool with filtering this. */
        ctx = f->ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
        ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        apr_table_unset(f->r->headers_out, "Content-Length");
    }

    for (e = APR_BRIGADE_FIRST(bb);
         e != APR_BRIGADE_SENTINEL(bb);
         e = APR_BUCKET_NEXT(e))
    {
        const char *data;
        apr_size_t len, i, lastpos;

        if (APR_BUCKET_IS_EOS(e)) {
            APR_BUCKET_REMOVE(e);
            APR_BRIGADE_INSERT_TAIL(ctx->bb, e);

            /* Okay, we've seen the EOS.
             * Time to pass it along down the chain.
             */
            return ap_pass_brigade(f->next, ctx->bb);
        }

        if (APR_BUCKET_IS_FLUSH(e)) {
            /*
             * Ignore flush buckets for the moment..
             * we decide what to stream
             */
            continue;
        }

        if (APR_BUCKET_IS_METADATA(e)) {
            /* metadata bucket */
            apr_bucket *cpy;
            apr_bucket_copy(e, &cpy);
            APR_BRIGADE_INSERT_TAIL(ctx->bb, cpy);
            continue;
        }

        /* read */
        apr_bucket_read(e, &data, &len, APR_BLOCK_READ);

        if (len > 0) {
            lastpos = 0;
            for (i = 0; i < len; i++) {
                if (data[i] == c->flushdelimiter ||
                    data[i] == c->bucketdelimiter ||
                    data[i] == c->passdelimiter) {
                    apr_bucket *p;
                    if (i - lastpos > 0) {
                        p = apr_bucket_pool_create(apr_pmemdup(f->r->pool,
                                                               &data[lastpos],
                                                               i - lastpos),
                                                    i - lastpos,
                                                    f->r->pool,
                                                    f->c->bucket_alloc);
                        APR_BRIGADE_INSERT_TAIL(ctx->bb, p);
                    }
                    lastpos = i + 1;
                    if (data[i] == c->flushdelimiter) {
                        p = apr_bucket_flush_create(f->c->bucket_alloc);
                        APR_BRIGADE_INSERT_TAIL(ctx->bb, p);
                    }
                    if (data[i] == c->passdelimiter) {
                        apr_status_t rv;

                        rv = ap_pass_brigade(f->next, ctx->bb);
                        if (rv) {
                            return rv;
                        }
                    }
                }
            }
            /* XXX: really should append this to the next 'real' bucket */
            if (lastpos < i) {
                apr_bucket *p;
                p = apr_bucket_pool_create(apr_pmemdup(f->r->pool,
                                                       &data[lastpos],
                                                       i - lastpos),
                                           i - lastpos,
                                           f->r->pool,
                                           f->c->bucket_alloc);
                lastpos = i;
                APR_BRIGADE_INSERT_TAIL(ctx->bb, p);
            }
        }
    }

    return APR_SUCCESS;
}
Code example #16
File: core_filters.c  Project: pexip/os-apache2
static apr_status_t send_brigade_nonblocking(apr_socket_t *s,
                                             apr_bucket_brigade *bb,
                                             apr_size_t *bytes_written,
                                             conn_rec *c)
{
    apr_bucket *bucket, *next;
    apr_status_t rv;
    struct iovec vec[MAX_IOVEC_TO_WRITE];
    apr_size_t nvec = 0;

    remove_empty_buckets(bb);

    for (bucket = APR_BRIGADE_FIRST(bb);
         bucket != APR_BRIGADE_SENTINEL(bb);
         bucket = next) {
        next = APR_BUCKET_NEXT(bucket);
#if APR_HAS_SENDFILE
        if (APR_BUCKET_IS_FILE(bucket)) {
            apr_bucket_file *file_bucket = (apr_bucket_file *)(bucket->data);
            apr_file_t *fd = file_bucket->fd;
            /* Use sendfile to send this file unless:
             *   - the platform doesn't support sendfile,
             *   - the file is too small for sendfile to be useful, or
             *   - sendfile is disabled in the httpd config via "EnableSendfile off"
             */

            if ((apr_file_flags_get(fd) & APR_SENDFILE_ENABLED) &&
                (bucket->length >= AP_MIN_SENDFILE_BYTES)) {
                if (nvec > 0) {
                    (void)apr_socket_opt_set(s, APR_TCP_NOPUSH, 1);
                    rv = writev_nonblocking(s, vec, nvec, bb, bytes_written, c);
                    if (rv != APR_SUCCESS) {
                        (void)apr_socket_opt_set(s, APR_TCP_NOPUSH, 0);
                        return rv;
                    }
                }
                rv = sendfile_nonblocking(s, bucket, bytes_written, c);
                if (nvec > 0) {
                    (void)apr_socket_opt_set(s, APR_TCP_NOPUSH, 0);
                    nvec = 0;
                }
                if (rv != APR_SUCCESS) {
                    return rv;
                }
                break;
            }
        }
#endif /* APR_HAS_SENDFILE */
        /* didn't sendfile */
        if (!APR_BUCKET_IS_METADATA(bucket)) {
            const char *data;
            apr_size_t length;
            
            /* Non-blocking read first, in case this is a morphing
             * bucket type. */
            rv = apr_bucket_read(bucket, &data, &length, APR_NONBLOCK_READ);
            if (APR_STATUS_IS_EAGAIN(rv)) {
                /* Read would block; flush any pending data and retry. */
                if (nvec) {
                    rv = writev_nonblocking(s, vec, nvec, bb, bytes_written, c);
                    if (rv) {
                        return rv;
                    }
                    nvec = 0;
                }
                
                rv = apr_bucket_read(bucket, &data, &length, APR_BLOCK_READ);
            }
            if (rv != APR_SUCCESS) {
                return rv;
            }

            /* reading may have split the bucket, so recompute next: */
            next = APR_BUCKET_NEXT(bucket);
            vec[nvec].iov_base = (char *)data;
            vec[nvec].iov_len = length;
            nvec++;
            if (nvec == MAX_IOVEC_TO_WRITE) {
                rv = writev_nonblocking(s, vec, nvec, bb, bytes_written, c);
                nvec = 0;
                if (rv != APR_SUCCESS) {
                    return rv;
                }
                break;
            }
        }
    }

    if (nvec > 0) {
        rv = writev_nonblocking(s, vec, nvec, bb, bytes_written, c);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    remove_empty_buckets(bb);

    return APR_SUCCESS;
}
Code example #17
static apr_status_t line_edit_filter(ap_filter_t* f, apr_bucket_brigade* bb) {
  int i, j;
  unsigned int match ;
  unsigned int nmatch = 10 ;
  ap_regmatch_t pmatch[10] ;
  const char* bufp;
  const char* subs ;
  apr_size_t bytes ;
  apr_size_t fbytes ;
  apr_size_t offs ;
  const char* buf ;
  const char* le = NULL ;
  const char* le_n ;
  const char* le_r ;
  char* fbuf ;
  apr_bucket* b = APR_BRIGADE_FIRST(bb) ;
  apr_bucket* b1 ;
  int found = 0 ;
  apr_status_t rv ;

  apr_bucket_brigade* bbline ;
  line_edit_cfg* cfg
	= ap_get_module_config(f->r->per_dir_config, &line_edit_module) ;
  rewriterule* rules = (rewriterule*) cfg->rewriterules->elts ;
  rewriterule* newrule;

  line_edit_ctx* ctx = f->ctx ;
  if (ctx == NULL) {

    /* check env to see if we're wanted, to give basic control with 2.0 */
    buf = apr_table_get(f->r->subprocess_env, "LineEdit");
    if (buf && f->r->content_type) {
      char* lcbuf = apr_pstrdup(f->r->pool, buf) ;
      char* lctype = apr_pstrdup(f->r->pool, f->r->content_type) ;
      char* c ;

      for (c = lcbuf; *c; ++c)
	if (isupper(*c))
	  *c = tolower(*c) ;

      for (c = lctype; *c; ++c)
	if (isupper(*c))
	  *c = tolower(*c) ;
	else if (*c == ';') {
	  *c = 0 ;
	  break ;
	}

      if (!strstr(lcbuf, lctype)) {
	/* don't filter this content type */
	ap_filter_t* fnext = f->next ;
	ap_remove_output_filter(f) ;
	return ap_pass_brigade(fnext, bb) ;
      }
    }

    ctx = f->ctx = apr_palloc(f->r->pool, sizeof(line_edit_ctx)) ;
    ctx->bbsave = apr_brigade_create(f->r->pool, f->c->bucket_alloc) ;

    /* If we have any regex matches, we'll need to copy everything, so we
     * have null-terminated strings to parse.  That's a lot of memory if
     * we're streaming anything big.  So we'll use (and reuse) a local
     * subpool.  Fall back to the request pool if anything bad happens.
     */
    ctx->lpool = f->r->pool ;
    for (i = 0; i < cfg->rewriterules->nelts; ++i) {
      if ( rules[i].flags & M_REGEX ) {
        if (apr_pool_create(&ctx->lpool, f->r->pool) != APR_SUCCESS) {
	  ctx->lpool = f->r->pool ;
        }
        break ;
      }
    }
    /* If we have env interpolation, we'll need a private copy of
     * our rewrite rules with this requests env.  Otherwise we can
     * save processing time by using the original.
     *
     * If one ENV is found, we also have to copy all previous and
     * subsequent rules, even those with no interpolation.
     */
    ctx->rewriterules = cfg->rewriterules;
    for (i = 0; i < cfg->rewriterules->nelts; ++i) {
      found |= (rules[i].flags & M_ENV) ;
      if ( found ) {
	if (ctx->rewriterules == cfg->rewriterules) {
	  ctx->rewriterules = apr_array_make(f->r->pool,
		cfg->rewriterules->nelts, sizeof(rewriterule));
	  for (j = 0; j < i; ++j) {
            newrule = apr_array_push (((line_edit_ctx*)ctx)->rewriterules) ;
	    newrule->from = rules[j].from;
	    newrule->to = rules[j].to;
	    newrule->flags = rules[j].flags;
	    newrule->length = rules[j].length;
	  }
	}
	/* this rule needs to be interpolated */
        newrule = apr_array_push (((line_edit_ctx*)ctx)->rewriterules) ;
	newrule->from = rules[i].from;
	if (rules[i].flags & M_ENV) {
	  newrule->to = interpolate_env(f->r, rules[i].to);
	} else {
	  newrule->to = rules[i].to ;
	}
	newrule->flags = rules[i].flags;
	newrule->length = rules[i].length;
      }
    }
    /* for back-compatibility with Apache 2.0, set some protocol stuff */
    apr_table_unset(f->r->headers_out, "Content-Length") ;
    apr_table_unset(f->r->headers_out, "Content-MD5") ;
    apr_table_unset(f->r->headers_out, "Accept-Ranges") ;
  }
  /* by now our rules are in ctx->rewriterules */
  rules = (rewriterule*) ctx->rewriterules->elts ;

  /* bbline is what goes to the next filter,
   * so we (can) have a new one each time.
   */
  bbline = apr_brigade_create(f->r->pool, f->c->bucket_alloc) ;

  /* first ensure we have no mid-line breaks that might be in the
   * middle of a search string causing us to miss it!  At the same
   * time we split into lines to avoid pattern-matching over big
   * chunks of memory.
   */
  while ( b != APR_BRIGADE_SENTINEL(bb) ) {
    if ( !APR_BUCKET_IS_METADATA(b) ) {
      if ( apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS ) {
	if ( bytes == 0 ) {
	  APR_BUCKET_REMOVE(b) ;
	} else while ( bytes > 0 ) {
	  switch (cfg->lineend) {

	  case LINEEND_UNIX:
	    le = memchr(buf, '\n', bytes) ;
	    break ;

	  case LINEEND_MAC:
	    le = memchr(buf, '\r', bytes) ;
	    break ;

	  case LINEEND_DOS:
	    /* Edge-case issue: if a \r\n spans buckets it'll get missed.
	     * Not a problem for present purposes, but would be an issue
	     * if we claimed to support pattern matching on the lineends.
	     */
	    found = 0 ;
	    le = memchr(buf+1, '\n', bytes-1) ;
	    while ( le && !found ) {
	      if ( le[-1] == '\r' ) {
	        found = 1 ;
	      } else {
	        le = memchr(le+1, '\n', bytes-1 - (le+1 - buf)) ;
	      }
	    }
	    if ( !found )
	      le = 0 ;
	    break;

	  case LINEEND_ANY:
	  case LINEEND_UNSET:
	    /* Edge-case notabug: if a \r\n spans buckets it'll get seen as
	     * two line-ends.  It'll insert the \n as a one-byte bucket.
	     */
	    le_n = memchr(buf, '\n', bytes) ;
	    le_r = memchr(buf, '\r', bytes) ;
	    if ( le_n != NULL )
	      if ( le_n == le_r + sizeof(char))
	        le = le_n ;
	      else if ( (le_r < le_n) && (le_r != NULL) )
	        le = le_r ;
	      else
	        le = le_n ;
	    else
	      le = le_r ;
	    break;

	  case LINEEND_NONE:
	    le = 0 ;
	    break;

	  case LINEEND_CUSTOM:
	    le = memchr(buf, cfg->lechar, bytes) ;
	    break;
	  }
	  if ( le ) {
	    /* found a lineend in this bucket. */
	    /* pointer difference, avoiding truncating casts on 64-bit builds */
	    offs = (apr_size_t)(le - buf) + 1 ;
	    apr_bucket_split(b, offs) ;
	    bytes -= offs ;
	    buf += offs ;
	    b1 = APR_BUCKET_NEXT(b) ;
	    APR_BUCKET_REMOVE(b);

	    /* Is there any previous unterminated content ? */
	    if ( !APR_BRIGADE_EMPTY(ctx->bbsave) ) {
	      /* append this to any content waiting for a lineend */
	      APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b) ;
	      rv = apr_brigade_pflatten(ctx->bbsave, &fbuf, &fbytes, f->r->pool) ;
	      /* make b a new bucket of the flattened stuff */
	      b = apr_bucket_pool_create(fbuf, fbytes, f->r->pool,
			f->r->connection->bucket_alloc) ;

	      /* bbsave has been consumed, so clear it */
	      apr_brigade_cleanup(ctx->bbsave) ;
	    }
	    /* b now contains exactly one line */
	    APR_BRIGADE_INSERT_TAIL(bbline, b);
	    b = b1 ;
	  } else {
	    /* no lineend found.  Remember the dangling content */
	    APR_BUCKET_REMOVE(b);
	    APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b);
	    bytes = 0 ;
	  }
	} /* while bytes > 0 */
      } else {
	/* bucket read failed - oops !  Let's remove it. */
	APR_BUCKET_REMOVE(b);
      }
    } else if ( APR_BUCKET_IS_EOS(b) ) {
      /* If there's data to pass, send it in one bucket */
      if ( !APR_BRIGADE_EMPTY(ctx->bbsave) ) {
        rv = apr_brigade_pflatten(ctx->bbsave, &fbuf, &fbytes, f->r->pool) ;
        b1 = apr_bucket_pool_create(fbuf, fbytes, f->r->pool,
		f->r->connection->bucket_alloc) ;
        APR_BRIGADE_INSERT_TAIL(bbline, b1);
      }
      apr_brigade_cleanup(ctx->bbsave) ;
      /* start again rather than segfault if a seriously buggy
       * filter in front of us sent a bogus EOS
       */
      f->ctx = NULL ;

      /* move the EOS to the new brigade */
      APR_BUCKET_REMOVE(b);
      APR_BRIGADE_INSERT_TAIL(bbline, b);
    } else {
      /* chop flush or unknown metadata bucket types */
      apr_bucket_delete(b);
    }
    /* OK, reset pointer to what's left (since we're not in a for-loop) */
    b = APR_BRIGADE_FIRST(bb) ;
  }

  /* OK, now we have a bunch of complete lines in bbline,
   * so we can apply our edit rules
   */

  /* When we get a match, we split the line into before+match+after.
   * To flatten that back into one buf every time would be inefficient.
   * So we treat it as three separate bufs to apply future rules.
   *
   * We can only reasonably do that by looping over buckets *inside*
   * the loop over rules.
   *
   * That means concepts like one-match-per-line or start-of-line-only
   * won't work, except for the first rule.  So we won't pretend.
   */
  for (i = 0; i < ctx->rewriterules->nelts; ++i) {
    for ( b = APR_BRIGADE_FIRST(bbline) ;
	b != APR_BRIGADE_SENTINEL(bbline) ;
	b = APR_BUCKET_NEXT(b) ) {
      if ( !APR_BUCKET_IS_METADATA(b)
	&& (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS)) {
	if ( rules[i].flags & M_REGEX ) {
	  bufp = apr_pstrmemdup(ctx->lpool, buf, bytes) ;
	  while ( ! ap_regexec(rules[i].from.r, bufp, nmatch, pmatch, 0) ) {
	    match = pmatch[0].rm_so ;
	    subs = ap_pregsub(f->r->pool, rules[i].to, bufp, nmatch, pmatch) ;
	    apr_bucket_split(b, match) ;
	    b1 = APR_BUCKET_NEXT(b) ;
	    apr_bucket_split(b1, pmatch[0].rm_eo - match) ;
	    b = APR_BUCKET_NEXT(b1) ;
	    apr_bucket_delete(b1) ;
	    b1 = apr_bucket_pool_create(subs, strlen(subs), f->r->pool,
		  f->r->connection->bucket_alloc) ;
	    APR_BUCKET_INSERT_BEFORE(b, b1) ;
	    bufp += pmatch[0].rm_eo ;
	  }
	} else {
	  bufp = buf ;
	  while (subs = apr_strmatch(rules[i].from.s, bufp, bytes),
			subs != NULL) {
	    match = (unsigned int)(subs - bufp) ;
	    bytes -= match ;
	    bufp += match ;
	    apr_bucket_split(b, match) ;
	    b1 = APR_BUCKET_NEXT(b) ;
	    apr_bucket_split(b1, rules[i].length) ;
	    b = APR_BUCKET_NEXT(b1) ;
	    apr_bucket_delete(b1) ;
	    bytes -= rules[i].length ;
	    bufp += rules[i].length ;
	    b1 = apr_bucket_immortal_create(rules[i].to, strlen(rules[i].to),
		f->r->connection->bucket_alloc) ;
	    APR_BUCKET_INSERT_BEFORE(b, b1) ;
	  }
	}
      }
    }
    /* If we used a local pool, clear it now */
    if ( (ctx->lpool != f->r->pool) && (rules[i].flags & M_REGEX) ) {
      apr_pool_clear(ctx->lpool) ;
    }
  }

  /* now pass it down the chain */
  rv = ap_pass_brigade(f->next, bbline) ;

  /* if we have leftover data, don't risk it going out of scope */
  for ( b = APR_BRIGADE_FIRST(ctx->bbsave) ;
	b != APR_BRIGADE_SENTINEL(ctx->bbsave) ;
	b = APR_BUCKET_NEXT(b)) {
    apr_bucket_setaside(b, f->r->pool) ;
  }

  return rv ;
}
Code example #18
static apr_status_t do_pattmatch(ap_filter_t *f, apr_bucket *inb,
                                 apr_bucket_brigade *mybb,
                                 apr_pool_t *pool)
{
    int i;
    int force_quick = 0;
    ap_regmatch_t regm[AP_MAX_REG_MATCH];
    apr_size_t bytes;
    apr_size_t len;
    const char *buff;
    struct ap_varbuf vb;
    apr_bucket *b;
    apr_bucket *tmp_b;

    subst_dir_conf *cfg =
    (subst_dir_conf *) ap_get_module_config(f->r->per_dir_config,
                                             &substitute_module);
    subst_pattern_t *script;

    APR_BRIGADE_INSERT_TAIL(mybb, inb);
    ap_varbuf_init(pool, &vb, 0);

    script = (subst_pattern_t *) cfg->patterns->elts;
    /*
     * Simple optimization. If we only have one pattern, then
     * we can safely avoid the overhead of flattening
     */
    if (cfg->patterns->nelts == 1) {
       force_quick = 1;
    }
    for (i = 0; i < cfg->patterns->nelts; i++) {
        for (b = APR_BRIGADE_FIRST(mybb);
             b != APR_BRIGADE_SENTINEL(mybb);
             b = APR_BUCKET_NEXT(b)) {
            if (APR_BUCKET_IS_METADATA(b)) {
                /*
                 * we should NEVER see this, because we should never
                 * be passed any, but "handle" it just in case.
                 */
                continue;
            }
            if (apr_bucket_read(b, &buff, &bytes, APR_BLOCK_READ)
                    == APR_SUCCESS) {
                int have_match = 0;
                vb.strlen = 0;
                if (script->pattern) {
                    const char *repl;
                    /*
                     * space_left counts how many bytes we have left until the
                     * line length reaches max_line_length.
                     */
                    apr_size_t space_left = cfg->max_line_length;
                    apr_size_t repl_len = strlen(script->replacement);
                    while ((repl = apr_strmatch(script->pattern, buff, bytes)))
                    {
                        have_match = 1;
                        /* get offset into buff for pattern */
                        len = (apr_size_t) (repl - buff);
                        if (script->flatten && !force_quick) {
                            /*
                             * We are flattening the buckets here, meaning
                             * that we don't do the fast bucket splits.
                             * Instead we copy over what the buckets would
                             * contain and use them. This is slow, since we
                             * are constantly allocating space and copying
                             * strings.
                             */
                            if (vb.strlen + len + repl_len > cfg->max_line_length)
                                return APR_ENOMEM;
                            ap_varbuf_strmemcat(&vb, buff, len);
                            ap_varbuf_strmemcat(&vb, script->replacement, repl_len);
                        }
                        else {
                            /*
                             * The string before the match but after the
                             * previous match (if any) has length 'len'.
                             * Check if we still have space for this string and
                             * the replacement string.
                             */
                            if (space_left < len + repl_len)
                                return APR_ENOMEM;
                            space_left -= len + repl_len;
                            /*
                             * We now split off the string before the match
                             * as its own bucket, then isolate the matched
                             * string and delete it.
                             */
                            SEDRMPATBCKT(b, len, tmp_b, script->patlen);
                            /*
                             * Finally, we create a bucket that contains the
                             * replacement...
                             */
                            tmp_b = apr_bucket_transient_create(script->replacement,
                                      script->replen,
                                      f->r->connection->bucket_alloc);
                            /* ... and insert it */
                            APR_BUCKET_INSERT_BEFORE(b, tmp_b);
                        }
                        /* now we need to adjust buff for all these changes */
                        len += script->patlen;
                        bytes -= len;
                        buff += len;
                    }
                    if (have_match) {
                        if (script->flatten && !force_quick) {
                            /* XXX: we should check for AP_MAX_BUCKETS here and
                             * XXX: call ap_pass_brigade accordingly
                             */
                            char *copy = ap_varbuf_pdup(pool, &vb, NULL, 0,
                                                        buff, bytes, &len);
                            tmp_b = apr_bucket_pool_create(copy, len, pool,
                                                           f->r->connection->bucket_alloc);
                            APR_BUCKET_INSERT_BEFORE(b, tmp_b);
                            apr_bucket_delete(b);
                            b = tmp_b;
                        }
                        else {
                            /*
                             * We want the behaviour to be predictable.
                             * Therefore we try to always error out if the
                             * line length is larger than the limit,
                             * regardless of the content of the line. So,
                             * let's check if the remaining non-matching
                             * string does not exceed the limit.
                             */
                            if (space_left < b->length)
                                return APR_ENOMEM;
                        }
                    }
                }
                else if (script->regexp) {
                    int left = bytes;
                    const char *pos = buff;
                    char *repl;
                    apr_size_t space_left = cfg->max_line_length;
                    while (!ap_regexec_len(script->regexp, pos, left,
                                       AP_MAX_REG_MATCH, regm, 0)) {
                        apr_status_t rv;
                        have_match = 1;
                        if (script->flatten && !force_quick) {
                            /* check remaining buffer size */
                            /* Note that the last param in ap_varbuf_regsub below
                             * must stay positive. If it gets 0, it would mean
                             * unlimited space available. */
                            if (vb.strlen + regm[0].rm_so >= cfg->max_line_length)
                                return APR_ENOMEM;
                            /* copy bytes before the match */
                            if (regm[0].rm_so > 0)
                                ap_varbuf_strmemcat(&vb, pos, regm[0].rm_so);
                            /* add replacement string, last argument is unsigned! */
                            rv = ap_varbuf_regsub(&vb, script->replacement, pos,
                                                  AP_MAX_REG_MATCH, regm,
                                                  cfg->max_line_length - vb.strlen);
                            if (rv != APR_SUCCESS)
                                return rv;
                        }
                        else {
                            apr_size_t repl_len;
                            /* account for the string before the match */
                            if (space_left <= regm[0].rm_so)
                                return APR_ENOMEM;
                            space_left -= regm[0].rm_so;
                            rv = ap_pregsub_ex(pool, &repl,
                                               script->replacement, pos,
                                               AP_MAX_REG_MATCH, regm,
                                               space_left);
                            if (rv != APR_SUCCESS)
                                return rv;
                            repl_len = strlen(repl);
                            space_left -= repl_len;
                            len = (apr_size_t) (regm[0].rm_eo - regm[0].rm_so);
                            SEDRMPATBCKT(b, regm[0].rm_so, tmp_b, len);
                            tmp_b = apr_bucket_transient_create(repl, repl_len,
                                                f->r->connection->bucket_alloc);
                            APR_BUCKET_INSERT_BEFORE(b, tmp_b);
                        }
                        /*
                         * reset to past what we just did. pos now maps to b
                         * again
                         */
                        pos += regm[0].rm_eo;
                        left -= regm[0].rm_eo;
                    }
                    if (have_match && script->flatten && !force_quick) {
                        char *copy;
                        /* Copy result plus the part after the last match into
                         * a bucket.
                         */
                        copy = ap_varbuf_pdup(pool, &vb, NULL, 0, pos, left,
                                              &len);
                        tmp_b = apr_bucket_pool_create(copy, len, pool,
                                           f->r->connection->bucket_alloc);
                        APR_BUCKET_INSERT_BEFORE(b, tmp_b);
                        apr_bucket_delete(b);
                        b = tmp_b;
                    }
                }
                else {
                    ap_assert(0);
                    continue;
                }
            }
        }
        script++;
    }
    ap_varbuf_free(&vb);
    return APR_SUCCESS;
}
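
SEDRMPATBCKT() is a helper macro defined earlier in mod_substitute.c. Roughly, it splits off the text before the match, isolates the matched bytes in their own bucket, deletes that bucket, and leaves b pointing at the remainder; the sketch below is an approximate reconstruction for readability, not the verbatim definition.

/* Approximate sketch of the SEDRMPATBCKT(b, offset, tmp_b, patlen) helper. */
#define SEDRMPATBCKT(b, offset, tmp_b, patlen) do {  \
    apr_bucket_split(b, offset);                     \
    tmp_b = APR_BUCKET_NEXT(b);                      \
    apr_bucket_split(tmp_b, patlen);                 \
    b = APR_BUCKET_NEXT(tmp_b);                      \
    apr_bucket_delete(tmp_b);                        \
} while (0)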
Code example #19
ngx_int_t move_brigade_to_chain(apr_bucket_brigade *bb,
        ngx_chain_t **ll, ngx_pool_t *pool)
{
    apr_bucket  *e;
    ngx_buf_t   *buf;
    ngx_chain_t *cl;

    cl = NULL;

    if (APR_BRIGADE_EMPTY(bb)) {
        *ll = NULL;
        return NGX_OK;
    }

    for (e = APR_BRIGADE_FIRST(bb);
            e != APR_BRIGADE_SENTINEL(bb);
            e = APR_BUCKET_NEXT(e)) {

        if (APR_BUCKET_IS_EOS(e)) {
            if (cl == NULL) {
                cl = ngx_alloc_chain_link(pool);
                if (cl == NULL) {
                    break;
                }
                
                cl->buf = ngx_calloc_buf(pool);
                if (cl->buf == NULL) {
                    break;
                }

                cl->buf->last_buf = 1;
                cl->next = NULL;
                *ll = cl;
            } else {
                cl->next = NULL;
                cl->buf->last_buf = 1;
            }
            apr_brigade_cleanup(bb);
            return NGX_OK;
        }

        if (APR_BUCKET_IS_METADATA(e)) {
            continue;
        }

        buf = apr_bucket_to_ngx_buf(e, pool);
        if (buf == NULL) {
            break;
        }

        cl = ngx_alloc_chain_link(pool);
        if (cl == NULL) {
            break;
        }

        cl->buf = buf;
        cl->next = NULL;
        *ll = cl;
        ll = &cl->next;
    }


    apr_brigade_cleanup(bb);
    /* no eos or error */
    return NGX_ERROR;
}
Code example #20
File: mod_triger.c  Project: dreamsxin/mod_triger
static apr_status_t triger_filter(ap_filter_t * f, apr_bucket_brigade * bb)
{
    apr_status_t rv = APR_SUCCESS;
    triger_conf_t *cfg;
    apr_bucket *b;
    triger_module_ctx_t *ctx = f->ctx;
    if (APR_BRIGADE_EMPTY(bb))
	return APR_SUCCESS;
    cfg = ap_get_module_config(f->r->per_dir_config, &triger_module);
    if (!cfg)
	goto last;
    if (!cfg->enabled)
	goto last;
    if (!ctx) {
	f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
	if (!ctx)
	    goto last;
	ctx->times = 1;
	ctx->unknown_start_tag_find = 0;
	ctx->unknown_end_tag_find = 0;
	ctx->find = 0;
	ctx->no_tag_find = 1;
	ctx->doctype_tag_find = 0;
	ctx->html_start_tag_find = ctx->head_start_tag_find =
	    ctx->body_start_tag_find = ctx->body_end_tag_find =
	    ctx->html_end_tag_find = 0;
	ctx->triger_bucket =
	    apr_pcalloc(f->r->pool, sizeof(triger_bucket_t));
	if (!ctx->triger_bucket)
	    goto last;
	ctx->triger_bucket->limit = cfg->chk_len;
	ctx->head_check = 0;
	apr_table_unset(f->r->headers_out, "Content-Length");
    } else
	ctx->times++;
    if (!is_this_html(f->r) || ctx->find)
	goto last;
    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
		  "Enter this filter %u times", ctx->times);
    if (!cfg->full_chk) {
	ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
		      "Only check the first and last data buckets");
	if (!ctx->head_check) {
	    ctx->head_check = 1;
	    get_data_at_head(f, bb);
	    where_to_insert_html_fragment_at_head(f);
	    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
			  "Find the first data bucket. Content length: %d uri: %s path info: %s positions found: %d (<head>)",
			  (int) ctx->triger_bucket->len, f->r->uri,
			  f->r->path_info,
			  (int) ctx->triger_bucket->head_start_tag_pos);
	    insert_html_fragment_at_head(f, bb, cfg);
	}
	if (ctx->find || !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb)))
	    goto last;
	get_data_at_tail(f, bb);
	where_to_insert_html_fragment_at_tail(f);
	ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
		      "Find the last data bucket. Content length: %d uri: %s path info: %s positions found: %d (</body>) %d (/html)",
		      (int) ctx->triger_bucket->len, f->r->uri,
		      f->r->path_info,
		      (int) ctx->triger_bucket->body_end_tag_pos,
		      (int) ctx->triger_bucket->html_end_tag_pos);
	insert_html_fragment_at_tail(f, bb, cfg);
    } else {
	ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
		      "Check each data bucket");
	for (b = APR_BRIGADE_FIRST(bb);
	     b != APR_BRIGADE_SENTINEL(bb); b = APR_BUCKET_NEXT(b)) {
	    if (!APR_BUCKET_IS_METADATA(b)) {
		get_triger_bucket(f, b);
		where_to_insert_html_fragment_at_head(f);
		insert_html_fragment_at_head(f, bb, cfg);
	    }
	    if (!ctx->find && APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
		get_data_at_tail(f, bb);
		where_to_insert_html_fragment_at_tail(f);
		insert_html_fragment_at_tail(f, bb, cfg);
	    }
	}
    }
  last:
    rv = ap_pass_brigade(f->next, bb);
    return rv;
}
Code example #21
File: mod_defender.cpp Project: Annihil/mod_defender
/*
 * This routine is called to perform any module-specific fixing of header
 * fields, et cetera.  It is invoked just before any content-handler.
 *
 * This is a RUN_ALL HOOK.
 */
static int fixups(request_rec *r) {
    int ret;
    dir_config_t *dcfg = (dir_config_t *) ap_get_module_config(r->per_dir_config, &defender_module);
    // Stop if Defender not enabled
    if (!dcfg->defender)
        return DECLINED;

    // Stop if this is not the main request
    if (r->main != NULL || r->prev != NULL)
        return DECLINED;

    // Process only if POST / PUT request
    if (r->method_number != M_POST && r->method_number != M_PUT)
        return DECLINED;

    defender_config_t *defc = (defender_config_t *) ap_get_module_config(r->request_config, &defender_module);
    RuntimeScanner *scanner = defc->vpRuntimeScanner;

    if (scanner->contentLengthProvided && scanner->contentLength == 0)
        return scanner->processBody();

    if (scanner->contentType == CONTENT_TYPE_UNSUPPORTED)
        return scanner->processBody();

    if (scanner->bodyLimitExceeded)
        return scanner->processBody();

    if (!scanner->contentLengthProvided)
        return HTTP_NOT_IMPLEMENTED;

    if (scanner->transferEncodingProvided /*&& scanner->transferEncoding == TRANSFER_ENCODING_UNSUPPORTED*/)
        return HTTP_NOT_IMPLEMENTED;

    // Retrieve the body
    // Pre-allocate necessary bytes
    scanner->body.reserve(scanner->contentLength);

    // Wait for the body to be fully received 
    apr_bucket_brigade *bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
    unsigned int prev_nr_buckets = 0;
    if (bb == NULL)
        goto read_error_out;
    do {
        int rc = ap_get_brigade(r->input_filters, bb, AP_MODE_SPECULATIVE, APR_BLOCK_READ, LONG_MAX);
        if (rc != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, "Error reading body: %s", get_apr_error(r->pool, rc));
            goto read_error_out;
        }

        unsigned int nr_buckets = 0;
        // Iterate over buckets
        for (apr_bucket *bucket = APR_BRIGADE_FIRST(bb);
             bucket != APR_BRIGADE_SENTINEL(bb); bucket = APR_BUCKET_NEXT(bucket)) {
            // Stop if we reach the EOS bucket
            if (APR_BUCKET_IS_EOS(bucket))
                break;

            // Ignore non data buckets
            if (APR_BUCKET_IS_METADATA(bucket) || APR_BUCKET_IS_FLUSH(bucket))
                continue;

            nr_buckets++;
            // Skip already copied buckets
            if (nr_buckets <= prev_nr_buckets)
                continue;

            const char *buf;
            apr_size_t nbytes;
            int rv = apr_bucket_read(bucket, &buf, &nbytes, APR_BLOCK_READ);
            if (rv != APR_SUCCESS) {
                ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, "Failed reading input / bucket: %s",
                              get_apr_error(r->pool, rv));
                goto read_error_out;
            }

//            cerr << "bucket #" << nr_buckets - 1 << " received, nbytes: " << nbytes << ", total len: "
//                 << scanner->body.length() + nbytes << endl;

            // More bytes in the BODY than specified in the content-length
            if (scanner->body.length() + nbytes > scanner->contentLength) {
                ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, "Too much POST data: received body of %lu bytes but "
                        "got content-length: %lu", scanner->body.length() + nbytes, scanner->contentLength);
                goto read_error_out;
            }

            // More bytes in the BODY than specified by the allowed body limit
            if (scanner->body.length() + nbytes > dcfg->requestBodyLimit) {
                scanner->bodyLimitExceeded = true;
                ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, "Body limit exceeded (%lu)", dcfg->requestBodyLimit);
                break;
            }

            scanner->body.append(buf, nbytes);
        }
        prev_nr_buckets = nr_buckets;

        if (scanner->body.length() == scanner->contentLength)
            break;

        apr_brigade_cleanup(bb);
        apr_sleep(1000);
    } while (true);

//    cerr << "[pid " << getpid() << "] read " << scanner->body.length() << " bytes, ";
//    cerr << "content-length: " << scanner->contentLength << endl;
//    cerr << "body: " << scanner->body << endl;

    // Run scanner
    ret = scanner->processBody();

    if (dcfg->useenv)
        ret = pass_in_env(r, scanner);

//    cerr << "[pid " << getpid() << "] body (" << scanner->body.length() << ") scanned" << endl;

    return ret;

    read_error_out:
    if (dcfg->useenv) return DECLINED;
    return HTTP_INTERNAL_SERVER_ERROR;
}
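
For context, a fixups handler like the one above is normally wired up in the module's register_hooks callback. The sketch below assumes default ordering; mod_defender's real registration may differ.

#include <httpd.h>
#include <http_config.h>
#include <http_request.h>

static void register_hooks(apr_pool_t *pool)
{
    /* fixups is a RUN_ALL hook: every registered handler gets to run
     * unless one of them returns an error. */
    ap_hook_fixups(fixups, NULL, NULL, APR_HOOK_MIDDLE);
}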
Code example #22
static void do_pattmatch(ap_filter_t *f, apr_bucket *inb,
                         apr_bucket_brigade *mybb,
                         apr_pool_t *tmp_pool)
{
    int i;
    int force_quick = 0;
    ap_regmatch_t regm[AP_MAX_REG_MATCH];
    apr_size_t bytes;
    apr_size_t len;
    apr_size_t fbytes;
    const char *buff;
    const char *repl;
    char *scratch;
    char *p;
    char *s1;
    char *s2;
    apr_bucket *b;
    apr_bucket *tmp_b;
    apr_pool_t *tpool;

    subst_dir_conf *cfg =
    (subst_dir_conf *) ap_get_module_config(f->r->per_dir_config,
                                             &substitute_module);
    subst_pattern_t *script;

    APR_BRIGADE_INSERT_TAIL(mybb, inb);
    
    script = (subst_pattern_t *) cfg->patterns->elts;
    apr_pool_create(&tpool, tmp_pool);
    scratch = NULL;
    fbytes = 0;
    /*
     * Simple optimization. If we only have one pattern, then
     * we can safely avoid the overhead of flattening
     */
    if (cfg->patterns->nelts == 1) {
       force_quick = 1;
    }
    for (i = 0; i < cfg->patterns->nelts; i++) {
        for (b = APR_BRIGADE_FIRST(mybb);
             b != APR_BRIGADE_SENTINEL(mybb);
             b = APR_BUCKET_NEXT(b)) {
            if (APR_BUCKET_IS_METADATA(b)) {
                /*
                 * we should NEVER see this, because we should never
                 * be passed any, but "handle" it just in case.
                 */
                continue;
            }
            if (apr_bucket_read(b, &buff, &bytes, APR_BLOCK_READ)
                    == APR_SUCCESS) {
                s1 = NULL;
                if (script->pattern) {
                    while ((repl = apr_strmatch(script->pattern, buff, bytes)))
                    {
                        /* get offset into buff for pattern */
                        len = (apr_size_t) (repl - buff);
                        if (script->flatten && !force_quick) {
                            /*
                             * We are flattening the buckets here, meaning
                             * that we don't do the fast bucket splits.
                             * Instead we copy over what the buckets would
                             * contain and use them. This is slow, since we
                             * are constantly allocating space and copying
                             * strings.
                             */
                            SEDSCAT(s1, s2, tmp_pool, buff, len,
                                    script->replacement);
                        }
                        else {
                            /*
                             * We now split off the stuff before the regex
                             * as its own bucket, then isolate the pattern
                             * and delete it.
                             */
                            SEDRMPATBCKT(b, len, tmp_b, script->patlen);
                            /*
                             * Finally, we create a bucket that contains the
                             * replacement...
                             */
                            tmp_b = apr_bucket_transient_create(script->replacement,
                                      script->replen,
                                      f->r->connection->bucket_alloc);
                            /* ... and insert it */
                            APR_BUCKET_INSERT_BEFORE(b, tmp_b);
                        }
                        /* now we need to adjust buff for all these changes */
                        len += script->patlen;
                        bytes -= len;
                        buff += len;
                    }
                    if (script->flatten && s1 && !force_quick) {
                        /*
                         * we've finished looking at the bucket, so remove the
                         * old one and add in our new one
                         */
                        s2 = apr_pstrmemdup(tmp_pool, buff, bytes);
                        s1 = apr_pstrcat(tmp_pool, s1, s2, NULL);
                        tmp_b = apr_bucket_transient_create(s1, strlen(s1),
                                            f->r->connection->bucket_alloc);
                        APR_BUCKET_INSERT_BEFORE(b, tmp_b);
                        apr_bucket_delete(b);
                        b = tmp_b;
                    }

                }
                else if (script->regexp) {
                    /*
                     * we need a null terminated string here :(. To hopefully
                     * save time and memory, we don't alloc for each run
                     * through, but only if we need to have a larger chunk
                     * to save the string to. So we keep track of how much
                     * we've allocated and only re-alloc when we need it.
                     * NOTE: this screams for a macro.
                     */
                    if (!scratch || (bytes > (fbytes + 1))) {
                        fbytes = bytes + 1;
                        scratch = apr_palloc(tpool, fbytes);
                    }
                    /* reset pointer to the scratch space */
                    p = scratch;
                    memcpy(p, buff, bytes);
                    p[bytes] = '\0';
                    while (!ap_regexec(script->regexp, p,
                                       AP_MAX_REG_MATCH, regm, 0)) {
                        /* first, grab the replacement string */
                        repl = ap_pregsub(tmp_pool, script->replacement, p,
                                          AP_MAX_REG_MATCH, regm);
                        if (script->flatten && !force_quick) {
                            SEDSCAT(s1, s2, tmp_pool, p, regm[0].rm_so, repl);
                        }
                        else {
                            len = (apr_size_t) (regm[0].rm_eo - regm[0].rm_so);
                            SEDRMPATBCKT(b, regm[0].rm_so, tmp_b, len);
                            tmp_b = apr_bucket_transient_create(repl,
                                                                strlen(repl),
                                             f->r->connection->bucket_alloc);
                            APR_BUCKET_INSERT_BEFORE(b, tmp_b);
                        }
                        /*
                         * reset to past what we just did. buff now maps to b
                         * again
                         */
                        p += regm[0].rm_eo;
                    }
                    if (script->flatten && s1 && !force_quick) {
                        s1 = apr_pstrcat(tmp_pool, s1, p, NULL);
                        tmp_b = apr_bucket_transient_create(s1, strlen(s1),
                                            f->r->connection->bucket_alloc);
                        APR_BUCKET_INSERT_BEFORE(b, tmp_b);
                        apr_bucket_delete(b);
                        b = tmp_b;
                    }

                }
                else {
                    /* huh? */
                    continue;
                }
            }
        }
        script++;
    }

    apr_pool_destroy(tpool);

    return;
}
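
The fixed-string branch above relies on apr_strmatch: the needle is precompiled (mod_substitute typically does this while parsing its configuration) and then searched against raw bucket data, which is not NUL-terminated. A minimal sketch of just that step, with an invented helper name:

#include <apr_pools.h>
#include <apr_strmatch.h>

/* Return a pointer to the first occurrence of needle in a buffer of known
 * length, or NULL.  The precompiled pattern can be reused across buckets. */
static const char *find_first(apr_pool_t *pool, const char *needle,
                              const char *buff, apr_size_t bytes)
{
    const apr_strmatch_pattern *pat =
        apr_strmatch_precompile(pool, needle, 1 /* case-sensitive */);

    return apr_strmatch(pat, buff, bytes);
}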
Code example #23
/**
 * Process input stream
 */
static apr_status_t helocon_filter_in(ap_filter_t *f, apr_bucket_brigade *b, ap_input_mode_t mode, apr_read_type_e block, apr_off_t readbytes)
{
    conn_rec *c = f->c;
    my_ctx *ctx = f->ctx;

    // Fail quickly if the connection has already been aborted.
    if (c->aborted) {
        apr_brigade_cleanup(b);
        return APR_ECONNABORTED;
    }
    // Fast passthrough
    if (ctx->phase == PHASE_DONE) {
        return ap_get_brigade(f->next, b, mode, block, readbytes);
    }

    // Process Head
    do {
#ifdef DEBUG
        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (1)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase);
#endif

        if (APR_BRIGADE_EMPTY(b)) {
#ifdef DEBUG
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (2)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase);
#endif
            apr_status_t s = ap_get_brigade(f->next, b, ctx->mode, APR_BLOCK_READ, ctx->need);
            if (s != APR_SUCCESS) {
#ifdef DEBUG
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (fail)(1)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase);
#endif
                return s;
            }
        }
        if (ctx->phase == PHASE_DONE) {
            return APR_SUCCESS;
        }
        if (APR_BRIGADE_EMPTY(b)) {
#ifdef DEBUG
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (empty)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase);
#endif
            return APR_SUCCESS;
        }
        apr_bucket *e = NULL;
        for (e = APR_BRIGADE_FIRST(b); e != APR_BRIGADE_SENTINEL(b); e = APR_BUCKET_NEXT(e)) {
            if (e->type == NULL) {
#ifdef DEBUG
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (type=NULL)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase);
#endif
                return APR_SUCCESS;
            }

            // We need more data
            if (ctx->need > 0) {
                const char *str = NULL;
                apr_size_t length = 0;
                apr_status_t s = apr_bucket_read(e, &str, &length, APR_BLOCK_READ);
                if (s != APR_SUCCESS) {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " recv=%" APR_OFF_T_FMT " phase=%d readed=%" APR_SIZE_T_FMT " (fail)(2)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->recv, ctx->phase, length);
#endif
                    return s;
                }
#ifdef DEBUG
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " recv=%" APR_OFF_T_FMT " phase=%d readed=%" APR_SIZE_T_FMT " (3)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->recv, ctx->phase, length);
#endif
                if (length > 0) {
                    if ((ctx->offset + length) > PROXY_MAX_LENGTH) { // Overflow
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in ERROR: PROXY protocol header overflow from=%s to port=%d length=%" APR_OFF_T_FMT, _CLIENT_IP, c->local_addr->port, (ctx->offset + length));
                        goto ABORT_CONN2;
                    }
                    memcpy(ctx->buf + ctx->offset, str, length);
                    if (ctx->pad != ctx->magic) {
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in padding magic fail (bad=%d vs good=%d)", ctx->pad, ctx->magic);
                        goto ABORT_CONN;
                    }
                    ctx->offset += length;
                    ctx->recv += length;
                    ctx->need -= length;
                    ctx->buf[ctx->offset] = 0;
                    // delete HEAD
                    if (e->length > length) {
                        apr_bucket_split(e, length);
                    }
                }
                apr_bucket_delete(e);
                if (length == 0) {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG bucket flush=%d meta=%d", APR_BUCKET_IS_FLUSH(e) ? 1 : 0, APR_BUCKET_IS_METADATA(e) ? 1 : 0);
#endif
                    continue;
                }
            }
            // Handle GETLINE mode
            if (ctx->mode == AP_MODE_GETLINE) {
                if ((ctx->need > 0) && (ctx->recv > 2)) {
                    char *end = memchr(ctx->buf, '\r', ctx->offset - 1);
                    if (end) {
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: GETLINE OK");
#endif
                        if ((end[0] == '\r') && (end[1] == '\n')) {
                            ctx->need = 0;
                        }
                    }
                }
            }
            if (ctx->need <= 0) {
#ifdef DEBUG
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " recv=%" APR_OFF_T_FMT " phase=%d (4)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->recv, ctx->phase);
#endif
                switch (ctx->phase) {
                case PHASE_WANT_HEAD: {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d phase=%d checking=%s buf=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->phase, "HEAD", ctx->buf);
#endif
                    // TEST Command
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=TEST CHECK");
#endif
                    if (strncmp(TEST, ctx->buf, 4) == 0) {
                        apr_socket_t *csd = ap_get_module_config(c->conn_config, &core_module);
                        apr_size_t length = strlen(TEST_RES_OK);
                        apr_socket_send(csd, TEST_RES_OK, &length);
                        apr_socket_shutdown(csd, APR_SHUTDOWN_WRITE);
                        apr_socket_close(csd);

#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=TEST OK");
#endif

                        // No need to check for SUCCESS, we did that above
                        c->aborted = 1;
                        apr_brigade_cleanup(b);
                        return APR_ECONNABORTED;
                    }
                    // HELO Command
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=HELO CHECK");
#endif
                    if (strncmp(HELO, ctx->buf, 4) == 0) {
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=HELO OK");
#endif
                        ctx->phase = PHASE_WANT_BINIP;
                        ctx->mode = AP_MODE_READBYTES;
                        ctx->need = 4;
                        ctx->recv = 0;
                        break;
                    }
                    // PROXY Command
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=PROXY CHECK");
#endif
                    if (strncmp(PROXY, ctx->buf, 4) == 0) {
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=PROXY OK");
#endif
                        ctx->phase = PHASE_WANT_LINE;
                        ctx->mode = AP_MODE_GETLINE;
                        ctx->need = PROXY_MAX_LENGTH - ctx->offset;
                        ctx->recv = 0;
                        break;
                    }
                    // ELSE... GET / POST / etc
                    ctx->phase = PHASE_DONE;
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG from: %s:%d to port=%d newBucket (1) size=%" APR_OFF_T_FMT, _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->offset);
#endif
                    // Restore original data
                    if (ctx->offset) {
                        e = apr_bucket_heap_create(ctx->buf, ctx->offset, NULL, c->bucket_alloc);
                        APR_BRIGADE_INSERT_HEAD(b, e);
                        goto END_CONN;
                    }
                    break;
                }
                case PHASE_WANT_BINIP: {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d phase=%d checking=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->phase, "BINIP");
#endif
                    // REWRITE CLIENT IP
                    const char *new_ip = fromBinIPtoString(c->pool, ctx->buf+4);
                    if (!new_ip) {
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in ERROR: HELO+IP invalid");
                        goto ABORT_CONN;
                    }

                    apr_table_set(c->notes, NOTE_REWRITE_IP, new_ip);
                    ctx->phase = PHASE_DONE;
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG from: %s:%d to port=%d newip=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, new_ip);
#endif
                    break;
                }
                case PHASE_WANT_LINE: {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d phase=%d checking=%s buf=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->phase, "LINE", ctx->buf);
#endif
                    ctx->phase = PHASE_DONE;
                    char *end = memchr(ctx->buf, '\r', ctx->offset - 1);
                    if (!end) {
                        goto ABORT_CONN;
                    }
                    if ((end[0] != '\r') || (end[1] != '\n')) {
                        goto ABORT_CONN;
                    }
                    if (!process_proxy_header(f)) {
                        goto ABORT_CONN;
                    }
                    // Restore original data
                    int count = (ctx->offset - ((end - ctx->buf) + 2));
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG from: %s:%d to port=%d newBucket (2) size=%d rest=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, count, end + 2);
#endif
                    if (count > 0) {
                        e = apr_bucket_heap_create(end + 2, count, NULL, c->bucket_alloc);
                        APR_BRIGADE_INSERT_HEAD(b, e);
                        goto END_CONN;
                    }
                    break;
                }
                }
                if (ctx->phase == PHASE_DONE) {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d phase=%d (DONE)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->phase);
#endif
                    ctx->mode = mode;
                    ctx->need = 0;
                    ctx->recv = 0;
                }
                break;
            }
        }
    } while (ctx->phase != PHASE_DONE);

END_CONN:
    return ap_get_brigade(f->next, b, mode, block, readbytes);

ABORT_CONN:
    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in ERROR: PROXY protocol header invalid from=%s to port=%d", _CLIENT_IP, c->local_addr->port);
ABORT_CONN2:
    c->aborted = 1;
    apr_brigade_cleanup(b);
    return APR_ECONNABORTED;
}
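
A connection-level input filter like helocon_filter_in has to be registered at startup and attached to each connection before the first request is read. The wiring below is a sketch only: the filter name, the context setup and the hook ordering are assumptions, not taken from the module above.

#include <httpd.h>
#include <http_config.h>
#include <http_connection.h>
#include <util_filter.h>

static int helocon_pre_connection(conn_rec *c, void *csd)
{
    my_ctx *ctx = apr_pcalloc(c->pool, sizeof(*ctx));

    /* the real module initialises ctx->phase, ctx->mode, ctx->need, ...
     * before the first read; that setup is omitted in this sketch */
    ap_add_input_filter("HELOCON_IN", ctx, NULL, c);
    return OK;
}

static void register_hooks(apr_pool_t *pool)
{
    ap_register_input_filter("HELOCON_IN", helocon_filter_in, NULL,
                             AP_FTYPE_CONNECTION);
    ap_hook_pre_connection(helocon_pre_connection, NULL, NULL,
                           APR_HOOK_MIDDLE);
}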
Code example #24
File: mod_mapcache.c Project: jmckenna/mapcache
/* read post body. code taken from "The apache modules book, Nick Kew" */
static void read_post_body(mapcache_context_apache_request *ctx, mapcache_request_proxy *p) {
  request_rec *r = ctx->request;
  mapcache_context *mctx = (mapcache_context*)ctx;
  int bytes, eos = 0;
  apr_bucket_brigade *bb, *bbin;
  apr_bucket *b;
  apr_status_t rv;
  const char *clen = apr_table_get(r->headers_in, "Content-Length");
  if(clen) {
    bytes = strtol(clen, NULL, 0);
    if(bytes >= p->rule->max_post_len) {
      mctx->set_error(mctx, HTTP_REQUEST_ENTITY_TOO_LARGE, "post request too big");
      return;
    }
  } else {
    bytes = p->rule->max_post_len;
  } 

  bb = apr_brigade_create(mctx->pool, r->connection->bucket_alloc);
  bbin = apr_brigade_create(mctx->pool, r->connection->bucket_alloc);
  p->post_len = 0;

  do {
    rv = ap_get_brigade(r->input_filters, bbin, AP_MODE_READBYTES, APR_BLOCK_READ, bytes);
    if(rv != APR_SUCCESS) {
      mctx->set_error(mctx, 500, "failed to read form input");
      return;
    }
    for(b = APR_BRIGADE_FIRST(bbin); b != APR_BRIGADE_SENTINEL(bbin); b = APR_BUCKET_NEXT(b)) {
      if(APR_BUCKET_IS_EOS(b)) {
        eos = 1;
      }
      /* only data buckets contribute to the body length */
      if(!APR_BUCKET_IS_METADATA(b)) {
        if(b->length != (apr_size_t)(-1)) {
          p->post_len += b->length;
          if(p->post_len > p->rule->max_post_len) {
            apr_bucket_delete(b);
          }
        }
      }
      /* keep the bucket for flattening while we are under the limit */
      if(p->post_len <= p->rule->max_post_len) {
        APR_BUCKET_REMOVE(b);
        APR_BRIGADE_INSERT_TAIL(bb, b);
      }
    }
  } while (!eos);

  if(p->post_len > p->rule->max_post_len) {
    mctx->set_error(mctx, HTTP_REQUEST_ENTITY_TOO_LARGE, "request too big");
    return;
  }

  p->post_buf = apr_palloc(mctx->pool, p->post_len+1);

  rv = apr_brigade_flatten(bb, p->post_buf, &(p->post_len));
  if(rv != APR_SUCCESS) {
    mctx->set_error(mctx, 500, "error (flatten) reading form data");
    return;
  }
  p->post_buf[p->post_len] = 0;
}
Code example #25
File: fcgid_proc_win.c Project: Pballer/prayer
apr_status_t proc_write_ipc(fcgid_ipc * ipc_handle,
                            apr_bucket_brigade * birgade_send)
{
    fcgid_namedpipe_handle *handle_info;
    apr_bucket *bucket_request;
    apr_status_t rv;
    DWORD transferred;

    handle_info = (fcgid_namedpipe_handle *) ipc_handle->ipc_handle_info;

    for (bucket_request = APR_BRIGADE_FIRST(birgade_send);
         bucket_request != APR_BRIGADE_SENTINEL(birgade_send);
         bucket_request = APR_BUCKET_NEXT(bucket_request))
    {
        const char *write_buf;
        apr_size_t write_buf_len;
        apr_size_t has_write;

        if (APR_BUCKET_IS_METADATA(bucket_request))
            continue;

        if ((rv = apr_bucket_read(bucket_request, &write_buf, &write_buf_len,
                                  APR_BLOCK_READ)) != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_WARNING, rv, ipc_handle->request,
                          "mod_fcgid: can't read request from bucket");
            return rv;
        }

        /* Write the buffer to fastcgi server */
        has_write = 0;
        while (has_write < write_buf_len) {
            DWORD byteswrite;

            if (WriteFile(handle_info->handle_pipe,
                          write_buf + has_write,
                          write_buf_len - has_write,
                          &byteswrite, &handle_info->overlap_write)) {
                has_write += byteswrite;
                continue;
            } else if ((rv = GetLastError()) != ERROR_IO_PENDING) {
                ap_log_rerror(APLOG_MARK, APLOG_WARNING,
                              APR_FROM_OS_ERROR(rv), ipc_handle->request,
                              "mod_fcgid: can't write to pipe");
                return rv;
            } else {
                /*
                   it's ERROR_IO_PENDING on write
                 */
                DWORD dwWaitResult =
                    WaitForSingleObject(handle_info->overlap_write.hEvent,
                                        ipc_handle->communation_timeout * 1000);
                if (dwWaitResult == WAIT_OBJECT_0) {
                    if (!GetOverlappedResult(handle_info->handle_pipe,
                                             &handle_info->overlap_write,
                                             &transferred,
                                             FALSE /* don't wait */ )
                        || transferred == 0)
                    {
                        ap_log_rerror(APLOG_MARK, APLOG_WARNING,
                                      apr_get_os_error(), ipc_handle->request,
                                      "mod_fcgid: get overlap result error");
                        return APR_ESPIPE;
                    }
                    has_write += transferred;
                    continue;
                } else {
                    ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0,
                                  ipc_handle->request,
                                  "mod_fcgid: write timeout to pipe");
                    return APR_ESPIPE;
                }
            }
        }
    }

    return APR_SUCCESS;
}
Code example #26
File: mod_dav_svn.c Project: Ranga123/test1
static apr_status_t
merge_xml_in_filter(ap_filter_t *f,
                    apr_bucket_brigade *bb,
                    ap_input_mode_t mode,
                    apr_read_type_e block,
                    apr_off_t readbytes)
{
  apr_status_t rv;
  request_rec *r = f->r;
  merge_ctx_t *ctx = f->ctx;
  apr_bucket *bucket;
  int seen_eos = 0;

  /* We shouldn't be added if we're not a MERGE/DELETE, but double check. */
  if ((r->method_number != M_MERGE)
      && (r->method_number != M_DELETE))
    {
      ap_remove_input_filter(f);
      return ap_get_brigade(f->next, bb, mode, block, readbytes);
    }

  if (!ctx)
    {
      f->ctx = ctx = apr_palloc(r->pool, sizeof(*ctx));
      ctx->parser = apr_xml_parser_create(r->pool);
      ctx->bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
      apr_pool_create(&ctx->pool, r->pool);
    }

  rv = ap_get_brigade(f->next, ctx->bb, mode, block, readbytes);

  if (rv != APR_SUCCESS)
    return rv;

  for (bucket = APR_BRIGADE_FIRST(ctx->bb);
       bucket != APR_BRIGADE_SENTINEL(ctx->bb);
       bucket = APR_BUCKET_NEXT(bucket))
    {
      const char *data;
      apr_size_t len;

      if (APR_BUCKET_IS_EOS(bucket))
        {
          seen_eos = 1;
          break;
        }

      if (APR_BUCKET_IS_METADATA(bucket))
        continue;

      rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
      if (rv != APR_SUCCESS)
        return rv;

      rv = apr_xml_parser_feed(ctx->parser, data, len);
      if (rv != APR_SUCCESS)
        {
          /* Clean up the parser. */
          (void) apr_xml_parser_done(ctx->parser, NULL);
          break;
        }
    }

  /* This will clear-out the ctx->bb as well. */
  APR_BRIGADE_CONCAT(bb, ctx->bb);

  if (seen_eos)
    {
      apr_xml_doc *pdoc;

      /* Remove ourselves now. */
      ap_remove_input_filter(f);

      /* tell the parser that we're done */
      rv = apr_xml_parser_done(ctx->parser, &pdoc);
      if (rv == APR_SUCCESS)
        {
#if APR_CHARSET_EBCDIC
          apr_xml_parser_convert_doc(r->pool, pdoc, ap_hdrs_from_ascii);
#endif
          /* stash the doc away for mod_dav_svn's later use. */
          rv = apr_pool_userdata_set(pdoc, "svn-request-body",
                                     NULL, r->pool);
          if (rv != APR_SUCCESS)
            return rv;

        }
    }

  return APR_SUCCESS;
}
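
The parsed document is stashed in the request pool under the key "svn-request-body"; the consumer side is not shown above. A minimal retrieval sketch (the helper name is invented):

#include <httpd.h>
#include <apr_xml.h>

/* Fetch the document stashed by merge_xml_in_filter, or NULL if the filter
 * never parsed a complete request body. */
static apr_xml_doc *get_stashed_merge_doc(request_rec *r)
{
    void *data = NULL;

    apr_pool_userdata_get(&data, "svn-request-body", r->pool);
    return data;
}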
Code example #27
File: util_xml.c Project: Aimbot2/apache2
AP_DECLARE(int) ap_xml_parse_input(request_rec * r, apr_xml_doc **pdoc)
{
    apr_xml_parser *parser;
    apr_bucket_brigade *brigade;
    int seen_eos;
    apr_status_t status;
    char errbuf[200];
    apr_size_t total_read = 0;
    apr_size_t limit_xml_body = ap_get_limit_xml_body(r);
    int result = HTTP_BAD_REQUEST;

    parser = apr_xml_parser_create(r->pool);
    brigade = apr_brigade_create(r->pool, r->connection->bucket_alloc);

    seen_eos = 0;
    total_read = 0;

    do {
        apr_bucket *bucket;

        /* read the body, stuffing it into the parser */
        status = ap_get_brigade(r->input_filters, brigade,
                                AP_MODE_READBYTES, APR_BLOCK_READ,
                                READ_BLOCKSIZE);

        if (status != APR_SUCCESS) {
            goto read_error;
        }

        for (bucket = APR_BRIGADE_FIRST(brigade);
             bucket != APR_BRIGADE_SENTINEL(brigade);
             bucket = APR_BUCKET_NEXT(bucket))
        {
            const char *data;
            apr_size_t len;

            if (APR_BUCKET_IS_EOS(bucket)) {
                seen_eos = 1;
                break;
            }

            if (APR_BUCKET_IS_METADATA(bucket)) {
                continue;
            }

            status = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
            if (status != APR_SUCCESS) {
                goto read_error;
            }

            total_read += len;
            if (limit_xml_body && total_read > limit_xml_body) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00539)
                              "XML request body is larger than the configured "
                              "limit of %lu", (unsigned long)limit_xml_body);
                result = HTTP_REQUEST_ENTITY_TOO_LARGE;
                goto read_error;
            }

            status = apr_xml_parser_feed(parser, data, len);
            if (status) {
                goto parser_error;
            }
        }

        apr_brigade_cleanup(brigade);
    } while (!seen_eos);

    apr_brigade_destroy(brigade);

    /* tell the parser that we're done */
    status = apr_xml_parser_done(parser, pdoc);
    if (status) {
        /* Some parsers are stupid and return an error on blank documents. */
        if (!total_read) {
            *pdoc = NULL;
            return OK;
        }
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00540)
                      "XML parser error (at end). status=%d", status);
        return HTTP_BAD_REQUEST;
    }

#if APR_CHARSET_EBCDIC
    apr_xml_parser_convert_doc(r->pool, *pdoc, ap_hdrs_from_ascii);
#endif
    return OK;

  parser_error:
    (void) apr_xml_parser_geterror(parser, errbuf, sizeof(errbuf));
    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00541)
                  "XML Parser Error: %s", errbuf);

    /* FALLTHRU */

  read_error:
    /* make sure the parser is terminated */
    (void) apr_xml_parser_done(parser, NULL);

    apr_brigade_destroy(brigade);

    /* Apache will supply a default error, plus the error log above. */
    return result;
}
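
From a handler's point of view, ap_xml_parse_input either returns OK (with *pdoc possibly NULL for an empty body) or an HTTP status code that can be returned as-is. A hypothetical caller, sketched under that contract:

#include <httpd.h>
#include <util_xml.h>

static int example_xml_handler(request_rec *r)
{
    apr_xml_doc *doc = NULL;
    int rc = ap_xml_parse_input(r, &doc);

    if (rc != OK) {
        return rc;               /* e.g. HTTP_BAD_REQUEST or 413 */
    }
    if (doc == NULL) {
        return HTTP_BAD_REQUEST; /* empty body, if the caller requires one */
    }

    /* ... walk doc->root ... */
    return OK;
}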
Code example #28
File: mirror.c Project: gunjanms/svnmigration
apr_status_t dav_svn__location_in_filter(ap_filter_t *f,
                                         apr_bucket_brigade *bb,
                                         ap_input_mode_t mode,
                                         apr_read_type_e block,
                                         apr_off_t readbytes)
{
    request_rec *r = f->r;
    locate_ctx_t *ctx = f->ctx;
    apr_status_t rv;
    apr_bucket *bkt;
    const char *master_uri, *root_dir, *canonicalized_uri;
    apr_uri_t uri;

    /* Don't filter if we're in a subrequest or we aren't setup to
       proxy anything. */
    master_uri = dav_svn__get_master_uri(r);
    if (r->main || !master_uri) {
        ap_remove_input_filter(f);
        return ap_get_brigade(f->next, bb, mode, block, readbytes);
    }

    /* And don't filter if our search-n-replace would be a noop anyway
       (that is, if our root path matches that of the master server). */
    apr_uri_parse(r->pool, master_uri, &uri);
    root_dir = dav_svn__get_root_dir(r);
    canonicalized_uri = svn_urlpath__canonicalize(uri.path, r->pool);
    if (strcmp(canonicalized_uri, root_dir) == 0) {
        ap_remove_input_filter(f);
        return ap_get_brigade(f->next, bb, mode, block, readbytes);
    }

    /* ### FIXME: While we want to fix up any locations in proxied XML
       ### requests, we do *not* want to be futzing with versioned (or
       ### to-be-versioned) data, such as the file contents present in
       ### PUT requests and properties in PROPPATCH requests.
       ### See issue #3445 for details. */

    /* We are url encoding the current url and the master url
       as incoming(from client) request body has it encoded already. */
    canonicalized_uri = svn_path_uri_encode(canonicalized_uri, r->pool);
    root_dir = svn_path_uri_encode(root_dir, r->pool);
    if (!f->ctx) {
        ctx = f->ctx = apr_pcalloc(r->pool, sizeof(*ctx));
        ctx->remotepath = canonicalized_uri;
        ctx->remotepath_len = strlen(ctx->remotepath);
        ctx->localpath = root_dir;
        ctx->localpath_len = strlen(ctx->localpath);
        ctx->pattern = apr_strmatch_precompile(r->pool, ctx->localpath, 1);
        ctx->pattern_len = ctx->localpath_len;
    }

    rv = ap_get_brigade(f->next, bb, mode, block, readbytes);
    if (rv) {
        return rv;
    }

    bkt = APR_BRIGADE_FIRST(bb);
    while (bkt != APR_BRIGADE_SENTINEL(bb)) {

        const char *data, *match;
        apr_size_t len;

        if (APR_BUCKET_IS_METADATA(bkt)) {
            bkt = APR_BUCKET_NEXT(bkt);
            continue;
        }

        /* read */
        apr_bucket_read(bkt, &data, &len, APR_BLOCK_READ);
        match = apr_strmatch(ctx->pattern, data, len);
        if (match) {
            apr_bucket *next_bucket;
            apr_bucket_split(bkt, match - data);
            next_bucket = APR_BUCKET_NEXT(bkt);
            apr_bucket_split(next_bucket, ctx->pattern_len);
            bkt = APR_BUCKET_NEXT(next_bucket);
            apr_bucket_delete(next_bucket);
            next_bucket = apr_bucket_pool_create(ctx->remotepath,
                                                 ctx->remotepath_len,
                                                 r->pool, bb->bucket_alloc);
            APR_BUCKET_INSERT_BEFORE(bkt, next_bucket);
        }
        else {
            bkt = APR_BUCKET_NEXT(bkt);
        }
    }
    return APR_SUCCESS;
}
Code example #29
static apr_status_t substitute_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_size_t bytes;
    apr_size_t len;
    apr_size_t fbytes;
    const char *buff;
    const char *nl = NULL;
    char *bflat;
    apr_bucket *b;
    apr_bucket *tmp_b;
    apr_bucket_brigade *tmp_bb = NULL;
    apr_status_t rv;
    subst_dir_conf *cfg =
    (subst_dir_conf *) ap_get_module_config(f->r->per_dir_config,
                                             &substitute_module);

    substitute_module_ctx *ctx = f->ctx;

    /*
     * First time around? Create the saved bb that we use for each pass
     * through. Note that we can also get here when we explicitly clear ctx,
     * for error handling.
     */
    if (!ctx) {
        f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
        /*
         * Create all the temporary brigades we need and reuse them to avoid
         * creating them over and over again from r->pool which would cost a
         * lot of memory in some cases.
         */
        ctx->linebb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        ctx->linesbb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        ctx->pattbb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        /*
         * Everything to be passed to the next filter goes in
         * here, our pass brigade.
         */
        ctx->passbb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        /* Create our temporary pool only once */
        apr_pool_create(&(ctx->tpool), f->r->pool);
        apr_table_unset(f->r->headers_out, "Content-Length");
    }

    /*
     * Shortcircuit processing
     */
    if (APR_BRIGADE_EMPTY(bb))
        return APR_SUCCESS;

    /*
     * Here's the concept:
     *  Read in the data and look for newlines. Once we
     *  find a full "line", add it to our working brigade.
     *  If we've finished reading the brigade and we have
     *  any left over data (not a "full" line), store that
     *  for the next pass.
     *
     * Note: anything stored in ctx->linebb for sure does not have
     * a newline char, so we don't concat that bb with the
     * new bb, since we would be spending time searching for the newline
     * in data where we know it doesn't exist. So instead, we simply scan
     * our current bb and, if we see a newline, prepend ctx->linebb
     * to the front of it. This makes the code much less straight-
     * forward (otherwise we could APR_BRIGADE_CONCAT(ctx->linebb, bb)
     * and just scan for newlines and not bother with needing to know
     * when ctx->linebb needs to be reset) but also faster. We'll take
     * the speed.
     *
     * Note: apr_brigade_split_line would be nice here, but we
     * really can't use it since we need more control and we want
     * to re-use already read bucket data.
     *
     * See mod_include if still confused :)
     */

    while ((b = APR_BRIGADE_FIRST(bb)) && (b != APR_BRIGADE_SENTINEL(bb))) {
        if (APR_BUCKET_IS_EOS(b)) {
            /*
             * if we see the EOS, then we need to pass along everything we
             * have. But if the ctx->linebb isn't empty, then we need to add
             * that to the end of what we'll be passing.
             */
            if (!APR_BRIGADE_EMPTY(ctx->linebb)) {
                rv = apr_brigade_pflatten(ctx->linebb, &bflat,
                                          &fbytes, ctx->tpool);
                if (rv != APR_SUCCESS)
                    goto err;
                if (fbytes > cfg->max_line_length) {
                    rv = APR_ENOMEM;
                    goto err;
                }
                tmp_b = apr_bucket_transient_create(bflat, fbytes,
                                                f->r->connection->bucket_alloc);
                rv = do_pattmatch(f, tmp_b, ctx->pattbb, ctx->tpool);
                if (rv != APR_SUCCESS)
                    goto err;
                APR_BRIGADE_CONCAT(ctx->passbb, ctx->pattbb);
                apr_brigade_cleanup(ctx->linebb);
            }
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(ctx->passbb, b);
        }
        /*
         * No need to handle FLUSH buckets separately as we call
         * ap_pass_brigade anyway at the end of the loop.
         */
        else if (APR_BUCKET_IS_METADATA(b)) {
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(ctx->passbb, b);
        }
        else {
            /*
             * We have actual "data" so read in as much as we can and start
             * scanning and splitting from our read buffer
             */
            rv = apr_bucket_read(b, &buff, &bytes, APR_BLOCK_READ);
            if (rv != APR_SUCCESS || bytes == 0) {
                apr_bucket_delete(b);
            }
            else {
                int num = 0;
                while (bytes > 0) {
                    nl = memchr(buff, APR_ASCII_LF, bytes);
                    if (nl) {
                        len = (apr_size_t) (nl - buff) + 1;
                        /* split *after* the newline */
                        apr_bucket_split(b, len);
                        /*
                         * We've likely read more data, so bypass rereading
                         * bucket data and continue scanning through this
                         * buffer
                         */
                        bytes -= len;
                        buff += len;
                        /*
                         * we need b to be updated for future potential
                         * splitting
                         */
                        tmp_b = APR_BUCKET_NEXT(b);
                        APR_BUCKET_REMOVE(b);
                        /*
                         * Hey, we found a newline! Don't forget the old
                         * stuff that needs to be added to the front. So we
                         * add the split bucket to the end, flatten the whole
                         * bb, morph the whole shebang into a bucket which is
                         * then added to the tail of the newline bb.
                         */
                        if (!APR_BRIGADE_EMPTY(ctx->linebb)) {
                            APR_BRIGADE_INSERT_TAIL(ctx->linebb, b);
                            rv = apr_brigade_pflatten(ctx->linebb, &bflat,
                                                      &fbytes, ctx->tpool);
                            if (rv != APR_SUCCESS)
                                goto err;
                            if (fbytes > cfg->max_line_length) {
                                /* Avoid pflattening further lines, we will
                                 * abort later on anyway.
                                 */
                                rv = APR_ENOMEM;
                                goto err;
                            }
                            b = apr_bucket_transient_create(bflat, fbytes,
                                            f->r->connection->bucket_alloc);
                            apr_brigade_cleanup(ctx->linebb);
                        }
                        rv = do_pattmatch(f, b, ctx->pattbb, ctx->tpool);
                        if (rv != APR_SUCCESS)
                            goto err;
                        /*
                         * Count how many buckets we have in ctx->passbb
                         * so far. Yes, this is correct: we count ctx->passbb
                         * and not ctx->pattbb, as we do not reset num on every
                         * iteration.
                         */
                        for (b = APR_BRIGADE_FIRST(ctx->pattbb);
                             b != APR_BRIGADE_SENTINEL(ctx->pattbb);
                             b = APR_BUCKET_NEXT(b)) {
                            num++;
                        }
                        APR_BRIGADE_CONCAT(ctx->passbb, ctx->pattbb);
                        /*
                         * If the number of buckets in ctx->passbb reaches an
                         * "insane" level, we consume much memory for all the
                         * buckets as such. So lets flush them down the chain
                         * in this case and thus clear ctx->passbb. This frees
                         * the buckets memory for further processing.
                         * Usually this condition should not become true, but
                         * it is a safety measure for edge cases.
                         */
                        if (num > AP_MAX_BUCKETS) {
                            b = apr_bucket_flush_create(
                                                f->r->connection->bucket_alloc);
                            APR_BRIGADE_INSERT_TAIL(ctx->passbb, b);
                            rv = ap_pass_brigade(f->next, ctx->passbb);
                            apr_brigade_cleanup(ctx->passbb);
                            num = 0;
                            apr_pool_clear(ctx->tpool);
                            if (rv != APR_SUCCESS)
                                goto err;
                        }
                        b = tmp_b;
                    }
                    else {
                        /*
                         * no newline in whatever is left of this buffer so
                         * tuck data away and get next bucket
                         */
                        APR_BUCKET_REMOVE(b);
                        APR_BRIGADE_INSERT_TAIL(ctx->linebb, b);
                        bytes = 0;
                    }
                }
            }
        }
        if (!APR_BRIGADE_EMPTY(ctx->passbb)) {
            rv = ap_pass_brigade(f->next, ctx->passbb);
            apr_brigade_cleanup(ctx->passbb);
            if (rv != APR_SUCCESS)
                goto err;
        }
        apr_pool_clear(ctx->tpool);
    }

    /* Anything left we want to save/setaside for the next go-around */
    if (!APR_BRIGADE_EMPTY(ctx->linebb)) {
        /*
         * Provide ap_save_brigade with an existing empty brigade
         * (ctx->linesbb) to avoid creating a new one.
         */
        ap_save_brigade(f, &(ctx->linesbb), &(ctx->linebb), f->r->pool);
        tmp_bb = ctx->linebb;
        ctx->linebb = ctx->linesbb;
        ctx->linesbb = tmp_bb;
    }

    return APR_SUCCESS;
err:
    if (rv == APR_ENOMEM)
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, APLOGNO(01328) "Line too long, URI %s",
                      f->r->uri);
    apr_pool_clear(ctx->tpool);
    return rv;
}
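
An output filter of this shape is registered once at startup and enabled per request (for example with SetOutputFilter or AddOutputFilterByType). The sketch below mirrors the usual mod_substitute registration; the exact name and ordering in a given source tree may differ.

#include <httpd.h>
#include <http_config.h>
#include <util_filter.h>

static void register_hooks(apr_pool_t *pool)
{
    /* AP_FTYPE_RESOURCE: run against response content, ahead of
     * connection-level filters such as chunking or TLS. */
    ap_register_output_filter("SUBSTITUTE", substitute_filter, NULL,
                              AP_FTYPE_RESOURCE);
}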
Code example #30
File: core_filters.c Project: pexip/os-apache2
apr_status_t ap_core_output_filter(ap_filter_t *f, apr_bucket_brigade *new_bb)
{
    conn_rec *c = f->c;
    core_net_rec *net = f->ctx;
    core_output_filter_ctx_t *ctx = net->out_ctx;
    apr_bucket_brigade *bb = NULL;
    apr_bucket *bucket, *next, *flush_upto = NULL;
    apr_size_t bytes_in_brigade, non_file_bytes_in_brigade;
    int eor_buckets_in_brigade, morphing_bucket_in_brigade;
    apr_status_t rv;
    int loglevel = ap_get_conn_module_loglevel(c, APLOG_MODULE_INDEX);

    /* Fail quickly if the connection has already been aborted. */
    if (c->aborted) {
        if (new_bb != NULL) {
            apr_brigade_cleanup(new_bb);
        }
        return APR_ECONNABORTED;
    }

    if (ctx == NULL) {
        ctx = apr_pcalloc(c->pool, sizeof(*ctx));
        net->out_ctx = (core_output_filter_ctx_t *)ctx;
        /*
         * Need to create tmp brigade with correct lifetime. Passing
         * NULL to apr_brigade_split_ex would result in a brigade
         * allocated from bb->pool which might be wrong.
         */
        ctx->tmp_flush_bb = apr_brigade_create(c->pool, c->bucket_alloc);
        /* same for buffered_bb and ap_save_brigade */
        ctx->buffered_bb = apr_brigade_create(c->pool, c->bucket_alloc);
    }

    if (new_bb != NULL)
        bb = new_bb;

    if ((ctx->buffered_bb != NULL) &&
        !APR_BRIGADE_EMPTY(ctx->buffered_bb)) {
        if (new_bb != NULL) {
            APR_BRIGADE_PREPEND(bb, ctx->buffered_bb);
        }
        else {
            bb = ctx->buffered_bb;
        }
        c->data_in_output_filters = 0;
    }
    else if (new_bb == NULL) {
        return APR_SUCCESS;
    }

    /* Scan through the brigade and decide whether to attempt a write,
     * and how much to write, based on the following rules:
     *
     *  1) The new_bb is null: Do a nonblocking write of as much data as
     *     possible, then save the rest in ctx->buffered_bb.  (If new_bb == NULL,
     *     it probably means that the MPM is doing asynchronous write
     *     completion and has just determined that this connection
     *     is writable.)
     *
     *  2) Determine if and up to which bucket we need to do a blocking
     *     write:
     *
     *  a) The brigade contains a flush bucket: Do a blocking write
     *     of everything up to that point.
     *
     *  b) The request is in CONN_STATE_HANDLER state, and the brigade
     *     contains at least THRESHOLD_MAX_BUFFER bytes in non-file
     *     buckets: Do blocking writes until the amount of data in the
     *     buffer is less than THRESHOLD_MAX_BUFFER.  (The point of this
     *     rule is to provide flow control, in case a handler is
     *     streaming out lots of data faster than the data can be
     *     sent to the client.)
     *
     *  c) The request is in CONN_STATE_HANDLER state, and the brigade
     *     contains at least MAX_REQUESTS_IN_PIPELINE EOR buckets:
     *     Do blocking writes until less than MAX_REQUESTS_IN_PIPELINE EOR
     *     buckets are left. (The point of this rule is to prevent too many
     *     FDs being kept open by pipelined requests, possibly allowing a
     *     DoS).
     *
     *  d) The brigade contains a morphing bucket: If there was no other
     *     reason to do a blocking write yet, try reading the bucket. If its
     *     contents fit into memory before THRESHOLD_MAX_BUFFER is reached,
     *     everything is fine. Otherwise we need to do a blocking write
     *     up to and including the morphing bucket, because ap_save_brigade()
     *     would read the whole bucket into memory later on.
     *
     *  3) Actually do the blocking write up to the last bucket determined
     *     by rules 2a-d. The point of doing only one flush is to make as
     *     few calls to writev() as possible.
     *
     *  4) If the brigade contains at least THRESHOLD_MIN_WRITE
     *     bytes: Do a nonblocking write of as much data as possible,
     *     then save the rest in ctx->buffered_bb.
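     *
     *     (A condensed sketch of the rule-2 flush conditions follows this
     *     function.)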
     */

    if (new_bb == NULL) {
        rv = send_brigade_nonblocking(net->client_socket, bb,
                                      &(ctx->bytes_written), c);
        if (rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv)) {
            /* The client has aborted the connection */
            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c,
                          "core_output_filter: writing data to the network");
            apr_brigade_cleanup(bb);
            c->aborted = 1;
            return rv;
        }
        setaside_remaining_output(f, ctx, bb, c);
        return APR_SUCCESS;
    }

    bytes_in_brigade = 0;
    non_file_bytes_in_brigade = 0;
    eor_buckets_in_brigade = 0;
    morphing_bucket_in_brigade = 0;

    for (bucket = APR_BRIGADE_FIRST(bb); bucket != APR_BRIGADE_SENTINEL(bb);
         bucket = next) {
        next = APR_BUCKET_NEXT(bucket);

        if (!APR_BUCKET_IS_METADATA(bucket)) {
            if (bucket->length == (apr_size_t)-1) {
                /*
                 * A setaside of morphing buckets would read everything into
                 * memory. Instead, we will flush everything up to and
                 * including this bucket.
                 */
                morphing_bucket_in_brigade = 1;
            }
            else {
                bytes_in_brigade += bucket->length;
                if (!APR_BUCKET_IS_FILE(bucket))
                    non_file_bytes_in_brigade += bucket->length;
            }
        }
        else if (AP_BUCKET_IS_EOR(bucket)) {
            eor_buckets_in_brigade++;
        }

        if (APR_BUCKET_IS_FLUSH(bucket)
            || non_file_bytes_in_brigade >= THRESHOLD_MAX_BUFFER
            || morphing_bucket_in_brigade
            || eor_buckets_in_brigade > MAX_REQUESTS_IN_PIPELINE) {
            /* this segment of the brigade MUST be sent before returning. */

            if (loglevel >= APLOG_TRACE6) {
                char *reason = APR_BUCKET_IS_FLUSH(bucket) ?
                               "FLUSH bucket" :
                               (non_file_bytes_in_brigade >= THRESHOLD_MAX_BUFFER) ?
                               "THRESHOLD_MAX_BUFFER" :
                               morphing_bucket_in_brigade ? "morphing bucket" :
                               "MAX_REQUESTS_IN_PIPELINE";
                ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, c,
                              "will flush because of %s", reason);
                ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, c,
                              "seen in brigade%s: bytes: %" APR_SIZE_T_FMT
                              ", non-file bytes: %" APR_SIZE_T_FMT ", eor "
                              "buckets: %d, morphing buckets: %d",
                              flush_upto == NULL ? " so far"
                                                 : " since last flush point",
                              bytes_in_brigade,
                              non_file_bytes_in_brigade,
                              eor_buckets_in_brigade,
                              morphing_bucket_in_brigade);
            }
            /*
             * Defer the actual blocking write to avoid doing many writes.
             */
            flush_upto = next;

            bytes_in_brigade = 0;
            non_file_bytes_in_brigade = 0;
            eor_buckets_in_brigade = 0;
            morphing_bucket_in_brigade = 0;
        }
    }

    if (flush_upto != NULL) {
        ctx->tmp_flush_bb = apr_brigade_split_ex(bb, flush_upto,
                                                 ctx->tmp_flush_bb);
        if (loglevel >= APLOG_TRACE8) {
                ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, c,
                              "flushing now");
        }
        rv = send_brigade_blocking(net->client_socket, bb,
                                   &(ctx->bytes_written), c);
        if (rv != APR_SUCCESS) {
            /* The client has aborted the connection */
            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c,
                          "core_output_filter: writing data to the network");
            apr_brigade_cleanup(bb);
            c->aborted = 1;
            return rv;
        }
        if (loglevel >= APLOG_TRACE8) {
                ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, c,
                              "total bytes written: %" APR_SIZE_T_FMT,
                              ctx->bytes_written);
        }
        APR_BRIGADE_CONCAT(bb, ctx->tmp_flush_bb);
    }

    if (loglevel >= APLOG_TRACE8) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, c,
                      "brigade contains: bytes: %" APR_SIZE_T_FMT
                      ", non-file bytes: %" APR_SIZE_T_FMT
                      ", eor buckets: %d, morphing buckets: %d",
                      bytes_in_brigade, non_file_bytes_in_brigade,
                      eor_buckets_in_brigade, morphing_bucket_in_brigade);
    }

    if (bytes_in_brigade >= THRESHOLD_MIN_WRITE) {
        rv = send_brigade_nonblocking(net->client_socket, bb,
                                      &(ctx->bytes_written), c);
        if ((rv != APR_SUCCESS) && (!APR_STATUS_IS_EAGAIN(rv))) {
            /* The client has aborted the connection */
            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c,
                          "core_output_filter: writing data to the network");
            apr_brigade_cleanup(bb);
            c->aborted = 1;
            return rv;
        }
        if (loglevel >= APLOG_TRACE8) {
                ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, c,
                              "tried nonblocking write, total bytes "
                              "written: %" APR_SIZE_T_FMT,
                              ctx->bytes_written);
        }
    }

    setaside_remaining_output(f, ctx, bb, c);
    return APR_SUCCESS;
}
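
Condensing rules 2a-2d from the comment above: a blocking flush up to the current bucket is forced by a FLUSH bucket, by too many buffered non-file bytes, by a morphing bucket, or by too many pipelined EOR buckets. The predicate below is a sketch that mirrors the condition checked in the loop; THRESHOLD_MAX_BUFFER and MAX_REQUESTS_IN_PIPELINE are the constants defined elsewhere in core_filters.c.

static int must_flush_up_to(apr_bucket *bucket,
                            apr_size_t non_file_bytes_in_brigade,
                            int eor_buckets_in_brigade,
                            int morphing_bucket_in_brigade)
{
    return APR_BUCKET_IS_FLUSH(bucket)                              /* 2a */
        || non_file_bytes_in_brigade >= THRESHOLD_MAX_BUFFER        /* 2b */
        || morphing_bucket_in_brigade                               /* 2d */
        || eor_buckets_in_brigade > MAX_REQUESTS_IN_PIPELINE;       /* 2c */
}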