Example #1
static int ef_input_filter(ap_filter_t *f, apr_bucket_brigade *bb,
                           ap_input_mode_t mode, apr_read_type_e block,
                           apr_off_t readbytes)
{
    apr_status_t rv;
    apr_bucket *b;
    char *buf;
    apr_size_t len;
    char *zero;

    rv = ap_get_brigade(f->next, bb, mode, block, readbytes);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    APR_BRIGADE_FOREACH(b, bb) {
        if (!APR_BUCKET_IS_EOS(b)) {
            if ((rv = apr_bucket_read(b, (const char **)&buf, &len, APR_BLOCK_READ)) != APR_SUCCESS) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r, "apr_bucket_read() failed");
                return rv;
            }
            ap_log_error(APLOG_MARK, APLOG_ERR, 0, NULL,
                         "apr_bucket_read -> %" APR_SIZE_T_FMT " bytes", len);
            /* Note: this rewrites the bucket's buffer in place, which is only
             * safe for buckets that own writable memory (see the comment in
             * Example #2 on why this is wrong for file or socket buckets).
             */
            while ((zero = memchr(buf, '0', len))) {
                *zero = 'a';
            }
        }
        else
            ap_log_error(APLOG_MARK, APLOG_ERR, 0, NULL, "got eos bucket");
    }

    return rv;
}
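To use a filter like this one, a module has to register it and insert it into the request's filter chain. A minimal registration sketch, assuming a hypothetical filter name "EF-INPUT-TEST" and hook functions that are not part of the example above:

#include "httpd.h"
#include "http_config.h"
#include "util_filter.h"

/* Attach the input filter to every request (illustrative policy only). */
static void ef_insert_filter(request_rec *r)
{
    ap_add_input_filter("EF-INPUT-TEST", NULL, r, r->connection);
}

static void ef_register_hooks(apr_pool_t *p)
{
    ap_register_input_filter("EF-INPUT-TEST", ef_input_filter, NULL,
                             AP_FTYPE_RESOURCE);
    ap_hook_insert_filter(ef_insert_filter, NULL, NULL, APR_HOOK_MIDDLE);
}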
Example #2
static apr_status_t CaseFilterInFilter(ap_filter_t *f,
                                       apr_bucket_brigade *pbbOut,
                                       ap_input_mode_t eMode,
                                       apr_read_type_e eBlock,
                                       apr_off_t nBytes)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    CaseFilterInContext *pCtx;
    apr_status_t ret;

    if (!(pCtx = f->ctx)) {
        f->ctx = pCtx = apr_palloc(r->pool, sizeof *pCtx);
        pCtx->pbbTmp = apr_brigade_create(r->pool, c->bucket_alloc);
    }

    if (APR_BRIGADE_EMPTY(pCtx->pbbTmp)) {
        ret = ap_get_brigade(f->next, pCtx->pbbTmp, eMode, eBlock, nBytes);

        if (eMode == AP_MODE_EATCRLF || ret != APR_SUCCESS)
            return ret;
    }

    while (!APR_BRIGADE_EMPTY(pCtx->pbbTmp)) {
        apr_bucket *pbktIn = APR_BRIGADE_FIRST(pCtx->pbbTmp);
        apr_bucket *pbktOut;
        const char *data;
        apr_size_t len;
        char *buf;
        apr_size_t n;

        /* It is tempting to do this...
         * APR_BUCKET_REMOVE(pB);
         * APR_BRIGADE_INSERT_TAIL(pbbOut,pB);
         * and change the case of the bucket data, but that would be wrong
         * for a file or socket buffer, for example...
         */

        if (APR_BUCKET_IS_EOS(pbktIn)) {
            APR_BUCKET_REMOVE(pbktIn);
            APR_BRIGADE_INSERT_TAIL(pbbOut, pbktIn);
            break;
        }

        ret=apr_bucket_read(pbktIn, &data, &len, eBlock);
        if (ret != APR_SUCCESS)
            return ret;

        buf = ap_malloc(len);
        for (n=0 ; n < len ; ++n) {
            buf[n] = apr_toupper(data[n]);
        }

        pbktOut = apr_bucket_heap_create(buf, len, 0, c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(pbbOut, pbktOut);
        apr_bucket_delete(pbktIn);
    }

    return APR_SUCCESS;
}
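The example dereferences pCtx->pbbTmp, but the context type is defined elsewhere in the module. A minimal sketch of what the definition is assumed to look like:

/* Assumed context layout; the code above only requires the pbbTmp member. */
typedef struct
{
    apr_bucket_brigade *pbbTmp;
} CaseFilterInContext;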
Example #3
static apr_status_t apreq_output_filter_test(ap_filter_t *f, apr_bucket_brigade *bb)
{
    request_rec *r = f->r;
    apreq_handle_t *handle;
    apr_bucket_brigade *eos;
    struct ctx_t ctx = {r, bb};
    const apr_table_t *t;

    if (!APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb)))
        return ap_pass_brigade(f->next,bb);

    eos = apr_brigade_split(bb, APR_BRIGADE_LAST(bb));

    handle = apreq_handle_apache2(r);
    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r,
                  "appending parsed data");

    apr_brigade_puts(bb, NULL, NULL, "\n--APREQ OUTPUT FILTER--\nARGS:\n");

    apreq_args(handle, &t);
    if (t != NULL)
        apr_table_do(dump_table, &ctx, t, NULL);

    apreq_body(handle, &t);
    if (t != NULL) {
        apr_brigade_puts(bb,NULL,NULL,"BODY:\n");
        apr_table_do(dump_table, &ctx, t, NULL);
    }
    APR_BRIGADE_CONCAT(bb,eos);
    return ap_pass_brigade(f->next,bb);
}
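The struct ctx_t initializer {r, bb} and the apr_table_do() calls imply a small context type and a callback matching the int (*)(void *, const char *, const char *) shape. A hedged sketch of the assumed helpers, which may differ from the real module:

struct ctx_t {
    request_rec *r;
    apr_bucket_brigade *bb;
};

/* apr_table_do() callback: append each "key => value" pair to the brigade. */
static int dump_table(void *data, const char *key, const char *value)
{
    struct ctx_t *ctx = data;
    apr_brigade_printf(ctx->bb, NULL, NULL, "\t%s => %s\n", key, value);
    return 1; /* non-zero keeps apr_table_do() iterating */
}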
Example #4
AP_DECLARE(apr_status_t) ap_pass_brigade(ap_filter_t *next,
                                         apr_bucket_brigade *bb)
{
    if (next) {
        apr_bucket *e;
        if ((e = APR_BRIGADE_LAST(bb)) && APR_BUCKET_IS_EOS(e) && next->r) {
            /* This is only safe because HTTP_HEADER filter is always in
             * the filter stack.   This ensures that there is ALWAYS a
             * request-based filter that we can attach this to.  If the
             * HTTP_FILTER is removed, and another filter is not put in its
             * place, then handlers like mod_cgi, which attach their own
             * EOS bucket to the brigade will be broken, because we will
             * get two EOS buckets on the same request.
             */
            next->r->eos_sent = 1;

            /* remember the eos for internal redirects, too */
            if (next->r->prev) {
                request_rec *prev = next->r->prev;

                while (prev) {
                    prev->eos_sent = 1;
                    prev = prev->prev;
                }
            }
        }
        return next->frec->filter_func.out_func(next, bb);
    }
    return AP_NOBODY_WROTE;
}
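For context, a typical caller builds a brigade that ends in an EOS bucket and hands it to ap_pass_brigade(). A minimal handler-side sketch, with error handling kept deliberately small:

static int hello_handler(request_rec *r)
{
    apr_bucket_brigade *bb = apr_brigade_create(r->pool,
                                                r->connection->bucket_alloc);

    ap_set_content_type(r, "text/plain");
    apr_brigade_puts(bb, NULL, NULL, "hello\n");
    APR_BRIGADE_INSERT_TAIL(bb,
                            apr_bucket_eos_create(r->connection->bucket_alloc));

    return ap_pass_brigade(r->output_filters, bb) == APR_SUCCESS
           ? OK : HTTP_INTERNAL_SERVER_ERROR;
}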
Example #5
static int read_msg(request_rec *r, msg_t **msg)
{
    conn_rec *c = r->connection;
    apr_bucket_brigade *bb = apr_brigade_create(r->pool, c->bucket_alloc);
    char *dbuf = apr_palloc(r->pool, MAX_MSG_SIZE + 1); /* +1 for the trailing NUL */
    int dbpos = 0;
    int seen_eos = 0;

    do {
        apr_status_t rv;
        apr_bucket *bucket;

        rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
                            APR_BLOCK_READ, HUGE_STRING_LEN);

        if (rv != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                          "reading request entity data");
            return HTTP_INTERNAL_SERVER_ERROR;
        }

        for (bucket = APR_BRIGADE_FIRST(bb);
                bucket != APR_BRIGADE_SENTINEL(bb);
                bucket = APR_BUCKET_NEXT(bucket))
        {
            const char *data;
            apr_size_t len;

            if (APR_BUCKET_IS_EOS(bucket)) {
                seen_eos = 1;
                break;
            }

            /* We can't do much with this. */
            if (APR_BUCKET_IS_FLUSH(bucket))
                continue;

            /* read */
            apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);

            if (dbpos < MAX_MSG_SIZE) {
                int cursize = (dbpos + len) > MAX_MSG_SIZE ?
                              (MAX_MSG_SIZE - dbpos) : len;

                memcpy(dbuf + dbpos, data, cursize);
                dbpos += cursize;
            }
        }

        apr_brigade_cleanup(bb);

    } while (!seen_eos);

    (*msg) = apr_pcalloc(r->pool, sizeof(msg_t));
    (*msg)->data = dbuf;
    (*msg)->data[dbpos] = '\0';
    (*msg)->size = dbpos;

    return OK;
}
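The function only fills (*msg)->data and (*msg)->size, so msg_t needs at least those two members. A sketch of the assumed type:

/* Assumed message type; only the members used above are shown. */
typedef struct {
    char *data;       /* NUL-terminated copy of the request body */
    apr_size_t size;  /* number of body bytes stored in data */
} msg_t;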
Example #6
static apr_status_t
read_complete_body(request_rec *r, apr_bucket_brigade *kept_body)
{
    apr_bucket_brigade *tmp_bb;
    apr_bucket *t_bucket1, *t_bucket2;
    unsigned short eos_seen = 0;
    apr_status_t status;

    tmp_bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);

    while (!eos_seen) {
        status = ap_get_brigade(
                        r->input_filters,
                        tmp_bb,
                        AP_MODE_READBYTES,
                        APR_BLOCK_READ,
                        HUGE_STRING_LEN);

        /* This means the filter discovered an error.
         * Furthermore, the input filter has already handled the error and
         * sent something down the output chain.
         * For example, ap_http_filter does this if LimitRequestBody is reached.
         */
        if (status == AP_FILTER_ERROR) {
            apr_brigade_destroy(tmp_bb);
            return AP_FILTER_ERROR;
        }

        /* Cool no need to search for the eos bucket */
        if (APR_STATUS_IS_EOF(status)) {
            apr_brigade_destroy(tmp_bb);
            return APR_SUCCESS;
        }

        if (status != APR_SUCCESS) {
            apr_brigade_destroy(tmp_bb);
            return status;
        }

        ITER_BRIGADE(t_bucket1, tmp_bb) {

            apr_bucket_copy(t_bucket1, &t_bucket2);

            /* If SSL is used TRANSIENT buckets are returned.
             * However we need this bucket for a longer period than
             * this function call, hence 'setaside' the bucket.
             */
            if (APR_BUCKET_IS_TRANSIENT(t_bucket2)) {
                apr_bucket_setaside(t_bucket2, r->pool);
            }

            APR_BRIGADE_INSERT_TAIL(kept_body, t_bucket2);

            if (!eos_seen && APR_BUCKET_IS_EOS(t_bucket1)) {
                eos_seen = 1;
            }
        }
        apr_brigade_cleanup(tmp_bb);
    }

    return APR_SUCCESS;
}
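ITER_BRIGADE is not an APR macro; it is presumably a module-local shorthand for the usual sentinel loop. A sketch of the assumed definition:

/* Assumed local helper, equivalent to the explicit loop used in other examples. */
#define ITER_BRIGADE(e, bb)                   \
    for ((e) = APR_BRIGADE_FIRST(bb);         \
         (e) != APR_BRIGADE_SENTINEL(bb);     \
         (e) = APR_BUCKET_NEXT(e))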
Example #7
/**
 * Remove the cache lock, if present.
 *
 * First, try to close the file handle, whose delete-on-close should
 * kill the file. Otherwise, just delete the file by name.
 *
 * If no lock name has yet been calculated, do the calculation of the
 * lock name first before trying to delete the file.
 *
 * If an optional bucket brigade is passed, the lock will only be
 * removed if the bucket brigade contains an EOS bucket.
 */
apr_status_t cache_remove_lock(cache_server_conf *conf,
        cache_request_rec *cache, request_rec *r, apr_bucket_brigade *bb)
{
    void *dummy;
    const char *lockname;

    if (!conf || !conf->lock || !conf->lockpath) {
        /* no locks configured, leave */
        return APR_SUCCESS;
    }
    if (bb) {
        apr_bucket *e;
        int eos_found = 0;
        for (e = APR_BRIGADE_FIRST(bb);
             e != APR_BRIGADE_SENTINEL(bb);
             e = APR_BUCKET_NEXT(e))
        {
            if (APR_BUCKET_IS_EOS(e)) {
                eos_found = 1;
                break;
            }
        }
        if (!eos_found) {
            /* no eos found in brigade, don't delete anything just yet,
             * we are not done.
             */
            return APR_SUCCESS;
        }
    }
    apr_pool_userdata_get(&dummy, CACHE_LOCKFILE_KEY, r->pool);
    if (dummy) {
        return apr_file_close((apr_file_t *)dummy);
    }
    apr_pool_userdata_get(&dummy, CACHE_LOCKNAME_KEY, r->pool);
    lockname = (const char *)dummy;
    if (!lockname) {
        char dir[5];

        /* create the key if it doesn't exist */
        if (!cache->key) {
            cache_generate_key(r, r->pool, &cache->key);
        }

        /* create a hashed filename from the key, and save it for later */
        lockname = ap_cache_generate_name(r->pool, 0, 0, cache->key);

        /* lock files represent discrete just-went-stale URLs "in flight", so
         * we support a simple two level directory structure, more is overkill.
         */
        dir[0] = '/';
        dir[1] = lockname[0];
        dir[2] = '/';
        dir[3] = lockname[1];
        dir[4] = 0;

        lockname = apr_pstrcat(r->pool, conf->lockpath, dir, "/", lockname, NULL);
    }
    return apr_file_remove(lockname, r->pool);
}
Example #8
static apr_status_t tmpfile_filter(ap_filter_t *f, apr_bucket_brigade *bbout,
	ap_input_mode_t mode, apr_read_type_e block, apr_off_t nbytes) {

  apr_bucket_brigade* bbin = apr_brigade_create(f->r->pool,
	     f->r->connection->bucket_alloc);

  apr_file_t* tmpfile ;
  char* tmpname = apr_pstrdup(f->r->pool, "/tmp/mod-upload.XXXXXX") ;

  if ( f->ctx ) {
    APR_BRIGADE_INSERT_TAIL(bbout, apr_bucket_eos_create(bbout->bucket_alloc)) ;
    return APR_SUCCESS ;
  }
  if ( apr_file_mktemp(&tmpfile, tmpname, KEEPONCLOSE, f->r->pool) != APR_SUCCESS ) {
    /* can't create the temp file: remove ourselves and pass the data through */
    ap_remove_input_filter(f) ;
    return ap_get_brigade(f->next, bbout, mode, block, nbytes) ;
  }
  apr_pool_cleanup_register(f->r->pool, tmpfile,
		(void*)apr_file_close, apr_pool_cleanup_null) ;

  for ( ; ; ) {
    apr_bucket* b ;
    const char* ptr = 0 ;
    apr_size_t bytes ;
#ifdef DEBUG
    ap_log_rerror(APLOG_MARK,APLOG_DEBUG,0, f->r, "get_brigade") ;
#endif
    ap_get_brigade(f->next, bbin, AP_MODE_READBYTES, APR_BLOCK_READ, BUFLEN) ;
    for ( b = APR_BRIGADE_FIRST(bbin) ;
	b != APR_BRIGADE_SENTINEL(bbin) && ! f->ctx ;
	b = APR_BUCKET_NEXT(b) ) {
      if ( APR_BUCKET_IS_EOS(b) ) {
	f->ctx = f ;	// just using it as a flag; any nonzero will do
	apr_file_flush(tmpfile) ;
	apr_brigade_puts(bbout, ap_filter_flush, f, tmpname) ;
	APR_BRIGADE_INSERT_TAIL(bbout,
		apr_bucket_eos_create(bbout->bucket_alloc) ) ;
      } else if ( apr_bucket_read(b, &ptr, &bytes, APR_BLOCK_READ)
		== APR_SUCCESS ) {
#ifdef DEBUG
  ap_log_rerror(APLOG_MARK,APLOG_DEBUG,0, f->r, "	%d bytes in bucket", bytes) ;
#endif
	apr_file_write(tmpfile, ptr, &bytes) ;
      }
    }
    if ( f->ctx )
      break ;
    else
      apr_brigade_cleanup(bbin) ;
  }

  apr_brigade_destroy(bbin) ;

  return APR_SUCCESS ;
}
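KEEPONCLOSE and BUFLEN are module-local macros rather than APR names. A sketch of plausible definitions, assuming the intent is to keep the temp file when its handle is closed and to read input in moderate chunks:

/* Assumed module constants; apr_file_mktemp() takes apr_file_open() flags. */
#define KEEPONCLOSE (APR_CREATE | APR_READ | APR_WRITE | APR_EXCL)
#define BUFLEN      8192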
Example #9
int h2_util_has_flush_or_eos(apr_bucket_brigade *bb) {
    apr_bucket *b;
    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b))
    {
        if (APR_BUCKET_IS_EOS(b) || APR_BUCKET_IS_FLUSH(b)) {
            return 1;
        }
    }
    return 0;
}
Example #10
static apr_status_t CaseFilterOutFilter(ap_filter_t *f,
                                        apr_bucket_brigade *pbbIn)
    {
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    apr_bucket *pbktIn;
    apr_bucket_brigade *pbbOut;

    pbbOut=apr_brigade_create(r->pool, c->bucket_alloc);
    for (pbktIn = APR_BRIGADE_FIRST(pbbIn);
         pbktIn != APR_BRIGADE_SENTINEL(pbbIn);
         pbktIn = APR_BUCKET_NEXT(pbktIn))
    {
        const char *data;
        apr_size_t len;
        char *buf;
        apr_size_t n;
        apr_bucket *pbktOut;

        if(APR_BUCKET_IS_EOS(pbktIn))
            {
            apr_bucket *pbktEOS=apr_bucket_eos_create(c->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(pbbOut,pbktEOS);
            continue;
            }

        /* read */
        apr_bucket_read(pbktIn,&data,&len,APR_BLOCK_READ);

        /* write */
        buf = apr_bucket_alloc(len, c->bucket_alloc);
        for(n=0 ; n < len ; ++n)
            buf[n] = apr_toupper(data[n]);

        pbktOut = apr_bucket_heap_create(buf, len, apr_bucket_free,
                                         c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(pbbOut,pbktOut);
        }

    /* Q: is there any advantage to passing a brigade for each bucket?
     * A: obviously, it can cut down server resource consumption, if this
     * experimental module was fed a file of 4MB, it would be using 8MB for
     * the 'read' buckets and the 'write' buckets.
     *
     * Note it is more efficient to consume (destroy) each bucket as it's
     * processed above than to do a single cleanup down here.  In any case,
     * don't let our caller pass the same buckets to us, twice;
     */
    apr_brigade_cleanup(pbbIn);
    return ap_pass_brigade(f->next,pbbOut);
    }
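As with the input variant, this output filter only does anything once it is registered and inserted. A minimal sketch, assuming the filter name "CaseFilter" and hook functions not shown in the example:

/* Registration sketch; the filter name and hook ordering are illustrative. */
static void case_filter_insert(request_rec *r)
{
    ap_add_output_filter("CaseFilter", NULL, r, r->connection);
}

static void case_filter_hooks(apr_pool_t *p)
{
    ap_register_output_filter("CaseFilter", CaseFilterOutFilter, NULL,
                              AP_FTYPE_RESOURCE);
    ap_hook_insert_filter(case_filter_insert, NULL, NULL, APR_HOOK_MIDDLE);
}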
Example #11
apr_status_t jxr_append_brigade(request_rec *r, apr_bucket_brigade *dest, apr_bucket_brigade *bb, int *eos_seen)
{
	apr_size_t max_msglen = MAX_PACKET_SIZE - sizeof(Jaxer_Header);
	apr_status_t rv;

	while (!APR_BRIGADE_EMPTY(bb)) 
	{
		apr_size_t readlen;
		const char *buffer;
		
		apr_bucket *e = APR_BRIGADE_FIRST(bb);

		if (APR_BUCKET_IS_EOS(e) )
		{
			apr_bucket_delete(e);
			if (eos_seen)
				*eos_seen = 1;
			continue;
		}
		if (APR_BUCKET_IS_METADATA(e)) {
			apr_bucket_delete(e);
			continue;
		}

		
		/* Read the bucket now */
		if ((rv = apr_bucket_read(e, &buffer, &readlen, APR_BLOCK_READ)) != APR_SUCCESS) 
		{
			ap_log_perror(APLOG_MARK, APLOG_INFO, rv, r->pool, "mod_jaxer: can't read data from handler");
			return rv;
		}
		
		if (readlen > max_msglen)
		{
			apr_bucket_split(e, max_msglen);
		}
		else
		{
			APR_BUCKET_REMOVE(e);
			APR_BRIGADE_INSERT_TAIL(dest, e);
		}
	}
	if ((rv=apr_brigade_destroy(bb)) != APR_SUCCESS)
	{
		ap_log_perror(APLOG_MARK, APLOG_INFO, rv, r->pool, "mod_jaxer: failed to destroy brigade.");
		return rv;
	}

	return APR_SUCCESS;
}
Example #12
apr_status_t jxr_send_brigade(jaxer_connection * ac, apr_bucket_brigade * bb)
{
	apr_bucket *bucket;
	apr_status_t rv;
	
	compat_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, ac->request, "mod_jaxer: sending a brigade (sock=%d)", ac->sock);

	for (bucket = APR_BRIGADE_FIRST(bb);
		 bucket != APR_BRIGADE_SENTINEL(bb);
		 bucket = APR_BUCKET_NEXT(bucket))
	{
		char *write_buf;
		apr_size_t write_buf_len;
		
		if (APR_BUCKET_IS_EOS(bucket))
			break;

		if (APR_BUCKET_IS_FLUSH(bucket))
			continue;

		if ((rv =
			 apr_bucket_read(bucket, (const char **)&write_buf, &write_buf_len,
							 APR_BLOCK_READ)) != APR_SUCCESS) {
			compat_log_rerror(APLOG_MARK, APLOG_WARNING, rv, ac->request,
						 "mod_jaxer: can't read request from bucket");
			return rv;
		}

		{
			int type = jxr_msg_get_type(write_buf);
			apr_size_t pos; // not used
			apr_size_t len = jxr_msg_get_length(write_buf, &pos);
			compat_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, ac->request, "mod_jaxer: sending a brigade (type=%s len=%d)", sBlockType[type], len);
		}

		/* Write the buffer to jaxer server */
		if(0 > jxr_socket_sendfull(ac, write_buf, (int) write_buf_len))
		{
		
			compat_log_rerror(APLOG_MARK, APLOG_WARNING, APR_FROM_OS_ERROR(rv), ac->request,
					"mod_jaxer: can't write to socket");
			return apr_get_os_error();
		}
	}
	compat_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, ac->request, "mod_jaxer: sent a brigade (sock=%d)", ac->sock);

	return APR_SUCCESS;
}
Example #13
static int cdn_html_filter(ap_filter_t * f, apr_bucket_brigade * bb)
{
  apr_bucket *b;
  const char *buf = 0;
  apr_size_t bytes = 0;

  /* now do HTML filtering if necessary, and pass the brigade onward */
  saxctxt *ctxt = check_html_filter_init(f);
  if (!ctxt)
    return ap_pass_brigade(f->next, bb);

  for(b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb); b = APR_BUCKET_NEXT(b)) {
    if(APR_BUCKET_IS_EOS(b) || APR_BUCKET_IS_FLUSH(b)) {
      consume_buffer(ctxt, buf, 0, 1);
      APR_BUCKET_REMOVE(b);
      APR_BRIGADE_INSERT_TAIL(ctxt->bb, b);
      ap_pass_brigade(ctxt->f->next, ctxt->bb);
      return APR_SUCCESS;
    }

    if(apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS && buf) {
      if(ctxt->parser == NULL) {

        /*
         * for now, always output utf-8; we could incorporate
         * mod_proxy_html's output transcoding with little problem if
         * necessary
         */
        ap_set_content_type(f->r, "text/html;charset=utf-8");

        if(!initialize_parser(f, ctxt, &buf, bytes)) {
          apr_status_t rv = ap_pass_brigade(ctxt->f->next, bb);
          ap_remove_output_filter(f);
          return rv;
        } else
          ap_fputs(f->next, ctxt->bb, ctxt->cfg->doctype);
      }
      consume_buffer(ctxt, buf, bytes, 0);
    }

  }

  /*ap_fflush(ctxt->f->next, ctxt->bb) ; */      /* uncomment for debug */
  apr_brigade_cleanup(bb);
  return APR_SUCCESS;
}
Example #14
static int getsfunc_BRIGADE(char *buf, int len, void *arg)
{
    apr_bucket_brigade *bb = (apr_bucket_brigade *)arg;
    const char *dst_end = buf + len - 1; /* leave room for terminating null */
    char *dst = buf;
    apr_bucket *e = APR_BRIGADE_FIRST(bb);
    apr_status_t rv;
    int done = 0;

    while ((dst < dst_end) && !done && e != APR_BRIGADE_SENTINEL(bb)
           && !APR_BUCKET_IS_EOS(e)) {
        const char *bucket_data;
        apr_size_t bucket_data_len;
        const char *src;
        const char *src_end;
        apr_bucket * next;

        rv = apr_bucket_read(e, &bucket_data, &bucket_data_len,
                             APR_BLOCK_READ);
        if (rv != APR_SUCCESS || (bucket_data_len == 0)) {
            *dst = '\0';
            return APR_STATUS_IS_TIMEUP(rv) ? -1 : 0;
        }
        src = bucket_data;
        src_end = bucket_data + bucket_data_len;
        while ((src < src_end) && (dst < dst_end) && !done) {
            if (*src == '\n') {
                done = 1;
            }
            else if (*src != '\r') {
                *dst++ = *src;
            }
            src++;
        }

        if (src < src_end) {
            apr_bucket_split(e, src - bucket_data);
        }
        next = APR_BUCKET_NEXT(e);
        apr_bucket_delete(e);
        e = next;
    }
    *dst = 0;
    return done;
}
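This reader has the gets-style shape expected by ap_scan_script_header_err_core(), which is presumably how it is consumed. A hedged usage sketch:

/* Sketch: parse CGI-style response headers out of a brigade of script output. */
static int parse_script_headers(request_rec *r, apr_bucket_brigade *bb)
{
    char sbuf[MAX_STRING_LEN];
    return ap_scan_script_header_err_core(r, sbuf, getsfunc_BRIGADE, bb);
}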
Example #15
static void discard_script_output(apr_bucket_brigade *bb)
{
    apr_bucket *e;
    const char *buf;
    apr_size_t len;
    apr_status_t rv;
    e = APR_BRIGADE_FIRST(bb);
    while (e != APR_BRIGADE_SENTINEL(bb)) {
        if (APR_BUCKET_IS_EOS(e)) {
            break;
        }
        rv = apr_bucket_read(e, &buf, &len, APR_BLOCK_READ);
        if (rv != APR_SUCCESS) {
            break;
        }
        e = APR_BUCKET_NEXT(e);
    }
}
Example #16
int h2_util_bb_has_data_or_eos(apr_bucket_brigade *bb)
{
    apr_bucket *b;
    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b))
    {
        if (APR_BUCKET_IS_METADATA(b)) {
            if (APR_BUCKET_IS_EOS(b)) {
                return 1;
            }
        }
        else {
            return 1;
        }
    }
    return 0;
}
Example #17
int h2_util_has_eos(apr_bucket_brigade *bb, apr_size_t len)
{
    apr_bucket *b, *end;
    
    apr_status_t status = last_not_included(bb, len, 0, 0, &end);
    if (status != APR_SUCCESS) {
        return status;
    }
    
    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb) && b != end;
         b = APR_BUCKET_NEXT(b))
    {
        if (APR_BUCKET_IS_EOS(b)) {
            return 1;
        }
    }
    return 0;
}
Example #18
/**
 * @internal
 *
 * "Sniffs" the output (response) data from the connection stream.
 */
static int ironbee_output_filter (ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_bucket *b;
#if 0
    conn_rec *c = f->c;
    ironbee_conn_context *ctx = f->ctx;
    ib_conn_t *iconn = ctx->iconn;
    ib_core_cfg_t *corecfg;
    int buffering = 0;

    /* Configure. */
    ib_context_module_config(iconn->ctx, ib_core_module(), (void *)&corecfg);
    if (corecfg != NULL) {
        buffering = (int)corecfg->buffer_res;
    }
#endif


    for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb); b = APR_BUCKET_NEXT(b)) {
#if 0
        /// @todo Should this be done?  Maybe only for proxy?
        if (APR_BUCKET_IS_EOS(b)) {
            /// @todo Do we need to do this? Maybe only for proxy.
            apr_bucket *flush = apr_bucket_flush_create(f->c->bucket_alloc);
            APR_BUCKET_INSERT_BEFORE(b, flush);
        }

        if (buffering) {
            /// @todo setaside into our own pool to destroy later???
            apr_bucket_setaside(b, c->pool);
            process_bucket(f, b);
            APR_BUCKET_REMOVE(b);
        }
        else {
#endif
            process_bucket(f, b);
#if 0
        }
#endif
    }

    return ap_pass_brigade(f->next, bb);
}
Example #19
// Called when Apache outputs data:
static apr_status_t firstbyte_out_filter(ap_filter_t *f,
                                     apr_bucket_brigade *bb) {
    firstbyte_config_t *cf = ap_get_module_config(f->c->conn_config, &log_firstbyte_module);

	apr_bucket *b = APR_BRIGADE_LAST(bb);
    /* End of data, make sure we flush */
    if (APR_BUCKET_IS_EOS(b)) {
        APR_BRIGADE_INSERT_TAIL(bb,
                                apr_bucket_flush_create(f->c->bucket_alloc));
        APR_BUCKET_REMOVE(b);
        apr_bucket_destroy(b);
    }

	if (cf->first_out==1) {
		cf->first_out_time = apr_time_now();
		cf->first_out = 0;
	}

    return ap_pass_brigade(f->next, bb);
}
Example #20
static int dumpio_output_filter (ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_bucket *b;
    conn_rec *c = f->c;
    
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server, "mod_dumpio: %s", f->frec->name) ;
    
    for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb); b = APR_BUCKET_NEXT(b)) {
        /*
         * If we ever see an EOS, make sure to FLUSH.
         */
        if (APR_BUCKET_IS_EOS(b)) {
            apr_bucket *flush = apr_bucket_flush_create(f->c->bucket_alloc);
            APR_BUCKET_INSERT_BEFORE(b, flush);
        }
        dumpit(f, b);
    }
    
    return ap_pass_brigade(f->next, bb) ;
}
Example #21
static apr_status_t urlReplaceFilterOutFilter(ap_filter_t *f,
                                        apr_bucket_brigade *pbbIn)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    apr_bucket *pbktIn;
    apr_bucket_brigade *pbbOut;

    pbbOut=apr_brigade_create(r->pool, c->bucket_alloc);
    for (pbktIn = APR_BRIGADE_FIRST(pbbIn);
            pbktIn != APR_BRIGADE_SENTINEL(pbbIn);
            pbktIn = APR_BUCKET_NEXT(pbktIn))
    {
        const char *data;
        apr_size_t len;
        char *buf;
        apr_size_t n;
        apr_bucket *pbktOut;

        if (APR_BUCKET_IS_EOS(pbktIn))
        {
            apr_bucket *pbktEOS=apr_bucket_eos_create(c->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(pbbOut,pbktEOS);
            continue;
        }

        /* read */
        apr_bucket_read(pbktIn,&data,&len,APR_BLOCK_READ);

        /* write */
        buf = apr_bucket_alloc(len, c->bucket_alloc);
        for (n=0 ; n < len ; ++n)
            buf[n] = apr_toupper(data[n]);

        pbktOut = apr_bucket_heap_create(buf, len, apr_bucket_free,
                                         c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(pbbOut,pbktOut);
    }
    apr_brigade_cleanup(pbbIn);
    return ap_pass_brigade(f->next,pbbOut);
}
Example #22
ngx_int_t move_brigade_to_chain(apr_bucket_brigade *bb,
        ngx_chain_t **ll, ngx_pool_t *pool)
{
    apr_bucket  *e;
    ngx_buf_t   *buf;
    ngx_chain_t *cl;

    cl = NULL;

    if (APR_BRIGADE_EMPTY(bb)) {
        *ll = NULL;
        return NGX_OK;
    }

    for (e = APR_BRIGADE_FIRST(bb);
            e != APR_BRIGADE_SENTINEL(bb);
            e = APR_BUCKET_NEXT(e)) {

        if (APR_BUCKET_IS_EOS(e)) {
            if (cl == NULL) {
                cl = ngx_alloc_chain_link(pool);
                if (cl == NULL) {
                    break;
                }
                
                cl->buf = ngx_calloc_buf(pool);
                if (cl->buf == NULL) {
                    break;
                }

                cl->buf->last_buf = 1;
                cl->next = NULL;
                *ll = cl;
            } else {
                cl->next = NULL;
                cl->buf->last_buf = 1;
            }
            apr_brigade_cleanup(bb);
            return NGX_OK;
        }

        if (APR_BUCKET_IS_METADATA(e)) {
            continue;
        }

        buf = apr_bucket_to_ngx_buf(e, pool);
        if (buf == NULL) {
            break;
        }

        cl = ngx_alloc_chain_link(pool);
        if (cl == NULL) {
            break;
        }

        cl->buf = buf;
        cl->next = NULL;
        *ll = cl;
        ll = &cl->next;
    }


    apr_brigade_cleanup(bb);
    /* no EOS bucket was found, or an allocation above failed */
    return NGX_ERROR;
}
Example #23
static apr_status_t store_body(cache_handle_t *h, request_rec *r,
                               apr_bucket_brigade *bb)
{
    apr_bucket *e;
    apr_status_t rv;
    disk_cache_object_t *dobj = (disk_cache_object_t *) h->cache_obj->vobj;
    disk_cache_conf *conf = ap_get_module_config(r->server->module_config,
                                                 &disk_cache_module);

    /* We write to a temp file and then atomically rename the file over
     * in file_cache_el_final().
     */
    if (!dobj->tfd) {
        rv = apr_file_mktemp(&dobj->tfd, dobj->tempfile,
                             APR_CREATE | APR_WRITE | APR_BINARY |
                             APR_BUFFERED | APR_EXCL, r->pool);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        dobj->file_size = 0;
    }

    for (e = APR_BRIGADE_FIRST(bb);
         e != APR_BRIGADE_SENTINEL(bb);
         e = APR_BUCKET_NEXT(e))
    {
        const char *str;
        apr_size_t length, written;
        rv = apr_bucket_read(e, &str, &length, APR_BLOCK_READ);
        if (rv != APR_SUCCESS) {
            ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
                         "cache_disk: Error when reading bucket for URL %s",
                         h->cache_obj->key);
            /* Remove the intermediate cache file and return non-APR_SUCCESS */
            file_cache_errorcleanup(dobj, r);
            return rv;
        }
        rv = apr_file_write_full(dobj->tfd, str, length, &written);
        if (rv != APR_SUCCESS) {
            ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
                         "cache_disk: Error when writing cache file for URL %s",
                         h->cache_obj->key);
            /* Remove the intermediate cache file and return non-APR_SUCCESS */
            file_cache_errorcleanup(dobj, r);
            return rv;
        }
        dobj->file_size += written;
        if (dobj->file_size > conf->maxfs) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                         "cache_disk: URL %s failed the size check "
                         "(%" APR_OFF_T_FMT " > %" APR_OFF_T_FMT ")",
                         h->cache_obj->key, dobj->file_size, conf->maxfs);
            /* Remove the intermediate cache file and return non-APR_SUCCESS */
            file_cache_errorcleanup(dobj, r);
            return APR_EGENERAL;
        }
    }

    /* Was this the final bucket? If yes, close the temp file and perform
     * sanity checks.
     */
    if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
        if (r->connection->aborted || r->no_cache) {
            ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
                         "disk_cache: Discarding body for URL %s "
                         "because connection has been aborted.",
                         h->cache_obj->key);
            /* Remove the intermediate cache file and return non-APR_SUCCESS */
            file_cache_errorcleanup(dobj, r);
            return APR_EGENERAL;
        }
        if (dobj->file_size < conf->minfs) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                         "cache_disk: URL %s failed the size check "
                         "(%" APR_OFF_T_FMT " < %" APR_OFF_T_FMT ")",
                         h->cache_obj->key, dobj->file_size, conf->minfs);
            /* Remove the intermediate cache file and return non-APR_SUCCESS */
            file_cache_errorcleanup(dobj, r);
            return APR_EGENERAL;
        }

        /* All checks were fine. Move tempfile to final destination */
        /* Link to the perm file, and close the descriptor */
        file_cache_el_final(dobj, r);
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "disk_cache: Body for URL %s cached.",  dobj->name);
    }

    return APR_SUCCESS;
}
Example #24
static apr_status_t substitute_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_size_t bytes;
    apr_size_t len;
    apr_size_t fbytes;
    const char *buff;
    const char *nl = NULL;
    char *bflat;
    apr_bucket *b;
    apr_bucket *tmp_b;
    apr_bucket_brigade *tmp_bb = NULL;
    apr_status_t rv;
    subst_dir_conf *cfg =
    (subst_dir_conf *) ap_get_module_config(f->r->per_dir_config,
                                             &substitute_module);

    substitute_module_ctx *ctx = f->ctx;

    /*
     * First time around? Create the saved bb that we used for each pass
     * through. Note that we can also get here when we explicitly clear ctx,
     * for error handling
     */
    if (!ctx) {
        f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
        /*
         * Create all the temporary brigades we need and reuse them to avoid
         * creating them over and over again from r->pool which would cost a
         * lot of memory in some cases.
         */
        ctx->linebb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        ctx->linesbb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        ctx->pattbb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        /*
         * Everything to be passed to the next filter goes in
         * here, our pass brigade.
         */
        ctx->passbb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        /* Create our temporary pool only once */
        apr_pool_create(&(ctx->tpool), f->r->pool);
        apr_table_unset(f->r->headers_out, "Content-Length");
    }

    /*
     * Shortcircuit processing
     */
    if (APR_BRIGADE_EMPTY(bb))
        return APR_SUCCESS;

    /*
     * Here's the concept:
     *  Read in the data and look for newlines. Once we
     *  find a full "line", add it to our working brigade.
     *  If we've finished reading the brigade and we have
     *  any left over data (not a "full" line), store that
     *  for the next pass.
     *
     * Note: anything stored in ctx->linebb for sure does not have
     * a newline char, so we don't concat that bb with the
     * new bb, since we would spend time searching for a newline
     * in data where we know there is none. So instead, we simply scan
     * our current bb and, if we see a newline, prepend ctx->linebb
     * to the front of it. This makes the code much less straight-
     * forward (otherwise we could APR_BRIGADE_CONCAT(ctx->linebb, bb)
     * and just scan for newlines and not bother with needing to know
     * when ctx->linebb needs to be reset) but also faster. We'll take
     * the speed.
     *
     * Note: apr_brigade_split_line would be nice here, but we
     * really can't use it since we need more control and we want
     * to re-use already read bucket data.
     *
     * See mod_include if still confused :)
     */

    while ((b = APR_BRIGADE_FIRST(bb)) && (b != APR_BRIGADE_SENTINEL(bb))) {
        if (APR_BUCKET_IS_EOS(b)) {
            /*
             * if we see the EOS, then we need to pass along everything we
             * have. But if the ctx->linebb isn't empty, then we need to add
             * that to the end of what we'll be passing.
             */
            if (!APR_BRIGADE_EMPTY(ctx->linebb)) {
                rv = apr_brigade_pflatten(ctx->linebb, &bflat,
                                          &fbytes, ctx->tpool);
                if (rv != APR_SUCCESS)
                    goto err;
                if (fbytes > cfg->max_line_length) {
                    rv = APR_ENOMEM;
                    goto err;
                }
                tmp_b = apr_bucket_transient_create(bflat, fbytes,
                                                f->r->connection->bucket_alloc);
                rv = do_pattmatch(f, tmp_b, ctx->pattbb, ctx->tpool);
                if (rv != APR_SUCCESS)
                    goto err;
                APR_BRIGADE_CONCAT(ctx->passbb, ctx->pattbb);
                apr_brigade_cleanup(ctx->linebb);
            }
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(ctx->passbb, b);
        }
        /*
         * No need to handle FLUSH buckets separately as we call
         * ap_pass_brigade anyway at the end of the loop.
         */
        else if (APR_BUCKET_IS_METADATA(b)) {
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(ctx->passbb, b);
        }
        else {
            /*
             * We have actual "data" so read in as much as we can and start
             * scanning and splitting from our read buffer
             */
            rv = apr_bucket_read(b, &buff, &bytes, APR_BLOCK_READ);
            if (rv != APR_SUCCESS || bytes == 0) {
                apr_bucket_delete(b);
            }
            else {
                int num = 0;
                while (bytes > 0) {
                    nl = memchr(buff, APR_ASCII_LF, bytes);
                    if (nl) {
                        len = (apr_size_t) (nl - buff) + 1;
                        /* split *after* the newline */
                        apr_bucket_split(b, len);
                        /*
                         * We've likely read more data, so bypass rereading
                         * bucket data and continue scanning through this
                         * buffer
                         */
                        bytes -= len;
                        buff += len;
                        /*
                         * we need b to be updated for future potential
                         * splitting
                         */
                        tmp_b = APR_BUCKET_NEXT(b);
                        APR_BUCKET_REMOVE(b);
                        /*
                         * Hey, we found a newline! Don't forget the old
                         * stuff that needs to be added to the front. So we
                         * add the split bucket to the end, flatten the whole
                         * bb, morph the whole shebang into a bucket which is
                         * then added to the tail of the newline bb.
                         */
                        if (!APR_BRIGADE_EMPTY(ctx->linebb)) {
                            APR_BRIGADE_INSERT_TAIL(ctx->linebb, b);
                            rv = apr_brigade_pflatten(ctx->linebb, &bflat,
                                                      &fbytes, ctx->tpool);
                            if (rv != APR_SUCCESS)
                                goto err;
                            if (fbytes > cfg->max_line_length) {
                                /* Avoid pflattening further lines, we will
                                 * abort later on anyway.
                                 */
                                rv = APR_ENOMEM;
                                goto err;
                            }
                            b = apr_bucket_transient_create(bflat, fbytes,
                                            f->r->connection->bucket_alloc);
                            apr_brigade_cleanup(ctx->linebb);
                        }
                        rv = do_pattmatch(f, b, ctx->pattbb, ctx->tpool);
                        if (rv != APR_SUCCESS)
                            goto err;
                        /*
                         * Count how many buckets we have in ctx->passbb
                         * so far. Yes, this is correct we count ctx->passbb
                         * and not ctx->pattbb as we do not reset num on every
                         * iteration.
                         */
                        for (b = APR_BRIGADE_FIRST(ctx->pattbb);
                             b != APR_BRIGADE_SENTINEL(ctx->pattbb);
                             b = APR_BUCKET_NEXT(b)) {
                            num++;
                        }
                        APR_BRIGADE_CONCAT(ctx->passbb, ctx->pattbb);
                        /*
                         * If the number of buckets in ctx->passbb reaches an
                         * "insane" level, we consume much memory for all the
                         * buckets as such. So lets flush them down the chain
                         * in this case and thus clear ctx->passbb. This frees
                         * the buckets memory for further processing.
                         * Usually this condition should not become true, but
                         * it is a safety measure for edge cases.
                         */
                        if (num > AP_MAX_BUCKETS) {
                            b = apr_bucket_flush_create(
                                                f->r->connection->bucket_alloc);
                            APR_BRIGADE_INSERT_TAIL(ctx->passbb, b);
                            rv = ap_pass_brigade(f->next, ctx->passbb);
                            apr_brigade_cleanup(ctx->passbb);
                            num = 0;
                            apr_pool_clear(ctx->tpool);
                            if (rv != APR_SUCCESS)
                                goto err;
                        }
                        b = tmp_b;
                    }
                    else {
                        /*
                         * no newline in whatever is left of this buffer so
                         * tuck data away and get next bucket
                         */
                        APR_BUCKET_REMOVE(b);
                        APR_BRIGADE_INSERT_TAIL(ctx->linebb, b);
                        bytes = 0;
                    }
                }
            }
        }
        if (!APR_BRIGADE_EMPTY(ctx->passbb)) {
            rv = ap_pass_brigade(f->next, ctx->passbb);
            apr_brigade_cleanup(ctx->passbb);
            if (rv != APR_SUCCESS)
                goto err;
        }
        apr_pool_clear(ctx->tpool);
    }

    /* Anything left we want to save/setaside for the next go-around */
    if (!APR_BRIGADE_EMPTY(ctx->linebb)) {
        /*
         * Provide ap_save_brigade with an existing empty brigade
         * (ctx->linesbb) to avoid creating a new one.
         */
        ap_save_brigade(f, &(ctx->linesbb), &(ctx->linebb), f->r->pool);
        tmp_bb = ctx->linebb;
        ctx->linebb = ctx->linesbb;
        ctx->linesbb = tmp_bb;
    }

    return APR_SUCCESS;
err:
    if (rv == APR_ENOMEM)
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, APLOGNO(01328) "Line too long, URI %s",
                      f->r->uri);
    apr_pool_clear(ctx->tpool);
    return rv;
}
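The filter context is defined elsewhere in mod_substitute; from the members touched above, a minimal sketch of the assumed layout:

/* Assumed context; only the members referenced by the filter are listed. */
typedef struct {
    apr_bucket_brigade *linebb;   /* carry-over data that has no newline yet */
    apr_bucket_brigade *linesbb;  /* spare brigade swapped in by ap_save_brigade */
    apr_bucket_brigade *pattbb;   /* per-line output of do_pattmatch() */
    apr_bucket_brigade *passbb;   /* everything queued for the next filter */
    apr_pool_t *tpool;            /* scratch pool cleared after each pass */
} substitute_module_ctx;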
Example #25
static am_status_t get_request_body(am_request_t *rq) {
    const char *thisfunc = "get_request_body():";
    request_rec *r = (request_rec *) (rq != NULL ? rq->ctx : NULL);
    apr_bucket_brigade *bb;
    int eos_found = 0, read_bytes = 0;
    apr_status_t read_status = 0;
    am_status_t status = AM_ERROR;
    char *out = NULL;

    /* validate the arguments before touching r->pool */
    if (r == NULL || rq == NULL) {
        return AM_EINVAL;
    }

    bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);

    do {
        apr_bucket *ob;
        read_status = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
                APR_BLOCK_READ, HUGE_STRING_LEN);
        if (read_status != APR_SUCCESS) {
            if (out != NULL) free(out);
            return AM_ERROR;
        }

        ob = APR_BRIGADE_FIRST(bb);
        while (ob != APR_BRIGADE_SENTINEL(bb)) {
            const char *data;
            apr_size_t data_size;

            if (APR_BUCKET_IS_EOS(ob)) {
                eos_found = 1;
                break;
            }

            if (APR_BUCKET_IS_FLUSH(ob)) {
                ob = APR_BUCKET_NEXT(ob); /* advance, or the loop never ends */
                continue;
            }

            /* read data */
            apr_bucket_read(ob, &data, &data_size, APR_BLOCK_READ);
            /* process data */
            out = realloc(out, read_bytes + data_size + 1);
            if (out == NULL) {
                status = AM_ENOMEM;
                eos_found = 1;
                break;
            }

            memcpy(out + read_bytes, data, data_size);
            read_bytes += (int) data_size;
            out[read_bytes] = 0;

            ob = APR_BUCKET_NEXT(ob);
            status = AM_SUCCESS;
        }
        apr_brigade_cleanup(bb); /* bb is reused on the next read; destroy it after the loop */

    } while (eos_found == 0);

    apr_brigade_destroy(bb);

    rq->post_data = out;
    rq->post_data_sz = read_bytes;

    if (status == AM_SUCCESS) {
        am_log_debug(rq->instance_id, "%s read %d bytes \n%s", thisfunc,
                read_bytes, LOGEMPTY(out));
        /* remove the content length since the body has been read */
        r->clength = 0;
        apr_table_unset(r->headers_in, "Content-Length");
    }
    return status;
}
Example #26
static int aikido_handler(request_rec *r)
{
    apr_bucket_brigade *bb;
    apr_bucket *b;
    int seen_eos, child_stopped_reading;
    apr_pool_t *p;
    apr_status_t rv;
    conn_rec *c = r->connection;
    void *aikidoreq;
    int inpipes[2];
    int outpipes[2];
    apr_file_t *infile;
    apr_file_t *outfile;
    int e;
    pthread_t tid;
    pthread_attr_t attrs;
    ThreadData tdata;
    void *tresult;

    if (aikido == NULL) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                      "mod_aikido failed to initialize");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    p = r->main ? r->main->pool : r->pool;

    if (r->method_number == M_OPTIONS) {
        r->allowed |= (AP_METHOD_BIT << M_GET);
        r->allowed |= (AP_METHOD_BIT << M_POST);
        return DECLINED;
    }

    // create the request object
    aikidoreq = aikido_new_request (r);

    if (!aikido_check_uri(aikido, aikidoreq)) {
        aikido_delete_request (aikidoreq);
        return DECLINED;
    }

    e = socketpair (AF_UNIX, SOCK_STREAM, 0, inpipes);
    e |= socketpair (AF_UNIX, SOCK_STREAM, 0, outpipes);
    if (e != 0) {
        perror ("socketpair");
        aikido_delete_request (aikidoreq);
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "Failed to create pipes");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    // redirect the output from the webapp to our outfile
    apr_os_file_put (&outfile, &outpipes[0], O_RDONLY, r->pool);   // open for reading from outpipes pipe

    // redirect the input to the webapp from out infile
    apr_os_file_put (&infile, &inpipes[1], O_CREAT | O_WRONLY, r->pool);        // open for writing to inpipes pipe

    ap_add_common_vars(r);
    ap_add_cgi_vars(r);


    bb = apr_brigade_create(r->pool, c->bucket_alloc);
    seen_eos = 0;
    child_stopped_reading = 0;
    do {
        apr_bucket *bucket;

        rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
                            APR_BLOCK_READ, HUGE_STRING_LEN);
       
        if (rv != APR_SUCCESS) {
            close (inpipes[0]);
            close (inpipes[1]);
            close (outpipes[0]);
            close (outpipes[1]);
            aikido_delete_request (aikidoreq);
            return HTTP_INTERNAL_SERVER_ERROR;
        }

        bucket = APR_BRIGADE_FIRST(bb);
        while (bucket != APR_BRIGADE_SENTINEL(bb)) {
            const char *data;
            apr_size_t len;

            if (APR_BUCKET_IS_EOS(bucket)) {
                seen_eos = 1;
                break;
            }

            /* We can't do much with this. */
            if (APR_BUCKET_IS_FLUSH(bucket)) {
                bucket = APR_BUCKET_NEXT(bucket); /* advance, or the loop never ends */
                continue;
            }

            /* If the child stopped, we still must read to EOS. */
            if (child_stopped_reading) {
                bucket = APR_BUCKET_NEXT(bucket);
                continue;
            }

            /* read */
            apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
            
            /* Keep writing data to the child until done or too much time
             * elapses with no progress or an error occurs.
             */
            rv = apr_file_write_full(infile, data, len, NULL);

            if (rv != APR_SUCCESS) {
                /* silly script stopped reading, soak up remaining message */
                child_stopped_reading = 1;
            }
            bucket = APR_BUCKET_NEXT(bucket);
        }
        apr_brigade_cleanup(bb);
    }
    while (!seen_eos);

    /* Is this flush really needed? */
    apr_file_flush(infile);
    apr_file_close(infile);

    apr_brigade_cleanup(bb);

    /* start the reader thread */
    e = pthread_attr_init (&attrs);
    if (e != 0) {
        close (inpipes[0]);
        close (inpipes[1]);
        close (outpipes[0]);
        close (outpipes[1]);
        aikido_delete_request (aikidoreq);
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "Failed to allocate script thread attributes");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    e = pthread_attr_setdetachstate (&attrs, PTHREAD_CREATE_JOINABLE) ;
    if (e != 0) {
        close (inpipes[0]);
        close (inpipes[1]);
        close (outpipes[0]);
        close (outpipes[1]);
        aikido_delete_request (aikidoreq);
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "Failed to set script thread attributes");
        return HTTP_INTERNAL_SERVER_ERROR;
    }


    tdata.req = aikidoreq;
    tdata.in = inpipes[0];
    tdata.out = outpipes[1];

    e = pthread_create (&tid, NULL, script_thread, &tdata) ;
    if (e != 0) {
        close (inpipes[0]);
        close (inpipes[1]);
        close (outpipes[0]);
        close (outpipes[1]);
        aikido_delete_request (aikidoreq);
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "Failed to run script thread");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* script thread is now executing script with i/o redirected to the pipes */


    /* read the script output from the pipe*/
    apr_file_pipe_timeout_set(outfile, 0);

    b = aikido_bucket_create(r, outfile, c->bucket_alloc);
    if (b == NULL) {
        pthread_join (tid, &tresult);       /* wait for script to complete */
        close (inpipes[0]);
        close (inpipes[1]);
        close (outpipes[0]);
        close (outpipes[1]);
        aikido_delete_request (aikidoreq);
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    APR_BRIGADE_INSERT_TAIL(bb, b);
    b = apr_bucket_eos_create(c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, b);

    ap_pass_brigade(r->output_filters, bb);

    /* wait for the script thread to complete */
    pthread_join (tid, &tresult);
    
    /* terminate script output */
    close (inpipes[0]);
    close (inpipes[1]);
    close (outpipes[0]);
    close (outpipes[1]);

    /* delete the aikido request objects */
    aikido_delete_request (aikidoreq);

    return OK;
}
Example #27
AP_CORE_DECLARE_NONSTD(apr_status_t) ap_byterange_filter(ap_filter_t *f,
                                                         apr_bucket_brigade *bb)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    apr_bucket *e;
    apr_bucket_brigade *bsend;
    apr_bucket_brigade *tmpbb;
    apr_off_t range_start;
    apr_off_t range_end;
    apr_off_t clength = 0;
    apr_status_t rv;
    int found = 0;
    int num_ranges;
    char *boundary = NULL;
    char *bound_head = NULL;
    apr_array_header_t *indexes;
    indexes_t *idx;
    int i;
    int original_status;
    int max_ranges = get_max_ranges(r);

    /*
     * Iterate through the brigade until reaching EOS or a bucket with
     * unknown length.
     */
    for (e = APR_BRIGADE_FIRST(bb);
         (e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e)
          && e->length != (apr_size_t)-1);
         e = APR_BUCKET_NEXT(e)) {
        clength += e->length;
    }

    /*
     * Don't attempt to do byte range work if this brigade doesn't
     * contain an EOS, or if any of the buckets has an unknown length;
     * this avoids the cases where it is expensive to perform
     * byteranging (i.e. may require arbitrary amounts of memory).
     */
    if (!APR_BUCKET_IS_EOS(e) || clength <= 0) {
        ap_remove_output_filter(f);
        return ap_pass_brigade(f->next, bb);
    }

    original_status = r->status;
    num_ranges = ap_set_byterange(r, clength, &indexes);

    /* We have nothing to do, get out of the way. */
    if (num_ranges == 0 || (max_ranges >= 0 && num_ranges > max_ranges)) {
        r->status = original_status;
        ap_remove_output_filter(f);
        return ap_pass_brigade(f->next, bb);
    }

    /* this brigade holds what we will be sending */
    bsend = apr_brigade_create(r->pool, c->bucket_alloc);

    if (num_ranges < 0)
        return send_416(f, bsend);

    if (num_ranges > 1) {
        /* Is ap_make_content_type required here? */
        const char *orig_ct = ap_make_content_type(r, r->content_type);
        boundary = apr_psprintf(r->pool, "%" APR_UINT64_T_HEX_FMT "%lx",
                                (apr_uint64_t)r->request_time, c->id);

        ap_set_content_type(r, apr_pstrcat(r->pool, "multipart",
                                           use_range_x(r) ? "/x-" : "/",
                                           "byteranges; boundary=",
                                           boundary, NULL));

        if (strcasecmp(orig_ct, NO_CONTENT_TYPE)) {
            bound_head = apr_pstrcat(r->pool,
                                     CRLF "--", boundary,
                                     CRLF "Content-type: ",
                                     orig_ct,
                                     CRLF "Content-range: bytes ",
                                     NULL);
        }
        else {
            /* if we have no type for the content, do our best */
            bound_head = apr_pstrcat(r->pool,
                                     CRLF "--", boundary,
                                     CRLF "Content-range: bytes ",
                                     NULL);
        }
        ap_xlate_proto_to_ascii(bound_head, strlen(bound_head));
    }

    tmpbb = apr_brigade_create(r->pool, c->bucket_alloc);

    idx = (indexes_t *)indexes->elts;
    for (i = 0; i < indexes->nelts; i++, idx++) {
        range_start = idx->start;
        range_end = idx->end;

        rv = copy_brigade_range(bb, tmpbb, range_start, range_end);
        if (rv != APR_SUCCESS ) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                          "copy_brigade_range() failed [%" APR_OFF_T_FMT
                          "-%" APR_OFF_T_FMT ",%" APR_OFF_T_FMT "]",
                          range_start, range_end, clength);
            continue;
        }
        found = 1;

        /*
         * For single range requests, we must produce Content-Range header.
         * Otherwise, we need to produce the multipart boundaries.
         */
        if (num_ranges == 1) {
            apr_table_setn(r->headers_out, "Content-Range",
                           apr_psprintf(r->pool, "bytes " BYTERANGE_FMT,
                                        range_start, range_end, clength));
        }
        else {
            char *ts;

            e = apr_bucket_pool_create(bound_head, strlen(bound_head),
                                       r->pool, c->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(bsend, e);

            ts = apr_psprintf(r->pool, BYTERANGE_FMT CRLF CRLF,
                              range_start, range_end, clength);
            ap_xlate_proto_to_ascii(ts, strlen(ts));
            e = apr_bucket_pool_create(ts, strlen(ts), r->pool,
                                       c->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(bsend, e);
        }

        APR_BRIGADE_CONCAT(bsend, tmpbb);
        if (i && !(i & 0x1F)) {
            /*
             * Every now and then, pass what we have down the filter chain.
             * In this case, the content-length filter cannot calculate and
             * set the content length and we must remove any Content-Length
             * header already present.
             */
            apr_table_unset(r->headers_out, "Content-Length");
            if ((rv = ap_pass_brigade(f->next, bsend)) != APR_SUCCESS)
                return rv;
            apr_brigade_cleanup(bsend);
        }
    }

    if (found == 0) {
        /* bsend is assumed to be empty if we get here. */
        return send_416(f, bsend);
    }

    if (num_ranges > 1) {
        char *end;

        /* add the final boundary */
        end = apr_pstrcat(r->pool, CRLF "--", boundary, "--" CRLF, NULL);
        ap_xlate_proto_to_ascii(end, strlen(end));
        e = apr_bucket_pool_create(end, strlen(end), r->pool, c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bsend, e);
    }

    e = apr_bucket_eos_create(c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bsend, e);

    /* we're done with the original content - all of our data is in bsend. */
    apr_brigade_cleanup(bb);
    apr_brigade_destroy(tmpbb);

    /* send our multipart output */
    return ap_pass_brigade(f->next, bsend);
}
Example #28
AP_DECLARE(int) ap_xml_parse_input(request_rec * r, apr_xml_doc **pdoc)
{
    apr_xml_parser *parser;
    apr_bucket_brigade *brigade;
    int seen_eos;
    apr_status_t status;
    char errbuf[200];
    apr_size_t total_read = 0;
    apr_size_t limit_xml_body = ap_get_limit_xml_body(r);
    int result = HTTP_BAD_REQUEST;

    parser = apr_xml_parser_create(r->pool);
    brigade = apr_brigade_create(r->pool, r->connection->bucket_alloc);

    seen_eos = 0;
    total_read = 0;

    do {
        apr_bucket *bucket;

        /* read the body, stuffing it into the parser */
        status = ap_get_brigade(r->input_filters, brigade,
                                AP_MODE_READBYTES, APR_BLOCK_READ,
                                READ_BLOCKSIZE);

        if (status != APR_SUCCESS) {
            goto read_error;
        }

        for (bucket = APR_BRIGADE_FIRST(brigade);
             bucket != APR_BRIGADE_SENTINEL(brigade);
             bucket = APR_BUCKET_NEXT(bucket))
        {
            const char *data;
            apr_size_t len;

            if (APR_BUCKET_IS_EOS(bucket)) {
                seen_eos = 1;
                break;
            }

            if (APR_BUCKET_IS_METADATA(bucket)) {
                continue;
            }

            status = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
            if (status != APR_SUCCESS) {
                goto read_error;
            }

            total_read += len;
            if (limit_xml_body && total_read > limit_xml_body) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00539)
                              "XML request body is larger than the configured "
                              "limit of %lu", (unsigned long)limit_xml_body);
                result = HTTP_REQUEST_ENTITY_TOO_LARGE;
                goto read_error;
            }

            status = apr_xml_parser_feed(parser, data, len);
            if (status) {
                goto parser_error;
            }
        }

        apr_brigade_cleanup(brigade);
    } while (!seen_eos);

    apr_brigade_destroy(brigade);

    /* tell the parser that we're done */
    status = apr_xml_parser_done(parser, pdoc);
    if (status) {
        /* Some parsers return an error on empty documents; treat that as OK. */
        if (!total_read) {
            *pdoc = NULL;
            return OK;
        }
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00540)
                      "XML parser error (at end). status=%d", status);
        return HTTP_BAD_REQUEST;
    }

#if APR_CHARSET_EBCDIC
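    /* Expat hands back ASCII strings; on EBCDIC hosts convert the parsed
     * document to the native charset before returning it. */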
    apr_xml_parser_convert_doc(r->pool, *pdoc, ap_hdrs_from_ascii);
#endif
    return OK;

  parser_error:
    (void) apr_xml_parser_geterror(parser, errbuf, sizeof(errbuf));
    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00541)
                  "XML Parser Error: %s", errbuf);

    /* FALLTHRU */

  read_error:
    /* make sure the parser is terminated */
    (void) apr_xml_parser_done(parser, NULL);

    apr_brigade_destroy(brigade);

    /* Apache will supply a default error, plus the error log above. */
    return result;
}
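ap_xml_parse_input drains the request body through the input filter chain and returns OK with a parsed document (NULL for an empty body) or an HTTP status on failure. A minimal sketch of a caller, assuming the usual httpd headers plus util_xml.h; the handler name is hypothetical:

static int xml_echo_handler(request_rec *r)
{
    apr_xml_doc *doc;
    int rc;

    if (strcmp(r->handler, "xml-echo") != 0)  /* hypothetical handler name */
        return DECLINED;

    rc = ap_xml_parse_input(r, &doc);
    if (rc != OK)
        return rc;                /* parser already logged the details */
    if (doc == NULL)
        return HTTP_BAD_REQUEST;  /* empty request body */

    ap_set_content_type(r, "text/plain");
    ap_rprintf(r, "root element: %s\n", doc->root->name);
    return OK;
}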
Example #29
static apr_status_t triger_filter(ap_filter_t * f, apr_bucket_brigade * bb)
{
    apr_status_t rv = APR_SUCCESS;
    triger_conf_t *cfg;
    apr_bucket *b;
    triger_module_ctx_t *ctx = f->ctx;
    if (APR_BRIGADE_EMPTY(bb))
	return APR_SUCCESS;
    cfg = ap_get_module_config(f->r->per_dir_config, &triger_module);
    if (!cfg)
	goto last;
    if (!cfg->enabled)
	goto last;
    if (!ctx) {
	f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
	if (!ctx)
	    goto last;
	ctx->times = 1;
	ctx->unknown_start_tag_find = 0;
	ctx->unknown_end_tag_find = 0;
	ctx->find = 0;
	ctx->no_tag_find = 1;
	ctx->doctype_tag_find = 0;
	ctx->html_start_tag_find = ctx->head_start_tag_find =
	    ctx->body_start_tag_find = ctx->body_end_tag_find =
	    ctx->html_end_tag_find = 0;
	ctx->triger_bucket =
	    apr_pcalloc(f->r->pool, sizeof(triger_bucket_t));
	if (!ctx->triger_bucket)
	    goto last;
	ctx->triger_bucket->limit = cfg->chk_len;
	ctx->head_check = 0;
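	/* The inserted HTML fragments change the body size, so the original
	   Content-Length is no longer valid; drop it here. */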
	apr_table_unset(f->r->headers_out, "Content-Length");
    } else
	ctx->times++;
    if (!is_this_html(f->r) || ctx->find)
	goto last;
    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
		  "Enter this filter %u times", ctx->times);
    if (!cfg->full_chk) {
	ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
		      "Only check the first and last data buckets");
	if (!ctx->head_check) {
	    ctx->head_check = 1;
	    get_data_at_head(f, bb);
	    where_to_insert_html_fragment_at_head(f);
	    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
			  "Find the first data bucket. Content length: %d uri: %s path info: %s positions found: %d (<head>)",
			  (int) ctx->triger_bucket->len, f->r->uri,
			  f->r->path_info,
			  (int) ctx->triger_bucket->head_start_tag_pos);
	    insert_html_fragment_at_head(f, bb, cfg);
	}
	if (ctx->find || !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb)))
	    goto last;
	get_data_at_tail(f, bb);
	where_to_insert_html_fragment_at_tail(f);
	ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
		      "Find the last data bucket. Content length: %d uri: %s path info: %s positions found: %d (</body>) %d (/html)",
		      (int) ctx->triger_bucket->len, f->r->uri,
		      f->r->path_info,
		      (int) ctx->triger_bucket->body_end_tag_pos,
		      (int) ctx->triger_bucket->html_end_tag_pos);
	insert_html_fragment_at_tail(f, bb, cfg);
    } else {
	ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
		      "Check each data bucket");
	for (b = APR_BRIGADE_FIRST(bb);
	     b != APR_BRIGADE_SENTINEL(bb); b = APR_BUCKET_NEXT(b)) {
	    if (!APR_BUCKET_IS_METADATA(b)) {
		get_triger_bucket(f, b);
		where_to_insert_html_fragment_at_head(f);
		insert_html_fragment_at_head(f, bb, cfg);
	    }
	    if (!ctx->find && APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
		get_data_at_tail(f, bb);
		where_to_insert_html_fragment_at_tail(f);
		insert_html_fragment_at_tail(f, bb, cfg);
	    }
	}
    }
  last:
    rv = ap_pass_brigade(f->next, bb);
    return rv;
}
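The filter allocates its context lazily on the first brigade it sees, so the module only needs to register it and insert it per request. A minimal registration sketch; the filter name "TRIGER" and the hook wiring are assumptions, since the module's registration code is not part of this excerpt:

static void triger_insert_filter(request_rec *r)
{
    /* Insert the output filter for every request; the filter itself
     * decides whether the response is HTML worth touching. */
    ap_add_output_filter("TRIGER", NULL, r, r->connection);
}

static void triger_register_hooks(apr_pool_t *p)
{
    /* AP_FTYPE_RESOURCE places the filter before the Content-Length and
     * chunking filters, which matters because it changes the body size. */
    ap_register_output_filter("TRIGER", triger_filter, NULL,
                              AP_FTYPE_RESOURCE);
    ap_hook_insert_filter(triger_insert_filter, NULL, NULL, APR_HOOK_MIDDLE);
}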
Example #30
/* The handler.  Create a new parser and/or filter context where appropriate
 * and parse the chunks of data received from the brigade
 */
static int idlChunkHandler( ap_filter_t *f, apr_bucket_brigade *brigade ) {

	idlChunkContext* ctx = f->ctx;
	apr_bucket* currentBucket = NULL;
	apr_pool_t* pool = f->r->pool;
	const char* data;
  	apr_size_t len;
    osrfStringArray* params = NULL;
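    /* Note: mparams, all, and parser are presumably file-scope globals in
       this module; their declarations are not part of this excerpt. */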
    mparams = NULL;

	/* load the per-dir/location config */
	idlChunkConfig* config = ap_get_module_config( 
			f->r->per_dir_config, &idlchunk_module );

	ap_log_rerror(APLOG_MARK, APLOG_ERR, 
			0, f->r, "IDLCHUNK Config:\nContent Type = %s, "
			"Strip PI = %s, Strip Comments = %s, Doctype = %s", 
			config->contentType, 
			(config->stripPI) ? "yes" : "no", 
			(config->stripComments) ? "yes" : "no",
			config->doctype);

	/* set the content type based on the config */
	ap_set_content_type(f->r, config->contentType);

	//ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, "Set content type");

    params = apacheParseParms(f->r); /* free me */
    mparams = apacheGetParamValues( params, "class" ); /* free me */

    all = 1;

    if (mparams && mparams->size > 0) all = 0;

	//ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, "Parsed the params, if any");

	/* create the XML parser */
	int firstrun = 0;
	if( parser == NULL ) {
		firstrun = 1;
		parser = XML_ParserCreate("UTF-8");
		XML_SetUserData(parser, f);
		XML_SetElementHandler(parser, startElement, endElement);
		XML_SetCharacterDataHandler(parser, charHandler);
		if(!config->stripDoctype)
			XML_SetStartDoctypeDeclHandler( parser, doctypeHandler );
		if(!config->stripPI)
			XML_SetProcessingInstructionHandler(parser, handlePI);
		if(!config->stripComments)
			XML_SetCommentHandler(parser, handleComment);
	}

	/* create the filter context */
	if( ctx == NULL ) {
		f->ctx = ctx = apr_pcalloc( pool, sizeof(*ctx));
		ctx->brigade = apr_brigade_create( pool, f->c->bucket_alloc );
		ctx->parser = parser;
	}


	if(firstrun) { /* we haven't started writing the data to the stream yet */

		/* go ahead and write the doctype out if we have one defined */
		if(config->doctype) {
			ap_log_rerror( APLOG_MARK, APLOG_DEBUG, 
					0, f->r, "IDLCHUNK DOCTYPE => %s", config->doctype);
			_fwrite(f, "%s\n", config->doctype);
		}
	}


	/* cycle through the buckets in the brigade */
	while (!APR_BRIGADE_EMPTY(brigade)) {

		/* grab the next bucket */
		currentBucket = APR_BRIGADE_FIRST(brigade);

		/* clean up when we're done */
		if (APR_BUCKET_IS_EOS(currentBucket) || APR_BUCKET_IS_FLUSH(currentBucket)) {
    	  	APR_BUCKET_REMOVE(currentBucket);
			APR_BRIGADE_INSERT_TAIL(ctx->brigade, currentBucket);
			ap_pass_brigade(f->next, ctx->brigade);
			XML_ParserFree(parser);
            if (params) osrfStringArrayFree(params);
            if (mparams) osrfStringArrayFree(mparams);
			parser = NULL;
		  	return APR_SUCCESS;
    	}

		/* read the incoming data */
		apr_status_t s = apr_bucket_read(currentBucket, &data, &len, APR_NONBLOCK_READ);
		if( s != APR_SUCCESS ) {
			ap_log_rerror( APLOG_MARK, APLOG_ERR, 0, f->r, 
					"IDLCHUNK error reading data from filter with status %d", s);
            if (params) osrfStringArrayFree(params);
            if (mparams) osrfStringArrayFree(mparams);
			return s;
		}

		if (len > 0) {

			ap_log_rerror( APLOG_MARK, APLOG_DEBUG, 
					0, f->r, "IDLCHUNK read %d bytes", (int)len);

			/* push data into the XML push parser */
			if ( XML_Parse(ctx->parser, data, len, 0) == XML_STATUS_ERROR ) {

                char tmp[len+1];
                memcpy(tmp, data, len);
                tmp[len] = '\0';

				/* log and die on XML errors */
				ap_log_rerror( APLOG_MARK, APLOG_ERR, 0, f->r, 
                    "IDLCHUNK XML Parse Error: %s at line %d: parsing %s: data %s",
					XML_ErrorString(XML_GetErrorCode(ctx->parser)), 
					(int) XML_GetCurrentLineNumber(ctx->parser), f->r->filename, tmp);

				XML_ParserFree(parser);
                if (params) osrfStringArrayFree(params);
                if (mparams) osrfStringArrayFree(mparams);
				parser = NULL;
				return HTTP_INTERNAL_SERVER_ERROR; 
			}
    	}

		/* so a subrequest doesn't re-read this bucket */
		apr_bucket_delete(currentBucket); 
  	}

	apr_brigade_destroy(brigade);
    if (params) osrfStringArrayFree(params);
    if (mparams) osrfStringArrayFree(mparams);
  	return APR_SUCCESS;	
}
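The handler above keeps the Expat parser in a file-scope variable, which effectively limits it to one request at a time. A minimal sketch of the same push-parse loop with the parser held in the per-filter context instead; the names are illustrative and the element/character handlers are omitted:

typedef struct {
    XML_Parser parser;
} sketch_ctx_t;

static apr_status_t sketch_xml_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    sketch_ctx_t *ctx = f->ctx;
    apr_bucket *b;

    if (ctx == NULL) {
        /* One parser per filter instance, created on the first brigade. */
        f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
        ctx->parser = XML_ParserCreate("UTF-8");
        XML_SetUserData(ctx->parser, f);
        /* element/character handlers would be registered here */
    }

    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b)) {
        const char *data;
        apr_size_t len;

        if (APR_BUCKET_IS_EOS(b)) {
            /* Tell Expat the document is complete, then release it. */
            XML_Parse(ctx->parser, "", 0, 1);
            XML_ParserFree(ctx->parser);
            ctx->parser = NULL;
            break;
        }
        if (APR_BUCKET_IS_METADATA(b))
            continue;

        if (apr_bucket_read(b, &data, &len, APR_BLOCK_READ) != APR_SUCCESS)
            return APR_EGENERAL;

        if (XML_Parse(ctx->parser, data, (int)len, 0) == XML_STATUS_ERROR) {
            XML_ParserFree(ctx->parser);
            ctx->parser = NULL;
            return APR_EGENERAL;
        }
    }

    /* Pass the original data through unchanged. */
    return ap_pass_brigade(f->next, bb);
}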