Example #1
static apr_status_t apreq_output_filter_test(ap_filter_t *f, apr_bucket_brigade *bb)
{
    request_rec *r = f->r;
    apreq_handle_t *handle;
    apr_bucket_brigade *eos;
    struct ctx_t ctx = {r, bb};
    const apr_table_t *t;

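    /* Pass brigades through untouched until one ends with EOS; only then
     * split the EOS off and append the parsed args/body dump before it. */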
    if (!APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb)))
        return ap_pass_brigade(f->next,bb);

    eos = apr_brigade_split(bb, APR_BRIGADE_LAST(bb));

    handle = apreq_handle_apache2(r);
    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r,
                  "appending parsed data");

    apr_brigade_puts(bb, NULL, NULL, "\n--APREQ OUTPUT FILTER--\nARGS:\n");

    apreq_args(handle, &t);
    if (t != NULL)
        apr_table_do(dump_table, &ctx, t, NULL);

    apreq_body(handle, &t);
    if (t != NULL) {
        apr_brigade_puts(bb,NULL,NULL,"BODY:\n");
        apr_table_do(dump_table, &ctx, t, NULL);
    }
    APR_BRIGADE_CONCAT(bb,eos);
    return ap_pass_brigade(f->next,bb);
}
Example #2
int output_status_json(output_t* output){
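	/* Minimal JSON status response: allow any origin, mark the payload as
	 * application/json, and wrap the collected error messages in braces. */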
	apr_table_add(output->headers,"Access-Control-Allow-Origin", "*");
	apr_cpystrn((char*)output->content_type, "application/json", 255);

	apr_brigade_puts(output->bucket_brigade, NULL,NULL, "{\n");

	error_messages_print_json_bb(output->error_messages, output->pool,output->bucket_brigade);

	apr_brigade_puts(output->bucket_brigade, NULL,NULL,"\n}\n");

	return 0;
}
Example #3
h2_task_input *h2_task_input_create(h2_task *task, apr_pool_t *pool, 
                                    apr_bucket_alloc_t *bucket_alloc)
{
    h2_task_input *input = apr_pcalloc(pool, sizeof(h2_task_input));
    if (input) {
        input->task = task;
        input->bb = NULL;
        
        if (task->serialize_headers) {
            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
                          "h2_task_input(%s): serialize request %s %s", 
                          task->id, task->request->method, task->request->path);
            input->bb = apr_brigade_create(pool, bucket_alloc);
            apr_brigade_printf(input->bb, NULL, NULL, "%s %s HTTP/1.1\r\n", 
                               task->request->method, task->request->path);
            apr_table_do(ser_header, input, task->request->headers, NULL);
            apr_brigade_puts(input->bb, NULL, NULL, "\r\n");
            if (input->task->input_eos) {
                APR_BRIGADE_INSERT_TAIL(input->bb, apr_bucket_eos_create(bucket_alloc));
            }
        }
        else if (!input->task->input_eos) {
            input->bb = apr_brigade_create(pool, bucket_alloc);
        }
        else {
            /* We do not serialize and have eos already, no need to
             * create a bucket brigade. */
        }
    }
    return input;
}
Example #4
apr_status_t h2_to_h1_add_data(h2_to_h1 *to_h1,
                               const char *data, size_t len)
{
    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, h2_mplx_get_conn(to_h1->m),
                  "h2_to_h1(%ld-%d): add %ld data bytes", 
                  h2_mplx_get_id(to_h1->m), to_h1->stream_id, (long)len);
    
    if (to_h1->chunked) {
        /* if input may have a body and we have not seen any
         * content-length header, we need to chunk the input data.
         */
        apr_status_t status = apr_brigade_printf(to_h1->bb, NULL, NULL,
                                                 "%lx\r\n", (unsigned long)len);
        if (status == APR_SUCCESS) {
            status = h2_to_h1_add_data_raw(to_h1, data, len);
            if (status == APR_SUCCESS) {
                status = apr_brigade_puts(to_h1->bb, NULL, NULL, "\r\n");
            }
        }
        return status;
    }
    else {
        to_h1->remain_len -= len;
        if (to_h1->remain_len < 0) {
            ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, 
                          h2_mplx_get_conn(to_h1->m),
                          "h2_to_h1(%ld-%d): got %ld more content bytes than announced "
                          "in content-length header: %ld", 
                          h2_mplx_get_id(to_h1->m),
                          to_h1->stream_id, -(long)to_h1->remain_len,
                          (long)to_h1->content_len);
        }
        return h2_to_h1_add_data_raw(to_h1, data, len);
    }
}
Example #5
static int brigade_puts(lua_State*L)
{
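	/* Lua binding: argument 1 is the bucket brigade object, argument 2 the
	 * string to append; the resulting apr_status_t is pushed back to Lua. */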
	apr_bucket_brigade *bb = (apr_bucket_brigade *)CHECK_BUCKETBRIGADE_OBJECT(1);
	const char* str = luaL_checkstring(L,2);

	apr_status_t rc = apr_brigade_puts(bb, NULL,NULL, str);
	lua_pushinteger(L,rc);
	return 1;
}
Example #6
static apr_status_t tmpfile_filter(ap_filter_t *f, apr_bucket_brigade *bbout,
	ap_input_mode_t mode, apr_read_type_e block, apr_off_t nbytes) {

  apr_bucket_brigade* bbin = apr_brigade_create(f->r->pool,
	     f->r->connection->bucket_alloc);

  apr_file_t* tmpfile ;
  char* tmpname = apr_pstrdup(f->r->pool, "/tmp/mod-upload.XXXXXX") ;

  if ( f->ctx ) {
    APR_BRIGADE_INSERT_TAIL(bbout, apr_bucket_eos_create(bbout->bucket_alloc)) ;
    return APR_SUCCESS ;
  }
  if ( apr_file_mktemp(&tmpfile, tmpname, KEEPONCLOSE, f->r->pool) != APR_SUCCESS ) {
    // error: no temp file to spool into, so remove ourselves and give up
    ap_remove_input_filter(f) ;
    return APR_EGENERAL ;
  }
  apr_pool_cleanup_register(f->r->pool, tmpfile,
		(void*)apr_file_close, apr_pool_cleanup_null) ;

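  /* Spool all upstream input into the temp file; when EOS arrives, send the
   * temp file's name (not its contents) downstream, followed by EOS. */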
  for ( ; ; ) {
    apr_bucket* b ;
    const char* ptr = 0 ;
    apr_size_t bytes ;
#ifdef DEBUG
    ap_log_rerror(APLOG_MARK,APLOG_DEBUG,0, f->r, "get_brigade") ;
#endif
    ap_get_brigade(f->next, bbin, AP_MODE_READBYTES, APR_BLOCK_READ, BUFLEN) ;
    for ( b = APR_BRIGADE_FIRST(bbin) ;
	b != APR_BRIGADE_SENTINEL(bbin) && ! f->ctx ;
	b = APR_BUCKET_NEXT(b) ) {
      if ( APR_BUCKET_IS_EOS(b) ) {
	f->ctx = f ;	// just using it as a flag; any nonzero will do
	apr_file_flush(tmpfile) ;
	apr_brigade_puts(bbout, ap_filter_flush, f, tmpname) ;
	APR_BRIGADE_INSERT_TAIL(bbout,
		apr_bucket_eos_create(bbout->bucket_alloc) ) ;
      } else if ( apr_bucket_read(b, &ptr, &bytes, APR_BLOCK_READ)
		== APR_SUCCESS ) {
#ifdef DEBUG
  ap_log_rerror(APLOG_MARK,APLOG_DEBUG,0, f->r, "	%d bytes in bucket", bytes) ;
#endif
	apr_file_write(tmpfile, ptr, &bytes) ;
      }
    }
    if ( f->ctx )
      break ;
    else
      apr_brigade_cleanup(bbin) ;
  }

  apr_brigade_destroy(bbin) ;

  return APR_SUCCESS ;
}
Example #7
static apr_status_t input_add_data(h2_stream *stream,
                                   const char *data, size_t len, int chunked)
{
    apr_status_t status = APR_SUCCESS;
    
    if (chunked) {
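        /* wrap the payload in HTTP/1.1 chunked framing: a hex length line,
         * the data itself, then the terminating CRLF */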
        status = apr_brigade_printf(stream->bbin, input_flush, stream,
                                    "%lx\r\n", (unsigned long)len);
        if (status == APR_SUCCESS) {
            status = apr_brigade_write(stream->bbin, input_flush, stream, data, len);
            if (status == APR_SUCCESS) {
                status = apr_brigade_puts(stream->bbin, input_flush, stream, "\r\n");
            }
        }
    }
    else {
        status = apr_brigade_write(stream->bbin, input_flush, stream, data, len);
    }
    return status;
}
Example #8
h2_task_input *h2_task_input_create(h2_task_env *env, apr_pool_t *pool, 
                                    apr_bucket_alloc_t *bucket_alloc)
{
    h2_task_input *input = apr_pcalloc(pool, sizeof(h2_task_input));
    if (input) {
        input->env = env;
        input->bb = NULL;
        
        if (env->serialize_headers) {
            input->bb = apr_brigade_create(pool, bucket_alloc);
            apr_brigade_printf(input->bb, NULL, NULL, "%s %s HTTP/1.1\r\n", 
                               env->method, env->path);
            apr_table_do(ser_header, input, env->headers, NULL);
            apr_brigade_puts(input->bb, NULL, NULL, "\r\n");
            if (input->env->input_eos) {
                APR_BRIGADE_INSERT_TAIL(input->bb, apr_bucket_eos_create(bucket_alloc));
            }
        }
        else if (!input->env->input_eos) {
            input->bb = apr_brigade_create(pool, bucket_alloc);
        }
        else {
            /* We do not serialize and have eos already, no need to
             * create a bucket brigade. */
        }
        
        if (APLOGcdebug(&env->c)) {
            char buffer[1024];
            apr_size_t len = sizeof(buffer)-1;
            if (input->bb) {
                apr_brigade_flatten(input->bb, buffer, &len);
            }
            else {
                len = 0;
            }
            buffer[len] = 0;
            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, &env->c,
                          "h2_task_input(%s): request is: %s", 
                          env->id, buffer);
        }
    }
    return input;
}
Example #9
int output_dirsync_status(music_query_t* music_query){
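	/* JSON status report: per-directory scan progress under "dir_sync_status",
	 * the database status under "db_status", plus any collected error messages. */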
	int i = 0;


	apr_bucket_brigade* output_bb = music_query->output->bucket_brigade;

	apr_table_add(music_query->output->headers,"Access-Control-Allow-Origin", "*");
	apr_cpystrn((char*)music_query->output->content_type, "application/json", 255);



	apr_brigade_puts(music_query->output->bucket_brigade, NULL,NULL, "{\n");

	//Print Status
	if(music_query->globals->music_dirs != NULL){
		apr_brigade_puts(output_bb, NULL,NULL,"\t\"dir_sync_status\" : {\n");
		for(i = 0; i < music_query->globals->music_dirs->nelts; i++){
			dir_t* dir = &(((dir_t*)music_query->globals->music_dirs->elts)[i]);
			apr_brigade_printf(output_bb, NULL, NULL, "\t\t\"%s\" : {\n",dir->path);
			apr_brigade_printf(output_bb, NULL,NULL, "\t\t\"Progress\" :  \"%.2f\",\n",dir->stats->sync_progress);
			apr_brigade_printf(output_bb, NULL,NULL, "\t\t\"Files Scanned\" :  \"%d\"\n", dir->stats->files_scanned);
			apr_brigade_printf(output_bb, NULL, NULL, "\t\t}");
			if(i < (music_query->globals->music_dirs->nelts - 1)){
				apr_brigade_printf(output_bb, NULL, NULL, ",");
			}
		}

		apr_brigade_puts(output_bb, NULL,NULL,"\t},\n");
	}

	apr_brigade_puts(output_bb, NULL,NULL,"\t\"db_status\" : ");
	output_db_result_json(music_query->results,music_query->db_query,music_query->output);
	apr_brigade_puts(output_bb, NULL,NULL,"\n,");

	print_error_messages(music_query->pool,output_bb, music_query->error_messages);

	apr_brigade_puts(output_bb, NULL,NULL,"\n}\n");
	return 0;
}
Example #10
static apr_status_t upload_filter(ap_filter_t *f, apr_bucket_brigade *bbout,
	ap_input_mode_t mode, apr_read_type_e block, apr_off_t nbytes) {

  char* buf = 0 ;
  char* p = buf ;
  char* e ;
 
  int ret = APR_SUCCESS ;
 
  apr_size_t bytes = 0 ;
  apr_bucket* b ;
  apr_bucket_brigade* bbin ;
 
  upload_ctx* ctx = (upload_ctx*) f->ctx ;
  if ( ctx->parse_state == p_done ) {
    // send an EOS
    APR_BRIGADE_INSERT_TAIL(bbout, apr_bucket_eos_create(bbout->bucket_alloc) ) ;
    return APR_SUCCESS ;
  }

  /* should be more efficient to do this in-place without resorting
   * to a new brigade
   */
  bbin = apr_brigade_create(f->r->pool, f->r->connection->bucket_alloc) ;

  if ( ret = ap_get_brigade(f->next, bbin, mode, block, nbytes) ,
	ret != APR_SUCCESS )
     return ret ;


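  /* Walk the incoming buckets line by line: multipart boundaries and part
   * headers update ctx, small field values are captured via set_body(), and
   * file content is streamed straight into bbout. */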
  for ( b = APR_BRIGADE_FIRST(bbin) ;
	b != APR_BRIGADE_SENTINEL(bbin) ;
	b = APR_BUCKET_NEXT(b) ) {
    const char* ptr = buf ;
    if ( APR_BUCKET_IS_EOS(b) ) {
      ctx->parse_state = p_done ;
      APR_BRIGADE_INSERT_TAIL(bbout,
	 apr_bucket_eos_create(bbout->bucket_alloc) ) ;
      apr_brigade_destroy(bbin) ;
      return APR_SUCCESS ;
    } else if ( apr_bucket_read(b, &ptr, &bytes, APR_BLOCK_READ)
		== APR_SUCCESS ) {
      const char* p = ptr ;
      while ( e = strchr(p, '\n'), ( e && ( e < (ptr+bytes) ) ) ) {
	const char* ptmp = p ;
	*e = 0 ;
	if ( ctx->leftover ) {
		// this'll be grossly inefficient if we get lots of
		// little buckets (we don't in my setup:-)
	  ptmp = apr_pstrcat(f->r->pool, ctx->leftover, p, NULL) ;
	  ctx->leftover = 0 ;
	}
	switch ( ctx->parse_state ) {
	  case p_none:
	    if ( is_boundary(ctx, ptmp) == boundary_part )
	      ctx->parse_state = p_head ;
	    break ;
	  case p_head:
	    if ( (! *ptmp) || ( *ptmp == '\r') )
	      ctx->parse_state = p_field ;
	    else
	      set_header(ctx, ptmp) ;
	    break ;
	  case p_field:
	    switch ( is_boundary(ctx, ptmp) ) {
	      case boundary_part:
		end_body(ctx) ;
		ctx->parse_state = p_head ;
		break ;
	      case boundary_end:
		end_body(ctx) ;
		ctx->parse_state = p_end ;
		break ;
	      case boundary_none:
		if ( ctx->is_file ) {
		  apr_brigade_puts(bbout, ap_filter_flush, f, ptmp) ;
		  apr_brigade_putc(bbout, ap_filter_flush, f, '\n') ;
		} else
		  set_body(ctx, ptmp) ;
		break ;
	    }
	    break ;
	  case p_end:
	    //APR_BRIGADE_INSERT_TAIL(bbout,
	//	apr_bucket_eos_create(bbout->bucket_alloc) ) ;
	    ctx->parse_state = p_done ;
	  case p_done:
	    break ;
	}
	if ( e - ptr >= bytes )
	  break ;
	p = e + 1 ;
      }
      if ( ( ctx->parse_state != p_end ) && ( ctx->parse_state != p_done ) ) {
	size_t bleft = bytes - (p-ptr) ;
	ctx->leftover = apr_pstrndup(f->r->pool, p, bleft ) ;
#ifdef DEBUG
  ap_log_rerror(APLOG_MARK,APLOG_DEBUG,0, f->r, "leftover %d bytes\n\t%s\n\t%s\n", bleft, ctx->leftover, p) ;
#endif
      }
    }
  }
  apr_brigade_destroy(bbin) ;
  return ret ;
}
Example #11
apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id)
{
    conn_rec *c;
    
    ap_assert(task);
    c = task->c;
    task->worker_started = 1;
    task->started_at = apr_time_now();
    
    if (c->master) {
        /* Each conn_rec->id is supposed to be unique at a point in time. Since
         * some modules (and maybe external code) uses this id as an identifier
         * for the request_rec they handle, it needs to be unique for slave 
         * connections also.
         * The connection id is generated by the MPM and most MPMs use the formula
         *    id := (child_num * max_threads) + thread_num
         * which means that there is a maximum id of about
         *    idmax := max_child_count * max_threads
         * If we assume 2024 child processes with 2048 threads max, we get
         *    idmax ~= 2024 * 2048 = 2 ** 22
         * On 32 bit systems, we have not much space left, but on 64 bit systems
         * (and higher?) we can use the upper 32 bits without fear of collision.
         * 32 bits is just what we need, since a connection can only handle so
         * many streams. 
         */
        int slave_id, free_bits;
        
        task->id = apr_psprintf(task->pool, "%ld-%d", c->master->id, 
                                task->stream_id);
        if (sizeof(unsigned long) >= 8) {
            free_bits = 32;
            slave_id = task->stream_id;
        }
        else {
            /* Assume we have a more limited number of threads/processes
             * and h2 workers on a 32-bit system. Use the worker instead
             * of the stream id. */
            free_bits = 8;
            slave_id = worker_id; 
        }
        task->c->id = (c->master->id << free_bits)^slave_id;
        c->keepalive = AP_CONN_KEEPALIVE;
    }
        
    h2_beam_create(&task->output.beam, c->pool, task->stream_id, "output", 
                   H2_BEAM_OWNER_SEND, 0, task->timeout);
    if (!task->output.beam) {
        return APR_ENOMEM;
    }
    
    h2_beam_buffer_size_set(task->output.beam, task->output.max_buffer);
    h2_beam_send_from(task->output.beam, task->pool);
    
    h2_ctx_create_for(c, task);
    apr_table_setn(c->notes, H2_TASK_ID_NOTE, task->id);

    if (task->input.beam) {
        h2_beam_mutex_enable(task->input.beam);
    }
    
    h2_slave_run_pre_connection(c, ap_get_conn_socket(c));            

    task->input.bb = apr_brigade_create(task->pool, c->bucket_alloc);
    if (task->request->serialize) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                      "h2_task(%s): serialize request %s %s", 
                      task->id, task->request->method, task->request->path);
        apr_brigade_printf(task->input.bb, NULL, 
                           NULL, "%s %s HTTP/1.1\r\n", 
                           task->request->method, task->request->path);
        apr_table_do(input_ser_header, task, task->request->headers, NULL);
        apr_brigade_puts(task->input.bb, NULL, NULL, "\r\n");
    }
    
    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                  "h2_task(%s): process connection", task->id);
                  
    task->c->current_thread = thread; 
    ap_run_process_connection(c);
    
    if (task->frozen) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                      "h2_task(%s): process_conn returned frozen task", 
                      task->id);
        /* cleanup delayed */
        return APR_EAGAIN;
    }
    else {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                      "h2_task(%s): processing done", task->id);
        return output_finish(task);
    }
}