/* Warning: if you change this function, be sure to
 * change apr_bucket_pool_make() too!
 */
/* Initialise an existing bucket as a heap bucket over 'length' bytes at
 * 'buf'.  If 'free_func' is NULL a private copy of the data is taken and
 * owned by the bucket; otherwise the bucket references 'buf' directly and
 * will release it with 'free_func'.  Returns NULL on allocation failure. */
APU_DECLARE(apr_bucket *) apr_bucket_heap_make(apr_bucket *b, const char *buf,
                                               apr_size_t length,
                                               void (*free_func)(void *data))
{
    apr_bucket_heap *h;

    h = apr_bucket_alloc(sizeof(*h), b->list);
    if (h == NULL) {
        /* fix: the original checked only the second allocation below;
         * check this one too so both OOM paths fail cleanly */
        return NULL;
    }

    if (!free_func) {
        /* No free function given: copy the caller's data so the bucket
         * owns (and later frees) its own buffer. */
        h->alloc_len = length;
        h->base = apr_bucket_alloc(h->alloc_len, b->list);
        if (h->base == NULL) {
            apr_bucket_free(h);
            return NULL;
        }
        h->free_func = apr_bucket_free;
        memcpy(h->base, buf, length);
    }
    else {
        /* XXX: we lose the const qualifier here which indicates
         * there's something screwy with the API... */
        h->base = (char *) buf;
        h->alloc_len = length;
        h->free_func = free_func;
    }

    b = apr_bucket_shared_make(b, h, 0, length);
    b->type = &apr_bucket_type_heap;

    return b;
}
/* Read method for nginx-buffer buckets.
 * If the underlying ngx_buf_t is file-backed and not yet in memory
 * (pos == NULL), the file contents are read into a freshly allocated
 * buffer first; subsequent reads reuse that buffer via buf->pos. */
static apr_status_t nginx_bucket_read(apr_bucket *b, const char **str,
                                      apr_size_t *len, apr_read_type_e block)
{
    apr_bucket_nginx *n = b->data;
    ngx_buf_t *buf = n->buf;
    u_char *data;
    ssize_t size;

    /* Lazily pull a file-backed buffer into memory on first read. */
    if (buf->pos == NULL && ngx_buf_size(buf) != 0) {
        data = apr_bucket_alloc(ngx_buf_size(buf), b->list);
        if (data == NULL) {
            return APR_EGENERAL;
        }
        size = ngx_read_file(buf->file, data, ngx_buf_size(buf),
                             buf->file_pos);
        /* A short read is treated as a hard failure. */
        if (size != ngx_buf_size(buf)) {
            apr_bucket_free(data);
            return APR_EGENERAL;
        }
        buf->pos = data;
    }

    *str = (char *)buf->pos + b->start;
    *len = b->length;
    return APR_SUCCESS;
}
/* Initialise an existing bucket as a pool bucket over 'length' bytes at
 * 'buf', which live in 'pool'.  A pool cleanup is registered so the data
 * can outlive the pool (presumably by morphing into the pre-initialised
 * heap member below — see pool_bucket_cleanup, not visible here). */
APU_DECLARE(apr_bucket *) apr_bucket_pool_make(apr_bucket *b, const char *buf,
                                               apr_size_t length,
                                               apr_pool_t *pool)
{
    apr_bucket_pool *p;

    p = apr_bucket_alloc(sizeof(*p), b->list);

    /* XXX: we lose the const qualifier here which indicates
     * there's something screwy with the API... */
    /* XXX: why is this? buf is const, p->base is const... what's
     * the problem? --jcw */
    p->base = (char *) buf;
    p->pool = pool;
    p->list = b->list;

    b = apr_bucket_shared_make(b, p, 0, length);
    b->type = &apr_bucket_type_pool;

    /* pre-initialize heap bucket member */
    p->heap.alloc_len = length;
    p->heap.base = NULL;    /* no heap copy made yet */
    p->heap.free_func = apr_bucket_free;

    apr_pool_cleanup_register(p->pool, p, pool_bucket_cleanup,
                              apr_pool_cleanup_null);

    return b;
}
/* Handle stdout from CGI child.  Duplicate of logic from the _read
 * method of the real APR pipe bucket implementation. */
static apr_status_t aikido_read_stdout(apr_bucket *a, apr_file_t *out,
                                       const char **str, apr_size_t *len)
{
    char *buf;
    apr_status_t rv;

    *str = NULL;
    *len = APR_BUCKET_BUFF_SIZE;
    buf = apr_bucket_alloc(*len, a->list); /* XXX: check for failure? */

    rv = apr_file_read(out, buf, len);

    if (rv != APR_SUCCESS && rv != APR_EOF) {
        apr_bucket_free(buf);
        return rv;
    }

    if (*len > 0) {
        struct aikido_bucket_data *data = a->data;
        apr_bucket_heap *h;

        /* Change the current bucket to refer to what we read */
        a = apr_bucket_heap_make(a, buf, *len, apr_bucket_free);
        h = a->data;
        h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */
        *str = buf;
        /* Chain a fresh aikido bucket after this one so the rest of the
         * child's output can still be read later. */
        APR_BUCKET_INSERT_AFTER(a, aikido_bucket_dup(data, a->list));
    }
    else {
        /* Zero-length read: morph into an empty immortal bucket. */
        apr_bucket_free(buf);
        a = apr_bucket_immortal_make(a, "", 0);
        *str = a->data;
    }
    return rv;
}
/* Lua binding: flatten a bucket brigade into one Lua string.
 * Pushes the string (or nil on failure) followed by the APR status
 * code; always returns two values to Lua. */
static int brigade_flatten(lua_State *L)
{
    apr_bucket_brigade *brigade =
        (apr_bucket_brigade *) CHECK_BUCKETBRIGADE_OBJECT(1);
    apr_off_t total = 0;
    apr_status_t status = apr_brigade_length(brigade, 1, &total);
    apr_size_t nbytes = (apr_size_t) total;

    if (status != APR_SUCCESS) {
        lua_pushnil(L);
    }
    else {
        char *scratch = apr_bucket_alloc(nbytes, brigade->bucket_alloc);

        status = apr_brigade_flatten(brigade, scratch, &nbytes);
        if (status == APR_SUCCESS) {
            lua_pushlstring(L, scratch, nbytes);
        }
        else {
            lua_pushnil(L);
        }
        apr_bucket_free(scratch);
    }

    lua_pushinteger(L, status);
    return 2;
}
/* Copy a "simple" bucket by duplicating the bucket structure itself;
 * the underlying data is shared, not copied (simple buckets carry no
 * reference count).  Returns APR_ENOMEM if the allocation fails. */
APU_DECLARE_NONSTD(apr_status_t) apr_bucket_simple_copy(apr_bucket *a,
                                                        apr_bucket **b)
{
    *b = apr_bucket_alloc(sizeof(**b), a->list);
    if (*b == NULL) {
        /* resolves the old "XXX: check for failure?" note */
        return APR_ENOMEM;
    }
    **b = *a;

    return APR_SUCCESS;
}
/* Append 'nbyte' bytes from 'str' to brigade 'b'.  Small writes are
 * coalesced into the trailing heap bucket when it has private space;
 * writes too large to buffer are either flushed through 'flush' (as a
 * transient bucket) or copied into a dedicated heap bucket. */
APU_DECLARE(apr_status_t) apr_brigade_write(apr_bucket_brigade *b,
                                            apr_brigade_flush flush,
                                            void *ctx,
                                            const char *str, apr_size_t nbyte)
{
    apr_bucket *e = APR_BRIGADE_LAST(b);
    apr_size_t remaining = APR_BUCKET_BUFF_SIZE;
    char *buf = NULL;

    /*
     * If the last bucket is a heap bucket and its buffer is not shared with
     * another bucket, we may write into that bucket.
     */
    if (!APR_BRIGADE_EMPTY(b) && APR_BUCKET_IS_HEAP(e)
        && ((apr_bucket_heap *)(e->data))->refcount.refcount == 1) {
        apr_bucket_heap *h = e->data;

        /* HEAP bucket start offsets are always in-memory, safe to cast */
        remaining = h->alloc_len - (e->length + (apr_size_t)e->start);
        buf = h->base + e->start + e->length;
    }

    if (nbyte > remaining) {
        /* either a buffer bucket exists but is full,
         * or no buffer bucket exists and the data is too big
         * to buffer.  In either case, we should flush. */
        if (flush) {
            /* transient is safe: flush consumes the data immediately */
            e = apr_bucket_transient_create(str, nbyte, b->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(b, e);
            return flush(b, ctx);
        }
        else {
            /* no flush callback: take a private heap copy of the data */
            e = apr_bucket_heap_create(str, nbyte, NULL, b->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(b, e);
            return APR_SUCCESS;
        }
    }
    else if (!buf) {
        /* we don't have a buffer, but the data is small enough
         * that we don't mind making a new buffer */
        buf = apr_bucket_alloc(APR_BUCKET_BUFF_SIZE, b->bucket_alloc);
        e = apr_bucket_heap_create(buf, APR_BUCKET_BUFF_SIZE,
                                   apr_bucket_free, b->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(b, e);
        e->length = 0;  /* We are writing into the brigade, and
                         * allocating more memory than we need.  This
                         * ensures that the bucket thinks it is empty just
                         * after we create it.  We'll fix the length
                         * once we put data in it below. */
    }

    /* there is a sufficiently big buffer bucket available now */
    memcpy(buf, str, nbyte);
    e->length += nbyte;

    return APR_SUCCESS;
}
/* Read method for pipe buckets: read up to APR_BUCKET_BUFF_SIZE bytes
 * from the pipe (using a zero timeout for non-blocking reads), morph
 * this bucket into a heap bucket over what was read, and chain a new
 * pipe bucket after it for the remainder.  On EOF the pipe is closed
 * and the bucket becomes a zero-length immortal bucket. */
static apr_status_t pipe_bucket_read(apr_bucket *a, const char **str,
                                     apr_size_t *len, apr_read_type_e block)
{
    apr_file_t *p = a->data;
    char *buf;
    apr_status_t rv;
    apr_interval_time_t timeout;

    if (block == APR_NONBLOCK_READ) {
        /* temporarily force a zero timeout; restored after the read */
        apr_file_pipe_timeout_get(p, &timeout);
        apr_file_pipe_timeout_set(p, 0);
    }

    *str = NULL;
    *len = APR_BUCKET_BUFF_SIZE;
    buf = apr_bucket_alloc(*len, a->list); /* XXX: check for failure? */

    rv = apr_file_read(p, buf, len);

    if (block == APR_NONBLOCK_READ) {
        apr_file_pipe_timeout_set(p, timeout);
    }

    if (rv != APR_SUCCESS && rv != APR_EOF) {
        apr_bucket_free(buf);
        return rv;
    }
    /*
     * If there's more to read we have to keep the rest of the pipe
     * for later.  Otherwise, we'll close the pipe.
     * XXX: Note that more complicated bucket types that
     * refer to data not in memory and must therefore have a read()
     * function similar to this one should be wary of copying this
     * code because if they have a destroy function they probably
     * want to migrate the bucket's subordinate structure from the
     * old bucket to a raw new one and adjust it as appropriate,
     * rather than destroying the old one and creating a completely
     * new bucket.
     */
    if (*len > 0) {
        apr_bucket_heap *h;

        /* Change the current bucket to refer to what we read */
        a = apr_bucket_heap_make(a, buf, *len, apr_bucket_free);
        h = a->data;
        h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */
        *str = buf;
        APR_BUCKET_INSERT_AFTER(a, apr_bucket_pipe_create(p, a->list));
    }
    else {
        /* zero-length read: nothing buffered, become an empty bucket */
        apr_bucket_free(buf);
        a = apr_bucket_immortal_make(a, "", 0);
        *str = a->data;
        if (rv == APR_EOF) {
            apr_file_close(p);
        }
    }
    return APR_SUCCESS;
}
/* Allocate and initialise a new EOS (end-of-stream) metadata bucket. */
APU_DECLARE(apr_bucket *) apr_bucket_eos_create(apr_bucket_alloc_t *list)
{
    apr_bucket *bucket = apr_bucket_alloc(sizeof(*bucket), list);

    APR_BUCKET_INIT(bucket);
    bucket->free = apr_bucket_free;
    bucket->list = list;
    return apr_bucket_eos_make(bucket);
}
/* Allocate a new bucket and initialise it as a socket_ex bucket
 * wrapping 'data'. */
static apr_bucket *bucket_socket_ex_create(socket_ex_data *data,
                                           apr_bucket_alloc_t *list)
{
    apr_bucket *bucket = apr_bucket_alloc(sizeof(*bucket), list);

    APR_BUCKET_INIT(bucket);
    bucket->free = apr_bucket_free;
    bucket->list = list;
    return bucket_socket_ex_make(bucket, data);
}
/* Allocate a fresh bucket and turn it into a Passenger data bucket
 * over 'state'. */
apr_bucket *
passenger_bucket_create(const PassengerBucketStatePtr &state,
                        apr_bucket_alloc_t *list,
                        bool bufferResponse)
{
    apr_bucket *b = (apr_bucket *) apr_bucket_alloc(sizeof(*b), list);

    APR_BUCKET_INIT(b);
    b->free = apr_bucket_free;
    b->list = list;
    return passenger_bucket_make(b, state, bufferResponse);
}
/* Read method for MySQL LOB buckets: fetch up to bind->buffer_length
 * bytes of the LOB column into the statement's bind buffer, hand that
 * buffer to the caller by morphing this bucket into a pool bucket, and
 * chain a new LOB bucket for any bytes still remaining. */
static apr_status_t lob_bucket_read(apr_bucket *e, const char **str,
                                    apr_size_t *len, apr_read_type_e block)
{
    apr_bucket_lob *a = e->data;
    const apr_dbd_row_t *row = a->row;
    apr_dbd_results_t *res = row->res;
    int col = a->col;
    apr_bucket *b = NULL;
    int rv;
    apr_size_t blength = e->length;  /* bytes remaining in file past offset */
    apr_off_t boffset = e->start;
    MYSQL_BIND *bind = &res->bind[col];

    *str = NULL;  /* in case we die prematurely */

    /* fetch from offset if not at the beginning */
    if (boffset > 0) {
        rv = mysql_stmt_fetch_column(res->statement, bind, col,
                                     (unsigned long) boffset);
        if (rv != 0) {
            return APR_EGENERAL;
        }
    }
    /* at most one bind-buffer's worth is consumed per read */
    blength -= blength > bind->buffer_length ? bind->buffer_length : blength;
    *len = e->length - blength;
    *str = bind->buffer;

    /* allocate new buffer, since we used this one for the bucket */
    bind->buffer = apr_palloc(res->pool, bind->buffer_length);

    /*
     * Change the current bucket to refer to what we read,
     * even if we read nothing because we hit EOF.
     */
    apr_bucket_pool_make(e, *str, *len, res->pool);

    /* If we have more to read from the field, then create another bucket */
    if (blength > 0) {
        /* for efficiency, we can just build a new apr_bucket struct
         * to wrap around the existing LOB bucket */
        b = apr_bucket_alloc(sizeof(*b), e->list);
        b->start = boffset + *len;
        b->length = blength;
        b->data = a;
        b->type = &apr_bucket_type_lob;
        b->free = apr_bucket_free;
        b->list = e->list;
        APR_BUCKET_INSERT_AFTER(e, b);
    }
    else {
        /* fully consumed: release the subordinate LOB structure */
        lob_bucket_destroy(a);
    }

    return APR_SUCCESS;
}
/* Create a new immortal bucket referencing 'length' bytes at 'buf';
 * nothing is copied or freed (caller guarantees the data's lifetime). */
APU_DECLARE(apr_bucket *) apr_bucket_immortal_create(const char *buf,
                                                     apr_size_t length,
                                                     apr_bucket_alloc_t *list)
{
    apr_bucket *bucket = apr_bucket_alloc(sizeof(*bucket), list);

    APR_BUCKET_INIT(bucket);
    bucket->free = apr_bucket_free;
    bucket->list = list;
    return apr_bucket_immortal_make(bucket, buf, length);
}
AP_DECLARE(apr_bucket *) ap_bucket_error_create(int error, const char *buf, apr_pool_t *p, apr_bucket_alloc_t *list) { apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); APR_BUCKET_INIT(b); b->free = apr_bucket_free; b->list = list; return ap_bucket_error_make(b, error, buf, p); }
/* Create a new LOB bucket covering 'len' bytes at 'offset' of the given
 * row/column, reading via pool 'p'. */
static apr_bucket *apr_bucket_lob_create(const apr_dbd_row_t *row, int col,
                                         apr_off_t offset, apr_size_t len,
                                         apr_pool_t *p,
                                         apr_bucket_alloc_t *list)
{
    apr_bucket *bucket = apr_bucket_alloc(sizeof(*bucket), list);

    APR_BUCKET_INIT(bucket);
    bucket->free = apr_bucket_free;
    bucket->list = list;
    return apr_bucket_lob_make(bucket, row, col, offset, len, p);
}
/* Initialise an existing bucket as an h2 EOS bucket tied to 'stream'
 * (zero-length shared metadata bucket). */
apr_bucket *h2_bucket_eos_make(apr_bucket *b, h2_stream *stream)
{
    h2_bucket_eos *eos = apr_bucket_alloc(sizeof(*eos), b->list);

    eos->stream = stream;
    b = apr_bucket_shared_make(b, eos, 0, 0);
    b->type = &h2_bucket_type_eos;
    return b;
}
/* Create a new file bucket covering 'len' bytes at 'offset' of open
 * file 'fd', reading via pool 'p'. */
APU_DECLARE(apr_bucket *) apr_bucket_file_create(apr_file_t *fd,
                                                 apr_off_t offset,
                                                 apr_size_t len, apr_pool_t *p,
                                                 apr_bucket_alloc_t *list)
{
    apr_bucket *bucket = apr_bucket_alloc(sizeof(*bucket), list);

    APR_BUCKET_INIT(bucket);
    bucket->free = apr_bucket_free;
    bucket->list = list;
    return apr_bucket_file_make(bucket, fd, offset, len, p);
}
/* Create a new heap bucket over 'length' bytes at 'buf' (copied unless
 * 'free_func' is given — see apr_bucket_heap_make).  Returns NULL on
 * allocation failure. */
APU_DECLARE(apr_bucket *) apr_bucket_heap_create(const char *buf,
                                                 apr_size_t length,
                                                 void (*free_func)(void *data),
                                                 apr_bucket_alloc_t *list)
{
    apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
    apr_bucket *heap;

    APR_BUCKET_INIT(b);
    b->free = apr_bucket_free;
    b->list = list;

    heap = apr_bucket_heap_make(b, buf, length, free_func);
    if (heap == NULL) {
        /* fix: heap_make can fail (see its OOM paths); previously the
         * freshly allocated bucket shell was leaked here */
        apr_bucket_free(b);
    }
    return heap;
}
/* Create a new beam-proxy bucket wrapping bucket 'bred' of 'beam'. */
static apr_bucket *h2_beam_bucket_create(h2_bucket_beam *beam,
                                         apr_bucket *bred,
                                         apr_bucket_alloc_t *list,
                                         apr_size_t n)
{
    apr_bucket *proxy = apr_bucket_alloc(sizeof(*proxy), list);

    APR_BUCKET_INIT(proxy);
    proxy->free = apr_bucket_free;
    proxy->list = list;
    return h2_beam_bucket_make(proxy, beam, bred, n);
}
/* ngx_buf_t to apr_bucket: allocate a bucket and wrap it around 'buf'. */
apr_bucket *apr_bucket_nginx_create(ngx_buf_t *buf, apr_pool_t *p,
                                    apr_bucket_alloc_t *list)
{
    apr_bucket *bucket = apr_bucket_alloc(sizeof(*bucket), list);

    APR_BUCKET_INIT(bucket);  /* link */
    bucket->free = apr_bucket_free;
    bucket->list = list;
    return apr_bucket_nginx_make(bucket, buf, p);
}
/* Initialise an existing bucket as a shared nginx-buffer bucket over
 * 'buf'.  ('pool' is accepted for interface symmetry but unused here.) */
apr_bucket *apr_bucket_nginx_make(apr_bucket *b, ngx_buf_t *buf,
                                  apr_pool_t *pool)
{
    apr_bucket_nginx *wrapper;

    wrapper = apr_bucket_alloc(sizeof(*wrapper), b->list);
    wrapper->buf = buf;

    b = apr_bucket_shared_make(b, wrapper, 0, ngx_buf_size(buf));
    b->type = &apr_bucket_type_nginx;
    return b;
}
/* Create a duplicate CGI bucket using given bucket data */
static apr_bucket *aikido_bucket_dup(struct aikido_bucket_data *data,
                                     apr_bucket_alloc_t *list)
{
    apr_bucket *dup = apr_bucket_alloc(sizeof(*dup), list);

    APR_BUCKET_INIT(dup);
    dup->free = apr_bucket_free;
    dup->list = list;
    dup->type = &bucket_type_aikido;
    dup->length = (apr_size_t)(-1);  /* length/start unknown until read */
    dup->start = -1;
    dup->data = data;
    return dup;
}
AP_DECLARE(apr_bucket *) ap_bucket_error_make(apr_bucket *b, int error, const char *buf, apr_pool_t *p) { ap_bucket_error *h; h = apr_bucket_alloc(sizeof(*h), b->list); h->status = error; h->data = (buf) ? apr_pstrdup(p, buf) : NULL; b = apr_bucket_shared_make(b, h, 0, 0); b->type = &ap_bucket_type_error; return b; }
/* Output filter: upper-case every data bucket of the input brigade into
 * a new brigade and pass it down the filter chain.  EOS buckets are
 * forwarded as fresh EOS buckets. */
static apr_status_t CaseFilterOutFilter(ap_filter_t *f,
                                        apr_bucket_brigade *pbbIn)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    apr_bucket *pbktIn;
    apr_bucket_brigade *pbbOut;

    pbbOut = apr_brigade_create(r->pool, c->bucket_alloc);
    for (pbktIn = APR_BRIGADE_FIRST(pbbIn);
         pbktIn != APR_BRIGADE_SENTINEL(pbbIn);
         pbktIn = APR_BUCKET_NEXT(pbktIn))
    {
        const char *data;
        apr_size_t len;
        char *buf;
        apr_size_t n;
        apr_bucket *pbktOut;
        apr_status_t rv;

        if (APR_BUCKET_IS_EOS(pbktIn)) {
            apr_bucket *pbktEOS = apr_bucket_eos_create(c->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(pbbOut, pbktEOS);
            continue;
        }

        /* read — fix: the return value was previously ignored, so a
         * failed read would process garbage */
        rv = apr_bucket_read(pbktIn, &data, &len, APR_BLOCK_READ);
        if (rv != APR_SUCCESS) {
            apr_brigade_cleanup(pbbOut);
            return rv;
        }

        /* write: upper-case into a fresh heap buffer */
        buf = apr_bucket_alloc(len, c->bucket_alloc);
        for (n = 0; n < len; ++n)
            buf[n] = apr_toupper(data[n]);

        pbktOut = apr_bucket_heap_create(buf, len, apr_bucket_free,
                                         c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(pbbOut, pbktOut);
    }

    /* Q: is there any advantage to passing a brigade for each bucket?
     * A: obviously, it can cut down server resource consumption, if this
     * experimental module was fed a file of 4MB, it would be using 8MB for
     * the 'read' buckets and the 'write' buckets.
     *
     * Note it is more efficient to consume (destroy) each bucket as it's
     * processed above than to do a single cleanup down here.  In any case,
     * don't let our caller pass the same buckets to us, twice; */
    apr_brigade_cleanup(pbbIn);
    return ap_pass_brigade(f->next, pbbOut);
}
/* Read method for socket_ex buckets: receive up to APR_BUCKET_BUFF_SIZE
 * bytes (using a zero timeout for non-blocking reads), add the count to
 * *data->counter, morph this bucket into a heap bucket over the data,
 * and chain a new socket_ex bucket after it for subsequent reads. */
static apr_status_t bucket_socket_ex_read(apr_bucket *a, const char **str,
                                          apr_size_t *len,
                                          apr_read_type_e block)
{
    socket_ex_data *data = a->data;
    apr_socket_t *p = data->sock;
    char *buf;
    apr_status_t rv;
    apr_interval_time_t timeout;

    if (block == APR_NONBLOCK_READ) {
        /* temporarily force a zero timeout; restored after the read */
        apr_socket_timeout_get(p, &timeout);
        apr_socket_timeout_set(p, 0);
    }

    *str = NULL;
    *len = APR_BUCKET_BUFF_SIZE;
    buf = apr_bucket_alloc(*len, a->list);

    rv = apr_socket_recv(p, buf, len);

    if (block == APR_NONBLOCK_READ) {
        apr_socket_timeout_set(p, timeout);
    }

    if (rv != APR_SUCCESS && rv != APR_EOF) {
        apr_bucket_free(buf);
        return rv;
    }

    if (*len > 0) {
        apr_bucket_heap *h;

        /* count for stats */
        *data->counter += *len;

        /* Change the current bucket to refer to what we read */
        a = apr_bucket_heap_make(a, buf, *len, apr_bucket_free);
        h = a->data;
        h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */
        *str = buf;
        APR_BUCKET_INSERT_AFTER(a, bucket_socket_ex_create(data, a->list));
    }
    else {
        /* zero-length read: become an empty immortal bucket */
        apr_bucket_free(buf);
        a = apr_bucket_immortal_make(a, "", 0);
        *str = a->data;
    }
    return APR_SUCCESS;
}
AP_DECLARE(apr_bucket *) ap_bucket_error_create(int error, const char *buf, apr_pool_t *p, apr_bucket_alloc_t *list) { apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); APR_BUCKET_INIT(b); b->free = apr_bucket_free; b->list = list; if (!ap_is_HTTP_VALID_RESPONSE(error)) { error = HTTP_INTERNAL_SERVER_ERROR; } return ap_bucket_error_make(b, error, buf, p); }
/* Create a rate-limit "start" marker bucket: a zero-length metadata
 * bucket with no data payload.
 * NOTE(review): the return type (presumably apr_bucket *) appears to be
 * declared on a preceding line outside this chunk — confirm upstream. */
ap_rl_start_create(apr_bucket_alloc_t *list)
{
    apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);

    APR_BUCKET_INIT(b);
    b->free = apr_bucket_free;
    b->list = list;
    b->length = 0;   /* metadata bucket: carries no data */
    b->start = 0;
    b->data = NULL;
    b->type = &ap_rl_bucket_type_start;
    return b;
}
/* Create a CGI bucket using pipes from script stdout 'out' * and stderr 'err', for request 'r'. */ static apr_bucket *cgi_bucket_create(request_rec *r, apr_file_t *out, apr_file_t *err, apr_bucket_alloc_t *list) { apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); apr_status_t rv; apr_pollfd_t fd; struct cgi_bucket_data *data = apr_palloc(r->pool, sizeof *data); APR_BUCKET_INIT(b); b->free = apr_bucket_free; b->list = list; b->type = &bucket_type_cgi; b->length = (apr_size_t)(-1); b->start = -1; /* Create the pollset */ rv = apr_pollset_create(&data->pollset, 2, r->pool, 0); if (rv != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01217) "apr_pollset_create(); check system or user limits"); return NULL; } fd.desc_type = APR_POLL_FILE; fd.reqevents = APR_POLLIN; fd.p = r->pool; fd.desc.f = out; /* script's stdout */ fd.client_data = (void *)1; rv = apr_pollset_add(data->pollset, &fd); if (rv != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01218) "apr_pollset_add(); check system or user limits"); return NULL; } fd.desc.f = err; /* script's stderr */ fd.client_data = (void *)2; rv = apr_pollset_add(data->pollset, &fd); if (rv != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01219) "apr_pollset_add(); check system or user limits"); return NULL; } data->r = r; b->data = data; return b; }
/* Create a new h2 EOS bucket for 'stream'.  If a stream is given, a
 * pre-cleanup on the stream's pool clears the bucket's back-pointer
 * (via bucket_cleanup) so it cannot dangle past the pool's lifetime. */
apr_bucket *h2_bucket_eos_create(apr_bucket_alloc_t *list,
                                 h2_stream *stream)
{
    apr_bucket *bucket = apr_bucket_alloc(sizeof(*bucket), list);

    APR_BUCKET_INIT(bucket);
    bucket->free = apr_bucket_free;
    bucket->list = list;
    bucket = h2_bucket_eos_make(bucket, stream);

    if (stream) {
        h2_bucket_eos *eos = bucket->data;
        apr_pool_pre_cleanup_register(stream->pool, &eos->stream,
                                      bucket_cleanup);
    }
    return bucket;
}
/* Initialise an existing bucket as a shared LOB bucket over the given
 * database row/column, reading via pool 'p'. */
static apr_bucket *apr_bucket_lob_make(apr_bucket *b,
                                       const apr_dbd_row_t *row, int col,
                                       apr_off_t offset, apr_size_t len,
                                       apr_pool_t *p)
{
    apr_bucket_lob *lob;

    lob = apr_bucket_alloc(sizeof(*lob), b->list);
    lob->row = row;
    lob->col = col;
    lob->readpool = p;

    b = apr_bucket_shared_make(b, lob, offset, len);
    b->type = &apr_bucket_type_lob;
    return b;
}