/*
 * Append nbyte bytes from str to brigade b, buffering small writes.
 *
 * Strategy:
 *   - If the brigade's last bucket is an unshared heap bucket with spare
 *     capacity, copy into that buffer in place.
 *   - If the data does not fit: with a flush callback, wrap str in a
 *     transient bucket (safe because flush consumes it before return) and
 *     flush; without one, copy str into its own heap bucket.
 *   - Otherwise allocate a fresh APR_BUCKET_BUFF_SIZE heap buffer to
 *     accumulate this and future small writes.
 *
 * b     - brigade to append to
 * flush - optional callback invoked when the data cannot be buffered
 *         (may be NULL)
 * ctx   - opaque context handed to flush
 * str   - bytes to write
 * nbyte - number of bytes in str
 *
 * Returns APR_SUCCESS, or whatever flush() returns.
 */
APU_DECLARE(apr_status_t) apr_brigade_write(apr_bucket_brigade *b,
                                            apr_brigade_flush flush, void *ctx,
                                            const char *str, apr_size_t nbyte)
{
    apr_bucket *e = APR_BRIGADE_LAST(b);
    apr_size_t remaining = APR_BUCKET_BUFF_SIZE;
    char *buf = NULL;

    /*
     * If the last bucket is a heap bucket and its buffer is not shared with
     * another bucket, we may write into that bucket.
     */
    if (!APR_BRIGADE_EMPTY(b) && APR_BUCKET_IS_HEAP(e)
        && ((apr_bucket_heap *)(e->data))->refcount.refcount == 1) {
        apr_bucket_heap *h = e->data;

        /* HEAP bucket start offsets are always in-memory, safe to cast */
        remaining = h->alloc_len - (e->length + (apr_size_t)e->start);
        buf = h->base + e->start + e->length;
    }

    if (nbyte > remaining) {
        /* either a buffer bucket exists but is full,
         * or no buffer bucket exists and the data is too big
         * to buffer.  In either case, we should flush.
         */
        if (flush) {
            /* transient is safe here: flush() consumes the brigade before
             * str can go out of scope in the caller */
            e = apr_bucket_transient_create(str, nbyte, b->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(b, e);
            return flush(b, ctx);
        }
        else {
            /* NULL free-func => heap bucket makes its own copy of str */
            e = apr_bucket_heap_create(str, nbyte, NULL, b->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(b, e);
            return APR_SUCCESS;
        }
    }
    else if (!buf) {
        /* we don't have a buffer, but the data is small enough
         * that we don't mind making a new buffer */
        buf = apr_bucket_alloc(APR_BUCKET_BUFF_SIZE, b->bucket_alloc);
        e = apr_bucket_heap_create(buf, APR_BUCKET_BUFF_SIZE,
                                   apr_bucket_free, b->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(b, e);
        e->length = 0;   /* We are writing into the brigade, and
                          * allocating more memory than we need.  This
                          * ensures that the bucket thinks it is empty just
                          * after we create it.  We'll fix the length
                          * once we put data in it below.
                          */
    }

    /* there is a sufficiently big buffer bucket available now */
    memcpy(buf, str, nbyte);
    e->length += nbyte;

    return APR_SUCCESS;
}
/*
 * CaseFilterInFilter(): input filter that upper-cases request body data.
 *
 * Pulls data from the next filter into a per-request temporary brigade,
 * then copies each data bucket upper-cased into the caller's brigade.
 * EOS buckets are moved across verbatim.
 *
 * FIX: the original allocated the output buffer with ap_malloc() and then
 * created the heap bucket with a NULL free function.  With a NULL free
 * function apr_bucket_heap_create() COPIES the data into its own buffer,
 * so every ap_malloc()'d block was leaked.  We now allocate from the
 * bucket allocator and pass apr_bucket_free, handing ownership of the
 * buffer to the bucket (no copy, no leak) — same idiom as the output
 * filter variants in this file.
 */
static apr_status_t CaseFilterInFilter(ap_filter_t *f,
                                       apr_bucket_brigade *pbbOut,
                                       ap_input_mode_t eMode,
                                       apr_read_type_e eBlock,
                                       apr_off_t nBytes)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    CaseFilterInContext *pCtx;
    apr_status_t ret;

    /* Lazily create the per-request context holding the temp brigade. */
    if (!(pCtx = f->ctx)) {
        f->ctx = pCtx = apr_palloc(r->pool, sizeof *pCtx);
        pCtx->pbbTmp = apr_brigade_create(r->pool, c->bucket_alloc);
    }

    if (APR_BRIGADE_EMPTY(pCtx->pbbTmp)) {
        ret = ap_get_brigade(f->next, pCtx->pbbTmp, eMode, eBlock, nBytes);

        /* AP_MODE_EATCRLF must not be transformed; errors pass straight up */
        if (eMode == AP_MODE_EATCRLF || ret != APR_SUCCESS)
            return ret;
    }

    while (!APR_BRIGADE_EMPTY(pCtx->pbbTmp)) {
        apr_bucket *pbktIn = APR_BRIGADE_FIRST(pCtx->pbbTmp);
        apr_bucket *pbktOut;
        const char *data;
        apr_size_t len;
        char *buf;
        apr_size_t n;

        /* It is tempting to do this...
         * APR_BUCKET_REMOVE(pB);
         * APR_BRIGADE_INSERT_TAIL(pbbOut,pB);
         * and change the case of the bucket data, but that would be wrong
         * for a file or socket buffer, for example...
         */

        if (APR_BUCKET_IS_EOS(pbktIn)) {
            APR_BUCKET_REMOVE(pbktIn);
            APR_BRIGADE_INSERT_TAIL(pbbOut, pbktIn);
            break;
        }

        ret = apr_bucket_read(pbktIn, &data, &len, eBlock);
        if (ret != APR_SUCCESS)
            return ret;

        /* Allocate from the bucket allocator so the heap bucket can own
         * the buffer directly (freed via apr_bucket_free). */
        buf = apr_bucket_alloc(len, c->bucket_alloc);
        for (n = 0; n < len; ++n) {
            buf[n] = apr_toupper(data[n]);
        }

        pbktOut = apr_bucket_heap_create(buf, len, apr_bucket_free,
                                         c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(pbbOut, pbktOut);
        apr_bucket_delete(pbktIn);
    }

    return APR_SUCCESS;
}
/* drain_available_output():
 *
 * if any data is available from the filter, read it and append it
 * to the the bucket brigade
 */
static apr_status_t drain_available_output(ap_filter_t *f,
                                           apr_bucket_brigade *bb)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    ef_ctx_t *ctx = f->ctx;
    ef_dir_t *dc = ctx->dc;
    apr_size_t len;
    char buf[4096];   /* stack scratch; copied into a heap bucket below */
    apr_status_t rv;
    apr_bucket *b;

    while (1) {
        len = sizeof(buf);
        /* read whatever the external filter process has produced so far */
        rv = apr_file_read(ctx->proc->out, buf, &len);
        if ((rv && !APR_STATUS_IS_EAGAIN(rv)) || dc->debug >= DBGLVL_GORY) {
            /* NOTE(review): on failure -1 is passed for the unsigned
             * %APR_SIZE_T_FMT conversion — format/argument mismatch;
             * confirm intended. */
            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r,
                          "apr_file_read(child output), len %" APR_SIZE_T_FMT,
                          !rv ? len : -1);
        }
        if (rv != APR_SUCCESS) {
            /* typically APR_EAGAIN: nothing (more) available right now */
            return rv;
        }
        /* NULL free-func => the heap bucket COPIES buf, so handing out a
         * stack buffer here is safe */
        b = apr_bucket_heap_create(buf, len, NULL, c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bb, b);
        /* NOTE(review): returning here drains at most ONE read per call,
         * so the while(1) never iterates; confirm whether the loop was
         * meant to keep reading until EAGAIN (as the function name and the
         * comment below suggest). */
        return APR_SUCCESS;
    }
    /* we should never get here; if we do, a bogus error message would be
     * the least of our problems
     */
    return APR_ANONYMOUS;
}
/*
 * dav_rainx_deliver_SPECIAL(): deliver the body of a generated, private
 * (non-collection) resource to the client.
 *
 * The resource's generator callback produces the response text; it is
 * wrapped in a heap bucket (copied — free-func is NULL), terminated with
 * an EOS bucket, and passed down the output filter chain.  Server-side
 * request statistics are bumped on success.
 *
 * Returns NULL on success or a dav_error describing the failure.
 */
static dav_error *
dav_rainx_deliver_SPECIAL(const dav_resource *resource, ap_filter_t *output)
{
    const char *result;
    int result_len;
    apr_status_t status;
    apr_pool_t *pool;
    apr_bucket_brigade *bb;
    apr_bucket *bkt;

    DAV_XDEBUG_RES(resource, 0, "%s()", __FUNCTION__);
    pool = resource->info->request->pool;

    /* Check resource type: only PRIVATE resources may be generated */
    if (resource->type != DAV_RESOURCE_TYPE_PRIVATE)
        return server_create_and_stat_error(
            resource_get_server_config(resource), pool, HTTP_CONFLICT, 0,
            apr_pstrdup(pool, "Cannot GET this type of resource."));
    if (resource->collection)
        return server_create_and_stat_error(
            resource_get_server_config(resource), pool, HTTP_CONFLICT, 0,
            apr_pstrdup(pool,"No GET on collections"));

    /* Generate the output (pool-allocated text from the generator hook) */
    result = resource->info->generator(resource, pool);
    result_len = strlen(result);

    /* We must reply a buffer; NULL free-func => bucket copies the data */
    bkt = apr_bucket_heap_create(result, result_len, NULL,
                                 output->c->bucket_alloc);
    bb = apr_brigade_create(pool, output->c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, bkt);

    /* Nothing more to reply */
    bkt = apr_bucket_eos_create(output->c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, bkt);

    DAV_XDEBUG_RES(resource, 0, "%s : ready to deliver", __FUNCTION__);

    if ((status = ap_pass_brigade(output, bb)) != APR_SUCCESS)
        return server_create_and_stat_error(
            resource_get_server_config(resource), pool, HTTP_FORBIDDEN, 0,
            apr_pstrdup(pool,"Could not write contents to filter."));

    server_inc_stat(resource_get_server_config(resource),
                    RAWX_STATNAME_REP_2XX, 0);

    /* HERE ADD request counter */
    switch(resource->info->type) {
        case STAT:
            server_inc_request_stat(resource_get_server_config(resource),
                                    RAWX_STATNAME_REQ_STAT,
                                    request_get_duration(resource->info->request));
            break;
        case INFO:
            server_inc_request_stat(resource_get_server_config(resource),
                                    RAWX_STATNAME_REQ_INFO,
                                    request_get_duration(resource->info->request));
            break;
        default:
            break;
    }

    return NULL;
}
/*
 * CaseFilterOutFilter(): output filter that upper-cases response data.
 *
 * Every data bucket of the incoming brigade is read (never mutated in
 * place — the bucket may be backed by a file or socket), upper-cased into
 * a bucket-allocator buffer, and appended to a fresh brigade which is then
 * passed downstream.  EOS is mirrored with a new EOS bucket.
 */
static apr_status_t CaseFilterOutFilter(ap_filter_t *f,
                                        apr_bucket_brigade *pbbIn)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    apr_bucket_brigade *upper_bb;
    apr_bucket *in_bkt;

    /* Accumulate the transformed response here. */
    upper_bb = apr_brigade_create(r->pool, c->bucket_alloc);

    for (in_bkt = APR_BRIGADE_FIRST(pbbIn);
         in_bkt != APR_BRIGADE_SENTINEL(pbbIn);
         in_bkt = APR_BUCKET_NEXT(in_bkt)) {
        const char *src;
        apr_size_t nbytes;
        apr_size_t i;
        char *dst;
        apr_bucket *out_bkt;

        if (APR_BUCKET_IS_EOS(in_bkt)) {
            /* Mirror end-of-stream into the output brigade. */
            APR_BRIGADE_INSERT_TAIL(upper_bb,
                                    apr_bucket_eos_create(c->bucket_alloc));
            continue;
        }

        /* read the bucket's bytes (blocking) */
        apr_bucket_read(in_bkt, &src, &nbytes, APR_BLOCK_READ);

        /* write: upper-case into a buffer owned by the new heap bucket */
        dst = apr_bucket_alloc(nbytes, c->bucket_alloc);
        for (i = 0; i < nbytes; ++i) {
            dst[i] = apr_toupper(src[i]);
        }

        out_bkt = apr_bucket_heap_create(dst, nbytes, apr_bucket_free,
                                         c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(upper_bb, out_bkt);
    }

    /* The input brigade has been fully consumed; clean it so our caller
     * cannot hand us the same buckets twice. */
    apr_brigade_cleanup(pbbIn);
    return ap_pass_brigade(f->next, upper_bb);
}
/*
 * filter_write(): mod_python filter.write(str) implementation.
 *
 * Appends the bytes of a Python (2.x) string object to the filter's
 * output brigade, creating the brigade on first use.  Returns Py_None on
 * success, or NULL with a Python exception set (TypeError for a
 * non-string argument, ValueError if the filter is closed).
 */
static PyObject *filter_write(filterobject *self, PyObject *args)
{
    char *buff;
    int len;
    apr_bucket *b;
    PyObject *s;
    conn_rec *c = self->request_obj->request_rec->connection;

    if (! PyArg_ParseTuple(args, "O", &s))
        return NULL;   /* bad arguments; exception already set */

    if (! PyString_Check(s)) {
        PyErr_SetString(PyExc_TypeError,
                        "Argument to write() must be a string");
        return NULL;
    }

    if (self->closed) {
        PyErr_SetString(PyExc_ValueError,
                        "I/O operation on closed filter");
        return NULL;
    }

    /* PYTHON 2.5: 'PyString_Size' uses Py_ssize_t for return values (may need overflow check) */
    len = PyString_Size(s);

    if (len) {
        /* does the output brigade exist? */
        if (!self->bb_out) {
            self->bb_out = apr_brigade_create(self->f->r->pool,
                                              c->bucket_alloc);
        }

        /* Copy the string bytes into a bucket-allocator buffer; the heap
         * bucket takes ownership and frees it via apr_bucket_free. */
        buff = apr_bucket_alloc(len, c->bucket_alloc);
        memcpy(buff, PyString_AS_STRING(s), len);

        b = apr_bucket_heap_create(buff, len, apr_bucket_free,
                                   c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(self->bb_out, b);
    }

    Py_INCREF(Py_None);
    return Py_None;
}
/*
 * amagent_post_filter(): input filter that re-injects preserved POST data.
 *
 * If a base64-encoded POST body was stashed in r->notes (under
 * amagent_post_filter_name), decode it, wrap it in a heap bucket (copied
 * — free-func is NULL) followed by EOS, remove this filter and return the
 * brigade.  Otherwise the note is cleared, the filter removed, and the
 * read is delegated to the next filter.
 *
 * FIX: the debug log passed an apr_size_t for "%ld" — undefined behavior
 * on platforms where apr_size_t is not long (e.g. 64-bit Windows).  The
 * value is now cast to unsigned long and printed with "%lu".
 */
static apr_status_t amagent_post_filter(ap_filter_t *f,
                                        apr_bucket_brigade *bucket_out,
                                        ap_input_mode_t emode,
                                        apr_read_type_e eblock,
                                        apr_off_t nbytes)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    apr_bucket *bucket;
    apr_size_t sz;
    char *clean;
    const char *data = apr_table_get(r->notes, amagent_post_filter_name);

    do {
        if (data == NULL)
            break;

        sz = strlen(data);
        clean = base64_decode(data, &sz);  /* sz updated to decoded length */
        if (clean == NULL)
            break;

        apr_table_unset(r->notes, amagent_post_filter_name);

        LOG_R(APLOG_DEBUG, r, "amagent_post_filter(): reposting %lu bytes",
              (unsigned long) sz);

        /* NULL free-func => bucket copies `clean`; we free our copy below */
        bucket = apr_bucket_heap_create((const char *) clean, sz, NULL,
                                        c->bucket_alloc);
        if (bucket == NULL) {
            free(clean);
            return APR_EGENERAL;
        }
        APR_BRIGADE_INSERT_TAIL(bucket_out, bucket);
        free(clean);

        /* terminate the replayed body */
        bucket = apr_bucket_eos_create(c->bucket_alloc);
        if (bucket == NULL) {
            return APR_EGENERAL;
        }
        APR_BRIGADE_INSERT_TAIL(bucket_out, bucket);

        /* one-shot filter: never run again for this request */
        ap_remove_input_filter(f);
        return APR_SUCCESS;
    } while (0);

    /* nothing to replay: clean up and fall through to the next filter */
    apr_table_unset(r->notes, amagent_post_filter_name);
    ap_remove_input_filter(f);
    return ap_get_brigade(f->next, bucket_out, emode, eblock, nbytes);
}
/*
 * urlReplaceFilterOutFilter(): output filter that upper-cases response
 * data bucket by bucket.
 *
 * The incoming buckets are never modified in place (they may be backed by
 * a file or socket); each one is read, transformed into a freshly
 * allocated buffer, and emitted as a new heap bucket on an output brigade
 * that is then passed downstream.
 */
static apr_status_t urlReplaceFilterOutFilter(ap_filter_t *f,
                                              apr_bucket_brigade *pbbIn)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    apr_bucket_brigade *out_bb;
    apr_bucket *cur;

    out_bb = apr_brigade_create(r->pool, c->bucket_alloc);

    cur = APR_BRIGADE_FIRST(pbbIn);
    while (cur != APR_BRIGADE_SENTINEL(pbbIn)) {
        if (APR_BUCKET_IS_EOS(cur)) {
            /* propagate end-of-stream */
            APR_BRIGADE_INSERT_TAIL(out_bb,
                                    apr_bucket_eos_create(c->bucket_alloc));
        }
        else {
            const char *in_data;
            apr_size_t in_len;
            apr_size_t idx;
            char *out_data;
            apr_bucket *out_bkt;

            /* pull the bucket's bytes (blocking read) */
            apr_bucket_read(cur, &in_data, &in_len, APR_BLOCK_READ);

            /* transform into a buffer the new heap bucket will own */
            out_data = apr_bucket_alloc(in_len, c->bucket_alloc);
            for (idx = 0; idx < in_len; ++idx) {
                out_data[idx] = apr_toupper(in_data[idx]);
            }

            out_bkt = apr_bucket_heap_create(out_data, in_len,
                                             apr_bucket_free,
                                             c->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(out_bb, out_bkt);
        }
        cur = APR_BUCKET_NEXT(cur);
    }

    /* input fully consumed; don't let the caller feed it to us twice */
    apr_brigade_cleanup(pbbIn);
    return ap_pass_brigade(f->next, out_bb);
}
/*
 * xlate_in_filter(): mod_charset_lite input filter.
 *
 * Translates the request body from charset_source to charset_default.
 * Leftover (untranslated) input is parked in ctx->bb between invocations
 * and prepended on the next call; translated output accumulates in
 * ctx->tmp and is inserted at the head of the caller's brigade.
 *
 * Returns APR_SUCCESS or an APR error from ap_get_brigade()/translation.
 */
static int xlate_in_filter(ap_filter_t *f, apr_bucket_brigade *bb,
                           ap_input_mode_t mode, apr_read_type_e block,
                           apr_off_t readbytes)
{
    apr_status_t rv;
    charset_req_t *reqinfo = ap_get_module_config(f->r->request_config,
                                                  &charset_lite_module);
    charset_dir_t *dc = ap_get_module_config(f->r->per_dir_config,
                                             &charset_lite_module);
    charset_filter_ctx_t *ctx = f->ctx;
    apr_size_t buffer_size;
    int hit_eos;

    if (!ctx) {
        /* this is SetInputFilter path; grab the preallocated context,
         * if any; note that if we decided not to do anything in an earlier
         * handler, we won't even have a reqinfo
         */
        if (reqinfo) {
            ctx = f->ctx = reqinfo->input_ctx;
            reqinfo->input_ctx = NULL; /* prevent SNAFU if user coded us twice
                                        * in the filter chain; we can't have two
                                        * instances using the same context
                                        */
        }
        if (!ctx) {
            /* no idea how to translate; don't do anything */
            ctx = f->ctx = apr_pcalloc(f->r->pool,
                                       sizeof(charset_filter_ctx_t));
            ctx->dc = dc;
            ctx->noop = 1;
        }
    }

    if (dc->debug >= DBGLVL_GORY) {
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
                      "xlate_in_filter() - "
                      "charset_source: %s charset_default: %s",
                      dc && dc->charset_source ? dc->charset_source : "(none)",
                      dc && dc->charset_default ? dc->charset_default : "(none)");
    }

    if (!ctx->ran) {  /* filter never ran before */
        chk_filter_chain(f);
        ctx->ran = 1;
    }

    if (ctx->noop) {
        /* nothing to translate; defer entirely to the next filter */
        return ap_get_brigade(f->next, bb, mode, block, readbytes);
    }

    if (APR_BRIGADE_EMPTY(ctx->bb)) {
        if ((rv = ap_get_brigade(f->next, bb, mode, block,
                                 readbytes)) != APR_SUCCESS) {
            return rv;
        }
    }
    else {
        APR_BRIGADE_PREPEND(bb, ctx->bb); /* first use the leftovers */
    }

    buffer_size = INPUT_XLATE_BUF_SIZE;
    /* buffer_size is decremented by the number of output bytes produced */
    rv = xlate_brigade(ctx, bb, ctx->tmp, &buffer_size, &hit_eos);
    if (rv == APR_SUCCESS) {
        if (!hit_eos) {
            /* move anything leftover into our context for next time;
             * we don't currently "set aside" since the data came from
             * down below, but I suspect that for long-term we need to
             * do that
             */
            APR_BRIGADE_CONCAT(ctx->bb, bb);
        }
        if (buffer_size < INPUT_XLATE_BUF_SIZE) { /* do we have output? */
            apr_bucket *e;

            /* NULL free-func => the heap bucket copies ctx->tmp, so the
             * context buffer can be reused on the next call */
            e = apr_bucket_heap_create(ctx->tmp,
                                       INPUT_XLATE_BUF_SIZE - buffer_size,
                                       NULL, f->r->connection->bucket_alloc);
            /* make sure we insert at the head, because there may be
             * an eos bucket already there, and the eos bucket should
             * come after the data
             */
            APR_BRIGADE_INSERT_HEAD(bb, e);
        }
        else {
            /* XXX need to get some more data... what if the last brigade
             * we got had only the first byte of a multibyte char? we need
             * to grab more data from the network instead of returning an
             * empty brigade
             */
        }
    }
    else {
        log_xlate_error(f, rv);
    }

    return rv;
}
/**
 * @internal
 *
 * "Sniffs" the input (request) data from the connection stream and tries
 * to determine who closed a connection and why.
 *
 * In buffering mode, raw input is handed to IronBee and this filter only
 * returns data that IronBee has processed and released via the drain;
 * it keeps fetching from the next filter until such data exists (to
 * avoid returning an empty brigade).  In non-buffering mode each bucket
 * is inspected in place and passed through unchanged.
 */
static int ironbee_input_filter(ap_filter_t *f, apr_bucket_brigade *bb,
                                ap_input_mode_t mode, apr_read_type_e block,
                                apr_off_t readbytes)
{
    conn_rec *c = f->c;
    ironbee_conn_context *ctx = f->ctx;
    ib_conn_t *iconn = ctx->iconn;
    ib_core_cfg_t *corecfg;
    ib_stream_t *istream;   /* NOTE(review): only assigned inside the
                             * buffering branch via ib_fctl_drain — confirm
                             * that branch always runs before any read of
                             * istream */
    apr_bucket *b;
    apr_status_t rc;
    int buffering = 0;

    /* Any mode not handled just gets passed through. */
    if ((mode != AP_MODE_GETLINE) && (mode != AP_MODE_READBYTES)) {
        return ap_get_brigade(f->next, bb, mode, block, readbytes);
    }

    /* Configure. */
    ib_context_module_config(iconn->ctx, ib_core_module(),
                             (void *)&corecfg);
    if (corecfg != NULL) {
        buffering = (int)corecfg->buffer_req;
    }

    /* When buffering, data is removed from the brigade and handed
     * to IronBee. The filter must not return an empty brigade in this
     * case and keeps reading until there is processed data that comes
     * back from IronBee.
     */
    do {
        ib_tx_t *itx = iconn->tx;

        /* If there is any processed data, then send it now. */
        if (buffering && (itx != NULL)) {
            ib_sdata_t *sdata;

            /* Take any data from the drain (processed data) and
             * inject it back into the filter brigade. */
            ib_fctl_drain(itx->fctl, &istream);
            if ((istream != NULL) && (istream->nelts > 0)) {
                int done = 0;
                while (!done) {
                    apr_bucket *ibucket = NULL;

                    /// @todo Handle multi-bucket lines
                    if (mode == AP_MODE_GETLINE) {
                        done = 1;   /* GETLINE: deliver one pull per call */
                    }

                    ib_stream_pull(istream, &sdata);
                    if (sdata == NULL) {
                        /* No more data left. */
                        break;
                    }

                    /* Map each IronBee stream element onto an APR bucket */
                    switch (sdata->type) {
                        case IB_STREAM_DATA:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
                                         c->base_server,
                                         IB_PRODUCT_NAME ": DATA[%d]: %.*s",
                                         (int)sdata->dlen,
                                         (int)sdata->dlen,
                                         (char *)sdata->data);
#endif
                            /// @todo Is this creating a copy? Just need a reference.
                            ibucket = apr_bucket_heap_create(sdata->data,
                                                             sdata->dlen,
                                                             NULL,
                                                             bb->bucket_alloc);
                            break;
                        case IB_STREAM_FLUSH:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
                                         c->base_server,
                                         IB_PRODUCT_NAME ": FLUSH");
#endif
                            ibucket = apr_bucket_flush_create(bb->bucket_alloc);
                            break;
                        case IB_STREAM_EOH:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
                                         c->base_server,
                                         IB_PRODUCT_NAME ": EOH");
#endif
                            /// @todo Do something here???
                            break;
                        case IB_STREAM_EOB:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
                                         c->base_server,
                                         IB_PRODUCT_NAME ": EOB");
#endif
                            /// @todo Do something here???
                            break;
                        case IB_STREAM_EOS:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
                                         c->base_server,
                                         IB_PRODUCT_NAME ": EOS");
#endif
                            ibucket = apr_bucket_eos_create(bb->bucket_alloc);
                            break;
                        default:
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
                                         c->base_server,
                                         IB_PRODUCT_NAME ": UNKNOWN stream data type %d",
                                         sdata->type);
                    }

                    if (ibucket != NULL) {
                        APR_BRIGADE_INSERT_TAIL(bb, ibucket);
                    }
                }

                /* Need to send any processed data to avoid deadlock. */
                if (!APR_BRIGADE_EMPTY(bb)) {
                    return APR_SUCCESS;
                }
            }
        }

        /* Fetch data from the next filter. */
        if (buffering) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         "FETCH BRIGADE (buffering)");

            /* Normally Apache will request the headers line-by-line, but
             * IronBee does not require this.  So, here the request is
             * fetched with READBYTES and IronBee will then break
             * it back up into lines when it is injected back into
             * the brigade after the data is processed.
             */
            rc = ap_get_brigade(f->next, bb, AP_MODE_READBYTES, block,
                                HUGE_STRING_LEN);
        }
        else {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         "FETCH BRIGADE (non-buffering)");
            rc = ap_get_brigade(f->next, bb, mode, block, readbytes);
        }

        /* Check for any timeouts/disconnects/errors.
         */
        if (APR_STATUS_IS_TIMEUP(rc)) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         IB_PRODUCT_NAME ": %s server closed connection (%d)",
                         f->frec->name, rc);
            ap_remove_input_filter(f);
            return rc;
        }
        else if (APR_STATUS_IS_EOF(rc) || apr_get_os_error() == ECONNRESET) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         IB_PRODUCT_NAME ": %s client closed connection (%d)",
                         f->frec->name, rc);
            ap_remove_input_filter(f);
            return rc;
        }
        else if (rc != APR_SUCCESS) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         IB_PRODUCT_NAME ": %s returned %d (0x%08x) - %s",
                         f->frec->name, rc, rc,
                         strerror(apr_get_os_error()));
            return rc;
        }

        /* Process data. */
        for (b = APR_BRIGADE_FIRST(bb);
             b != APR_BRIGADE_SENTINEL(bb);
             b = APR_BUCKET_NEXT(b)) {
            if (buffering) {
                /// @todo setaside into our own pool to destroy later???
                apr_bucket_setaside(b, c->pool);
                process_bucket(f, b);
                APR_BUCKET_REMOVE(b);
            }
            else {
                process_bucket(f, b);
            }
        }
    } while (buffering);

    return APR_SUCCESS;
}
/* Exercise apr_bucket_split() on the four in-memory bucket types:
 * insert "alphabeta" four ways, split each bucket after byte 5, verify
 * the "alpha"/"beta" halves, delete the "alpha" halves, and flatten what
 * remains. */
static void test_splits(abts_case *tc, void *ctx)
{
    apr_bucket_alloc_t *ba = apr_bucket_alloc_create(p);
    apr_bucket_brigade *bb = apr_brigade_create(p, ba);
    apr_bucket *bkt;
    char *str = "alphabeta";
    int i;

    APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_immortal_create(str, 9, ba));
    APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_transient_create(str, 9, ba));
    APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_heap_create(strdup(str), 9,
                                                       free, ba));
    APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_pool_create(apr_pstrdup(p, str), 9,
                                                       p, ba));

    ABTS_ASSERT(tc, "four buckets inserted", count_buckets(bb) == 4);

    /* now split each of the buckets after byte 5 */
    bkt = APR_BRIGADE_FIRST(bb);
    for (i = 0; i < 4; i++) {
        ABTS_ASSERT(tc, "reached end of brigade",
                    bkt != APR_BRIGADE_SENTINEL(bb));
        ABTS_ASSERT(tc, "split bucket OK",
                    apr_bucket_split(bkt, 5) == APR_SUCCESS);
        bkt = APR_BUCKET_NEXT(bkt);
        ABTS_ASSERT(tc, "split OK", bkt != APR_BRIGADE_SENTINEL(bb));
        bkt = APR_BUCKET_NEXT(bkt);
    }

    ABTS_ASSERT(tc, "four buckets split into eight",
                count_buckets(bb) == 8);

    /* each pair must now read back as "alpha" then "beta" */
    bkt = APR_BRIGADE_FIRST(bb);
    for (i = 0; i < 4; i++) {
        const char *data;
        apr_size_t len;

        apr_assert_success(tc, "read alpha from bucket",
                           apr_bucket_read(bkt, &data, &len,
                                           APR_BLOCK_READ));
        ABTS_ASSERT(tc, "read 5 bytes", len == 5);
        ABTS_STR_NEQUAL(tc, "alpha", data, 5);

        bkt = APR_BUCKET_NEXT(bkt);
        apr_assert_success(tc, "read beta from bucket",
                           apr_bucket_read(bkt, &data, &len,
                                           APR_BLOCK_READ));
        ABTS_ASSERT(tc, "read 4 bytes", len == 4);
        ABTS_STR_NEQUAL(tc, "beta", data, 5);
        bkt = APR_BUCKET_NEXT(bkt);
    }

    /* now delete the "alpha" buckets */
    bkt = APR_BRIGADE_FIRST(bb);
    for (i = 0; i < 4; i++) {
        apr_bucket *beta;

        ABTS_ASSERT(tc, "reached end of brigade",
                    bkt != APR_BRIGADE_SENTINEL(bb));
        beta = APR_BUCKET_NEXT(bkt);
        apr_bucket_delete(bkt);
        bkt = APR_BUCKET_NEXT(beta);
    }

    ABTS_ASSERT(tc, "eight buckets reduced to four",
                count_buckets(bb) == 4);

    flatten_match(tc, "flatten beta brigade", bb,
                  "beta" "beta" "beta" "beta");

    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(ba);
}
/*
 * fauth_output_filter(): output filter that, for 404/403 GET responses to
 * recognized verification URLs, replaces the response body with a token
 * looked up via dbapi_lookup() (optionally wrapped in GlobalSign meta-tag
 * HTML) and forces the status to 200.  All other responses pass through.
 *
 * FIXES versus the previous revision:
 *  - heap overflow: the buffer for "/<hostname>" was allocated with
 *    strlen(hostname)+1 bytes while sprintf("/%s") writes
 *    strlen(hostname)+2 (slash + hostname + NUL);
 *  - heap overflow: the bucket buffer was allocated with strlen(hash)
 *    bytes while strcpy() writes strlen(hash)+1;
 *  - leak: the malloc'd replacement uri was never freed.
 */
static apr_status_t fauth_output_filter(ap_filter_t *f,
                                        apr_bucket_brigade *pbbIn)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    apr_bucket *pbktOut, *pbktIn;
    apr_bucket_brigade *pbbOut;
    char *uri, *buf, *hash, *req;
    int ig = 0;
    int ie = 0;
    int uri_is_alloced = 0;   /* does uri own heap memory we must free? */

    /* Work on a NUL-terminated copy of the request line. */
    req = malloc(sizeof(char) * (strlen(r->the_request) + 1));
    strncpy(req, r->the_request, strlen(r->the_request));
    req[strlen(r->the_request)] = '\0';

    /* Second token of "METHOD URI PROTO" is the request URI. */
    uri = strtok(req, " ");
    if (uri)
        uri = strtok(NULL, " ");
    if (!uri) {
        free(req);
        return ap_pass_brigade(f->next, pbbIn);
    }

    ig = is_gs_req(uri);
    ie = is_ee_req(uri);

    /* Only intercept 404/403 GETs for recognized verification URLs. */
    if (!(r->status == HTTP_NOT_FOUND || r->status == HTTP_FORBIDDEN)
        || r->method_number != M_GET
        || (!is_sym_req(uri) && !ig && !ie)) {
        free(req);
        return ap_pass_brigade(f->next, pbbIn);
    }

    if (ig || ie) {
        /* BUGFIX: need strlen(hostname) + 2 bytes: '/', hostname, NUL.
         * The previous +1 allocation overflowed by one byte. */
        uri = malloc(sizeof(char) * (strlen(r->hostname) + 2));
        sprintf(uri, "/%s", r->hostname);
        uri_is_alloced = 1;
    }

    hash = malloc(sizeof(char) * (HASH_MAXLENGTH + 1));
    /* NOTE(review): assumes dbapi_lookup() never returns NULL — confirm. */
    strncpy(hash, dbapi_lookup(uri), HASH_MAXLENGTH);
    hash[HASH_MAXLENGTH] = '\0';

    if (uri_is_alloced) {
        free(uri);   /* BUGFIX: was leaked on every ig/ie request */
        uri = NULL;
    }

    /* Lookup miss (or error payload): pass the original response along. */
    if (strncmp(hash, "404 Not Found", 13) == 0 || hash[0] == '{') {
        free(hash);
        free(req);
        return ap_pass_brigade(f->next, pbbIn);
    }

    pbbOut = apr_brigade_create(r->pool, c->bucket_alloc);

    /* Drop the original body.  NOTE(review): the removed buckets are
     * never destroyed — confirm whether apr_bucket_delete() was intended
     * here (apr_brigade_cleanup() below no longer sees them). */
    for (pbktIn = APR_BRIGADE_FIRST(pbbIn);
         pbktIn != APR_BRIGADE_SENTINEL(pbbIn);
         pbktIn = APR_BUCKET_NEXT(pbktIn)) {
        APR_BUCKET_REMOVE(pbktIn);
    }

    if (ig) {
        /* Wrap the token in the GlobalSign verification meta tag. */
        char *hasho = malloc(sizeof(char) * (HASH_MAXLENGTH + 1));
        memcpy(hasho, hash, strlen(hash) + 1);
        snprintf(hash, HASH_MAXLENGTH,
                 "<html><head><meta name=\"_globalsign-domain-verification\" content=\"%s\" /></head></html>",
                 hasho);
        hash[HASH_MAXLENGTH] = '\0';
        free(hasho);
    }
    else if (ie) {
        char *hasho = malloc(sizeof(char) * (HASH_MAXLENGTH + 1));
        memcpy(hasho, hash, strlen(hash) + 1);
        snprintf(hash, HASH_MAXLENGTH, "%s", hasho);
        hash[HASH_MAXLENGTH] = '\0';
        free(hasho);
    }

    /* BUGFIX: allocate room for the NUL that strcpy() appends; the
     * previous strlen(hash)-byte allocation was written one past the end. */
    buf = apr_bucket_alloc(strlen(hash) + 1, c->bucket_alloc);
    strcpy(buf, hash);
    buf[strlen(hash)] = '\0';
    free(hash);
    free(req);

    /* The heap bucket takes ownership of buf (apr_bucket_free). */
    pbktOut = apr_bucket_heap_create(buf, strlen(buf), apr_bucket_free,
                                     c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(pbbOut, pbktOut);

    apr_brigade_cleanup(pbbIn);
    f->r->status = 200;

    apr_bucket *pbktEOS = apr_bucket_eos_create(c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(pbbOut, pbktEOS);
    return ap_pass_brigade(f->next, pbbOut);
}
/*
 * Append the contents of an iovec array to brigade b.
 *
 * Large totals (> APR_BUCKET_BUFF_SIZE) are either flushed as transient
 * buckets (one per vector element) or copied into per-element heap
 * buckets.  Smaller totals are copied into the trailing unshared heap
 * bucket when there is room, possibly splitting across that bucket and
 * one freshly allocated APR_BUCKET_BUFF_SIZE heap bucket.
 *
 * b     - brigade to append to
 * flush - optional callback invoked when data must be pushed out
 * ctx   - opaque context handed to flush
 * vec   - array of iovecs to write (always copied before return unless
 *         handed to flush as transient buckets)
 * nvec  - number of elements in vec
 */
APU_DECLARE(apr_status_t) apr_brigade_writev(apr_bucket_brigade *b,
                                             apr_brigade_flush flush,
                                             void *ctx,
                                             const struct iovec *vec,
                                             apr_size_t nvec)
{
    apr_bucket *e;
    apr_size_t total_len;
    apr_size_t i;
    char *buf;

    /* Compute the total length of the data to be written. */
    total_len = 0;
    for (i = 0; i < nvec; i++) {
        total_len += vec[i].iov_len;
    }

    /* If the data to be written is very large, try to convert
     * the iovec to transient buckets rather than copying.
     */
    if (total_len > APR_BUCKET_BUFF_SIZE) {
        if (flush) {
            /* transient is safe: flush() consumes the brigade before the
             * caller's iovec memory can go away */
            for (i = 0; i < nvec; i++) {
                e = apr_bucket_transient_create(vec[i].iov_base,
                                                vec[i].iov_len,
                                                b->bucket_alloc);
                APR_BRIGADE_INSERT_TAIL(b, e);
            }
            return flush(b, ctx);
        }
        else {
            /* no flush callback: each element must be copied (NULL
             * free-func makes the heap bucket copy the data) */
            for (i = 0; i < nvec; i++) {
                e = apr_bucket_heap_create((const char *) vec[i].iov_base,
                                           vec[i].iov_len, NULL,
                                           b->bucket_alloc);
                APR_BRIGADE_INSERT_TAIL(b, e);
            }
            return APR_SUCCESS;
        }
    }

    i = 0;

    /* If there is a heap bucket at the end of the brigade
     * already, and its refcount is 1, copy into the existing bucket.
     */
    e = APR_BRIGADE_LAST(b);
    if (!APR_BRIGADE_EMPTY(b) && APR_BUCKET_IS_HEAP(e)
        && ((apr_bucket_heap *)(e->data))->refcount.refcount == 1) {
        apr_bucket_heap *h = e->data;
        apr_size_t remaining = h->alloc_len -
            (e->length + (apr_size_t)e->start);
        buf = h->base + e->start + e->length;

        if (remaining >= total_len) {
            /* Simple case: all the data will fit in the
             * existing heap bucket
             */
            for (; i < nvec; i++) {
                apr_size_t len = vec[i].iov_len;
                memcpy(buf, (const void *) vec[i].iov_base, len);
                buf += len;
            }
            e->length += total_len;
            return APR_SUCCESS;
        }
        else {
            /* More complicated case: not all of the data
             * will fit in the existing heap bucket.  The
             * total data size is <= APR_BUCKET_BUFF_SIZE,
             * so we'll need only one additional bucket.
             */
            const char *start_buf = buf;
            /* copy whole elements while they fit; never split an element */
            for (; i < nvec; i++) {
                apr_size_t len = vec[i].iov_len;
                if (len > remaining) {
                    break;
                }
                memcpy(buf, (const void *) vec[i].iov_base, len);
                buf += len;
                remaining -= len;
            }
            e->length += (buf - start_buf);
            total_len -= (buf - start_buf);

            if (flush) {
                apr_status_t rv = flush(b, ctx);
                if (rv != APR_SUCCESS) {
                    return rv;
                }
            }

            /* Now fall through into the case below to
             * allocate another heap bucket and copy the
             * rest of the array.  (Note that i is not
             * reset to zero here; it holds the index
             * of the first vector element to be
             * written to the new bucket.)
             */
        }
    }

    /* Allocate a new heap bucket, and copy the data into it.
     * The checks above ensure that the amount of data to be
     * written here is no larger than APR_BUCKET_BUFF_SIZE.
     */
    buf = apr_bucket_alloc(APR_BUCKET_BUFF_SIZE, b->bucket_alloc);
    e = apr_bucket_heap_create(buf, APR_BUCKET_BUFF_SIZE,
                               apr_bucket_free, b->bucket_alloc);
    for (; i < nvec; i++) {
        apr_size_t len = vec[i].iov_len;
        memcpy(buf, (const void *) vec[i].iov_base, len);
        buf += len;
    }
    e->length = total_len;   /* equals the bytes just copied: total_len was
                              * decremented by whatever went into the
                              * previous bucket */
    APR_BRIGADE_INSERT_TAIL(b, e);

    return APR_SUCCESS;
}
/**
 * Process input stream
 *
 * Connection-level input filter implementing a small pre-HTTP protocol:
 * the first bytes of a connection may carry a TEST, HELO (4-byte binary
 * client IP follows) or PROXY (PROXY-protocol-style text line follows)
 * command.  The filter consumes that prefix, applies its side effects
 * (rewriting the client IP, answering TEST, aborting on malformed input)
 * and re-injects any surplus bytes at the head of the brigade before
 * handing control back to the normal input path.  ctx tracks the parse
 * phase, read mode and byte counts across invocations.
 */
static apr_status_t helocon_filter_in(ap_filter_t *f, apr_bucket_brigade *b,
                                      ap_input_mode_t mode,
                                      apr_read_type_e block,
                                      apr_off_t readbytes)
{
    conn_rec *c = f->c;
    my_ctx *ctx = f->ctx;

    // Fail quickly if the connection has already been aborted.
    if (c->aborted) {
        apr_brigade_cleanup(b);
        return APR_ECONNABORTED;
    }

    // Fast passthrough
    if (ctx->phase == PHASE_DONE) {
        return ap_get_brigade(f->next, b, mode, block, readbytes);
    }

    // Process Head
    do {
#ifdef DEBUG
        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (1)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase);
#endif
        // Pull raw bytes from downstream when we have none buffered.
        if (APR_BRIGADE_EMPTY(b)) {
#ifdef DEBUG
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (2)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase);
#endif
            apr_status_t s = ap_get_brigade(f->next, b, ctx->mode,
                                            APR_BLOCK_READ, ctx->need);
            if (s != APR_SUCCESS) {
#ifdef DEBUG
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (fail)(1)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase);
#endif
                return s;
            }
        }

        if (ctx->phase == PHASE_DONE) {
            return APR_SUCCESS;
        }

        if (APR_BRIGADE_EMPTY(b)) {
#ifdef DEBUG
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (empty)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase);
#endif
            return APR_SUCCESS;
        }

        apr_bucket *e = NULL;
        /* NOTE(review): the loop increment dereferences e after
         * apr_bucket_delete(e) below (use-after-free); in practice the
         * switch paths break/return before iterating, but confirm. */
        for (e = APR_BRIGADE_FIRST(b);
             e != APR_BRIGADE_SENTINEL(b);
             e = APR_BUCKET_NEXT(e)) {
            if (e->type == NULL) {
#ifdef DEBUG
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (type=NULL)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase);
#endif
                return APR_SUCCESS;
            }

            // We need more data
            if (ctx->need > 0) {
                const char *str = NULL;
                apr_size_t length = 0;
                apr_status_t s = apr_bucket_read(e, &str, &length,
                                                 APR_BLOCK_READ);
                if (s != APR_SUCCESS) {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " recv=%" APR_OFF_T_FMT " phase=%d readed=%" APR_SIZE_T_FMT " (fail)(2)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->recv, ctx->phase, length);
#endif
                    return s;
                }
#ifdef DEBUG
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " recv=%" APR_OFF_T_FMT " phase=%d readed=%" APR_SIZE_T_FMT " (3)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->recv, ctx->phase, length);
#endif
                if (length > 0) {
                    if ((ctx->offset + length) > PROXY_MAX_LENGTH) {
                        // Overflow
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in ERROR: PROXY protocol header overflow from=%s to port=%d length=%" APR_OFF_T_FMT, _CLIENT_IP, c->local_addr->port, (ctx->offset + length));
                        goto ABORT_CONN2;
                    }
                    /* accumulate the prefix bytes into ctx->buf */
                    memcpy(ctx->buf + ctx->offset, str, length);
                    if (ctx->pad != ctx->magic) {
                        /* canary after buf was clobbered => overrun */
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in padding magic fail (bad=%d vs good=%d)", ctx->pad, ctx->magic);
                        goto ABORT_CONN;
                    }
                    ctx->offset += length;
                    ctx->recv += length;
                    ctx->need -= length;
                    /* NOTE(review): writes ctx->buf[PROXY_MAX_LENGTH] when
                     * offset reaches the limit — presumably buf has one
                     * spare byte; confirm against my_ctx's declaration. */
                    ctx->buf[ctx->offset] = 0;

                    // delete HEAD: keep any surplus bytes in the brigade
                    if (e->length > length) {
                        apr_bucket_split(e, length);
                    }
                }
                apr_bucket_delete(e);
                if (length == 0) {
#ifdef DEBUG
                    /* NOTE(review): e is read here after
                     * apr_bucket_delete(e) — use-after-free; confirm. */
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG bucket flush=%d meta=%d", APR_BUCKET_IS_FLUSH(e) ? 1 : 0, APR_BUCKET_IS_METADATA(e) ? 1 : 0);
#endif
                    continue;
                }
            }

            // Handle GETLINE mode: a CRLF in the buffer completes the line
            if (ctx->mode == AP_MODE_GETLINE) {
                if ((ctx->need > 0) && (ctx->recv > 2)) {
                    char *end = memchr(ctx->buf, '\r', ctx->offset - 1);
                    if (end) {
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: GETLINE OK");
#endif
                        if ((end[0] == '\r') && (end[1] == '\n')) {
                            ctx->need = 0;
                        }
                    }
                }
            }

            // Enough bytes collected for the current phase: dispatch.
            if (ctx->need <= 0) {
#ifdef DEBUG
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " recv=%" APR_OFF_T_FMT " phase=%d (4)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->recv, ctx->phase);
#endif
                switch (ctx->phase) {
                    case PHASE_WANT_HEAD: {
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d phase=%d checking=%s buf=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->phase, "HEAD", ctx->buf);
#endif
                        // TEST Command: answer and close the socket
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=TEST CHECK");
#endif
                        if (strncmp(TEST, ctx->buf, 4) == 0) {
                            apr_socket_t *csd = ap_get_module_config(c->conn_config, &core_module);
                            apr_size_t length = strlen(TEST_RES_OK);
                            apr_socket_send(csd, TEST_RES_OK, &length);
                            apr_socket_shutdown(csd, APR_SHUTDOWN_WRITE);
                            apr_socket_close(csd);
#ifdef DEBUG
                            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=TEST OK");
#endif
                            // No need to check for SUCCESS, we did that above
                            c->aborted = 1;
                            apr_brigade_cleanup(b);
                            return APR_ECONNABORTED;
                        }

                        // HELO Command: 4 binary client-IP bytes follow
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=HELO CHECK");
#endif
                        if (strncmp(HELO, ctx->buf, 4) == 0) {
#ifdef DEBUG
                            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=HELO OK");
#endif
                            ctx->phase = PHASE_WANT_BINIP;
                            ctx->mode = AP_MODE_READBYTES;
                            ctx->need = 4;
                            ctx->recv = 0;
                            break;
                        }

                        // PROXY Command: a text line terminated by CRLF follows
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=PROXY CHECK");
#endif
                        if (strncmp(PROXY, ctx->buf, 4) == 0) {
#ifdef DEBUG
                            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=PROXY OK");
#endif
                            ctx->phase = PHASE_WANT_LINE;
                            ctx->mode = AP_MODE_GETLINE;
                            ctx->need = PROXY_MAX_LENGTH - ctx->offset;
                            ctx->recv = 0;
                            break;
                        }

                        // ELSE... GET / POST / etc: plain HTTP, no prefix
                        ctx->phase = PHASE_DONE;
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG from: %s:%d to port=%d newBucket (1) size=%" APR_OFF_T_FMT, _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->offset);
#endif
                        // Restore original data (copied — NULL free-func)
                        if (ctx->offset) {
                            e = apr_bucket_heap_create(ctx->buf, ctx->offset, NULL, c->bucket_alloc);
                            APR_BRIGADE_INSERT_HEAD(b, e);
                            goto END_CONN;
                        }
                        break;
                    }
                    case PHASE_WANT_BINIP: {
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d phase=%d checking=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->phase, "BINIP");
#endif
                        // REWRITE CLIENT IP from the 4 bytes after "HELO"
                        const char *new_ip = fromBinIPtoString(c->pool, ctx->buf+4);
                        if (!new_ip) {
                            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in ERROR: HELO+IP invalid");
                            goto ABORT_CONN;
                        }
                        apr_table_set(c->notes, NOTE_REWRITE_IP, new_ip);
                        ctx->phase = PHASE_DONE;
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG from: %s:%d to port=%d newip=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, new_ip);
#endif
                        break;
                    }
                    case PHASE_WANT_LINE: {
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d phase=%d checking=%s buf=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->phase, "LINE", ctx->buf);
#endif
                        ctx->phase = PHASE_DONE;
                        char *end = memchr(ctx->buf, '\r', ctx->offset - 1);
                        if (!end) {
                            goto ABORT_CONN;
                        }
                        if ((end[0] != '\r') || (end[1] != '\n')) {
                            goto ABORT_CONN;
                        }
                        if (!process_proxy_header(f)) {
                            goto ABORT_CONN;
                        }

                        // Restore original data: bytes after the CRLF
                        int count = (ctx->offset - ((end - ctx->buf) + 2));
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG from: %s:%d to port=%d newBucket (2) size=%d rest=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, count, end + 2);
#endif
                        if (count > 0) {
                            e = apr_bucket_heap_create(end + 2, count, NULL, c->bucket_alloc);
                            APR_BRIGADE_INSERT_HEAD(b, e);
                            goto END_CONN;
                        }
                        break;
                    }
                }

                if (ctx->phase == PHASE_DONE) {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d phase=%d (DONE)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->phase);
#endif
                    /* restore the caller's read mode for passthrough */
                    ctx->mode = mode;
                    ctx->need = 0;
                    ctx->recv = 0;
                }
                break;
            }
        }
    } while (ctx->phase != PHASE_DONE);

END_CONN:
    return ap_get_brigade(f->next, b, mode, block, readbytes);

ABORT_CONN:
    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in ERROR: PROXY protocol header invalid from=%s to port=%d", _CLIENT_IP, c->local_addr->port);
ABORT_CONN2:
    c->aborted = 1;
    apr_brigade_cleanup(b);
    return APR_ECONNABORTED;
}
// // Output filter. // static apr_status_t resize_output_filter(ap_filter_t* f, apr_bucket_brigade* in_bb) { request_rec* rec =f->r; resize_conf* conf = (resize_conf*)ap_get_module_config(rec->per_dir_config, &resizeimage_module); const char* content_type, *target_type = "JPEG"; const char* image_url, *resize_param, *image_hash=NULL; Magick::Blob blob; char* vlob = NULL; size_t vlob_length = 0; int cache_hit = FALSE; AP_LOG_VERBOSE(rec, "Incoming %s.", __FUNCTION__); // Pass thru by request types. if(rec->status!=HTTP_OK || rec->main!=NULL || rec->header_only || (rec->handler!= NULL && strcmp(rec->handler, "default-handler") == 0)) goto PASS_THRU; AP_LOG_VERBOSE(rec, "-- Checking responce headers."); // Obtain and erase x-resize-image header or pass through. image_url = get_and_unset_header(rec->headers_out, X_RESIZE); if(image_url== NULL || image_url[0]=='\0') { image_url = get_and_unset_header(rec->err_headers_out, X_RESIZE); } if(image_url==NULL || image_url[0]=='\0') goto PASS_THRU; // Check content-type content_type = rec->content_type; if(content_type) { if(strcasecmp(content_type, "image/jpeg")==0) { target_type = "JPEG"; } else if(strcasecmp(content_type, "image/png")==0) { target_type = "PNG"; } else if(strcasecmp(content_type, "image/gif")==0) { target_type = "GIF"; } else goto PASS_THRU; } // Resize parameter resize_param = get_and_unset_header(rec->headers_out, X_RESIZE_PARAM); if(resize_param==NULL || resize_param[0]=='\0') { resize_param = get_and_unset_header(rec->err_headers_out, X_RESIZE_PARAM); } if(resize_param[0]=='\0') resize_param = NULL; // Image hash image_hash = get_and_unset_header(rec->headers_out, X_RESIZE_HASH); if(image_hash==NULL || image_hash[0]=='\0') { image_hash = get_and_unset_header(rec->err_headers_out, X_RESIZE_HASH); } // Open image and resize. AP_LOG_INFO(rec, "URL: %s, %s => %s (%s)", image_url, content_type, resize_param, image_hash); if(image_hash) { // Try memcached... 
image_hash = apr_psprintf(rec->pool, "%s:%s:%s", image_hash, target_type, resize_param); memcached_return r; uint32_t flags; vlob = memcached_get(conf->memc, image_hash, strlen(image_hash), &vlob_length, &flags, &r); if(r==MEMCACHED_SUCCESS) { AP_LOG_DEBUG(rec, "Restored from memcached: %s, len=%d", image_hash, vlob_length); cache_hit = TRUE; goto WRITE_DATA; } else { AP_LOG_DEBUG(rec, "Can't restore from memcached: %s - %s(%d)", image_hash, memcached_strerror(conf->memc, r), r); } } // Reszize try { Magick::Image image; image.read(image_url); if(resize_param) image.zoom(resize_param); image.magick(target_type); image.quality(conf->jpeg_quality); image.write(&blob); vlob = (char*)blob.data(); vlob_length = blob.length(); } catch(Magick::Exception& err) { AP_LOG_ERR(rec, __FILE__ ": Magick failed: %s", err.what()); goto PASS_THRU; } if(image_hash) { // Store to memcached... memcached_return r = memcached_set(conf->memc, image_hash, strlen(image_hash), vlob, vlob_length, conf->expire, 0); if(r==MEMCACHED_SUCCESS) { AP_LOG_DEBUG(rec, "Stored to memcached: %s(len=%d)", image_hash, vlob_length); } else { AP_LOG_DEBUG(rec, "Can't store from memcached: %s(len=%d) - %s(%d)", image_hash, vlob_length,memcached_strerror(conf->memc, r), r); } } WRITE_DATA: AP_LOG_VERBOSE(rec, "-- Creating resize buckets."); // Drop all content and headers related. while(!APR_BRIGADE_EMPTY(in_bb)) { apr_bucket* b = APR_BRIGADE_FIRST(in_bb); apr_bucket_delete(b); } rec->eos_sent = 0; rec->clength = 0; unset_header(rec, "Content-Length"); unset_header(rec, "Content-Encoding"); unset_header(rec, "Last-Modified"); unset_header(rec, "ETag"); // Start resize bucket. { apr_off_t remain = vlob_length, offset = 0; while(remain>0) { apr_off_t bs = (remain<AP_MAX_SENDFILE)? 
remain: AP_MAX_SENDFILE; char* heap = (char*)malloc(bs); memcpy(heap, vlob+offset, bs); apr_bucket* b = apr_bucket_heap_create(heap, bs, free, in_bb-> bucket_alloc); APR_BRIGADE_INSERT_TAIL(in_bb, b); remain -= bs; offset += bs; } APR_BRIGADE_INSERT_TAIL(in_bb, apr_bucket_eos_create(in_bb->bucket_alloc)); ap_set_content_length(rec, vlob_length); if(cache_hit) free(vlob); } AP_LOG_VERBOSE(rec, "-- Create done."); PASS_THRU: AP_LOG_VERBOSE(rec, "-- Filter done."); ap_remove_output_filter(f); return ap_pass_brigade(f->next, in_bb); }
/*
 * Output filter: replaces every occurrence of the configured server-side
 * nonce in the response body with a freshly generated per-process client
 * nonce, and advertises that nonce via CSP response headers.
 */
static apr_status_t NonceFilterOutFilter(ap_filter_t *f, apr_bucket_brigade *pbbIn)
{
    request_rec *r = f->r;
    NonceFilterConfig *pConfig = ap_get_module_config(r->server->module_config,
                                                      &nonce_filter_module);
    conn_rec *c = r->connection;
    apr_bucket *pbktIn;
    apr_bucket_brigade *pbbOut;
    const char *data;
    apr_size_t len;
    char *buf;
    apr_bucket *pbktOut;

    pbbOut = apr_brigade_create(r->pool, c->bucket_alloc);

    /* Lazily generate the per-process 8-char random nonce. */
    if (rand_str == NULL) {
        rand_str = (char *)malloc(9 * sizeof(char));
        make_rstring(rand_str, 8);
    }

    /* BUGFIX: the original mallocs were one byte short -- strcpy/strcat
     * write a trailing NUL past the end of the buffer (heap overflow). */
    char *client_nonce_body = (char *)malloc(strlen("nonce=\"") + strlen(rand_str)
                                             + strlen("\"") + 1);
    strcpy(client_nonce_body, "nonce=\"");
    strcat(client_nonce_body, rand_str);
    strcat(client_nonce_body, "\"");

    /* set headers */
    apr_table_t *headers = r->headers_out;
    char *client_nonce_header = (char *)malloc(strlen("script-nonce ")
                                               + strlen(rand_str) + 1); /* BUGFIX: +1 for NUL */
    strcpy(client_nonce_header, "script-nonce ");
    strcat(client_nonce_header, rand_str);
    apr_table_set(headers, "X-WebKit-CSP", client_nonce_header);
    apr_table_set(headers, "Content-Security-Policy", client_nonce_header);
    /* apr_table_set(headers, "X-Content-Security-Policy", client_nonce_header); */
    free(client_nonce_header);

    for (pbktIn = APR_BRIGADE_FIRST(pbbIn);
         pbktIn != APR_BRIGADE_SENTINEL(pbbIn);
         pbktIn = APR_BUCKET_NEXT(pbktIn))
    {
        if (APR_BUCKET_IS_EOS(pbktIn)) {
            apr_bucket *pbktEOS = apr_bucket_eos_create(c->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(pbbOut, pbktEOS);
            continue;
        }

        apr_bucket_read(pbktIn, &data, &len, APR_BLOCK_READ);

        /* Check for the server nonce in the body and replace it with the
         * client nonce. */
        const char *server_nonce = pConfig->server_nonce;
        apr_size_t snlen = strlen(server_nonce);
        apr_size_t cnlen = strlen(client_nonce_body);

        /* BUGFIX: the old fixed size (len + one replacement) overflows when
         * the server nonce occurs more than once; count matches first. */
        apr_size_t matches = 0, iter;
        if (snlen > 0) {
            for (iter = 0; iter + snlen <= len; iter++) {
                if (memcmp(data + iter, server_nonce, snlen) == 0)
                    matches++;
            }
        }
        apr_size_t new_bucket_size = len + (matches ? matches : 1) * cnlen;
        buf = apr_bucket_alloc(new_bucket_size, c->bucket_alloc);

        apr_size_t index = 0;
        for (iter = 0; iter < len; iter++) {
            /* BUGFIX: bound the comparison so a partial match at the very end
             * of the bucket cannot read past `data`. */
            if (snlen > 0 && data[iter] == server_nonce[0] && iter + snlen <= len) {
                apr_size_t j = 0;
                for (; j < snlen; j++) {
                    if (data[iter + j] != server_nonce[j])
                        break;
                }
                if (j == snlen) {
                    iter = iter + snlen;
                    apr_size_t i = 0;
                    for (; i < cnlen; i++)
                        buf[index++] = client_nonce_body[i];
                }
            }
            if (iter < len)          /* BUGFIX: match at end-of-data leaves iter == len */
                buf[index++] = data[iter];
        }

        /* BUGFIX: the bucket must carry the rewritten size `index`,
         * not the original input length `len`. */
        pbktOut = apr_bucket_heap_create(buf, index, apr_bucket_free,
                                         c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(pbbOut, pbktOut);
    }

    free(client_nonce_body);   /* BUGFIX: was leaked on every invocation */
    apr_brigade_cleanup(pbbIn);
    return ap_pass_brigade(f->next, pbbOut);
}
static apr_status_t akismet_filter(ap_filter_t *f, apr_bucket_brigade *out_brigade, ap_input_mode_t input_mode, apr_read_type_e read_type, apr_off_t nbytes) { akismet_config *conf = NULL; akismet_config *sconf =NULL; akismet_config *dconf =NULL; request_rec *r = f->r; AkismetFilterContext *pctx; apr_status_t ret; apr_table_t *params_table; char* query_string=NULL; int i=0; char *next, *last; /* * decide configuration * use server level config if no directory level config not defined */ sconf = (akismet_config *)ap_get_module_config(r->server->module_config,&akismet_module); dconf = (akismet_config *)ap_get_module_config(r->per_dir_config, &akismet_module); conf = dconf; if ( !dconf || (!dconf->enabled && !dconf->apikey && !dconf->blogurl) ){ conf = sconf; } /* * parse request parameters */ params_table = apr_table_make(r->pool, PARAMS_TABLE_INIT_SIZE); if (!(pctx = f->ctx)) { f->ctx = pctx = apr_palloc(r->pool, sizeof *pctx); pctx->tmp_brigade = apr_brigade_create(r->pool, r->connection->bucket_alloc); } if (APR_BRIGADE_EMPTY(pctx->tmp_brigade)) { ret = ap_get_brigade(f->next, pctx->tmp_brigade, input_mode, read_type, nbytes); if (input_mode == AP_MODE_EATCRLF || ret != APR_SUCCESS) { return ret; } } while( !APR_BRIGADE_EMPTY(pctx->tmp_brigade) ) { apr_bucket *in_bucket = APR_BRIGADE_FIRST(pctx->tmp_brigade); apr_bucket *out_bucket; const char *data; apr_size_t len; char *buf; int n; if(APR_BUCKET_IS_EOS(in_bucket)) { APR_BUCKET_REMOVE(in_bucket); APR_BRIGADE_INSERT_TAIL(out_brigade, in_bucket); break; } ret=apr_bucket_read(in_bucket, &data, &len, read_type); if(ret != APR_SUCCESS){ return ret; } if (query_string == NULL) { query_string = apr_pstrdup(r->pool, data); } else { query_string = apr_pstrcat(r->pool, query_string, data,NULL); } out_bucket = apr_bucket_heap_create(data, len, 0, r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(out_brigade, out_bucket); apr_bucket_delete(in_bucket); } if (!query_string) { return APR_SUCCESS; } /* * split query_string 
and set params tables */ next = (char*)apr_strtok( query_string, "&", &last); while (next) { apr_collapse_spaces (next, next); char* k, *v; k = (char*)apr_strtok( next, "=", &v); if (k) { if ( ( conf->comment_param_key && strcasecmp( k, conf->comment_param_key)==0) || ( conf->comment_author_param_key && strcasecmp( k, conf->comment_author_param_key)==0) || (conf->comment_author_email_param_key && strcasecmp( k, conf->comment_author_email_param_key)==0) || (conf->comment_author_url_param_key && strcasecmp( k, conf->comment_author_url_param_key)==0) || (conf->comment_permalink_param_key && strcasecmp( k, conf->comment_permalink_param_key)==0) ) { apr_table_set(params_table, k, v); } } next = (char*)apr_strtok(NULL, "&", &last); } /* * comment spam check by akismet api */ return akismet_api_execute(r,conf,params_table); }
/* TODO: cleanup ctx */
/*
 * Output filter: deflate-compresses the response with a preset dictionary
 * (propfind_dictionary) for clients that advertise
 * "Accept-Encoding: zlibdict".  Compressed output is streamed to the next
 * filter bucket-by-bucket; the EOS bucket terminates the stream.
 */
static apr_status_t zlibdict_output_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_bucket *b;
    zlibdict_ctx_t *ctx = f->ctx;
    request_rec *r = f->r;
    const char *client_accepts;
    apr_status_t status = APR_SUCCESS;
    apr_pool_t *subpool;
    int zerr;

    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
                  "triggered zlibdict_output_filter");

    /* Do nothing if asked to filter nothing. */
    if (APR_BRIGADE_EMPTY(bb)) {
        return APR_SUCCESS;
    }

    /* First time we are called for this response? */
    if (!ctx) {
        client_accepts = apr_table_get(r->headers_in, "Accept-Encoding");
        /* BUGFIX: the test was inverted -- it skipped compression exactly
         * when the client DID advertise "zlibdict". */
        if (client_accepts == NULL ||
            !zlibdict__header_contains(r->pool, client_accepts)) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                          "Not compressing (no Accept-Encoding: zlibdict)");
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        ctx = f->ctx = apr_pcalloc(r->pool, sizeof(*ctx));
        ctx->bb = apr_brigade_create(r->pool, f->c->bucket_alloc);
        ctx->buf = apr_palloc(r->pool, DEFAULT_BUFFERSIZE);

        /* zstream must be NULL'd out. */
        memset(&ctx->zstr, 0, sizeof(z_stream));
        zerr = deflateInit2(&ctx->zstr, DEFAULT_COMPRESSION, Z_DEFLATED,
                            DEFAULT_WINDOWSIZE, DEFAULT_MEMLEVEL,
                            Z_DEFAULT_STRATEGY);
        if (zerr != Z_OK) {   /* BUGFIX: init result was ignored */
            ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                          "Not compressing (deflateInit2 failed)");
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }
        deflateSetDictionary(&ctx->zstr, (Bytef *)propfind_dictionary,
                             strlen(propfind_dictionary));

        /* Set Content-Encoding header so our client knows how to handle
           this data. */
        apr_table_mergen(r->headers_out, "Content-Encoding", "zlibdict");
    }

    /* Read the data from the handler and compress it with a dictionary. */
    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b)) {
        const char *data;
        void *write_buf;
        size_t len;
        size_t buf_size, write_len;

        if (APR_BUCKET_IS_EOS(b)) {
            deflateEnd(&ctx->zstr);

            /* Remove EOS from the old list, and insert into the new. */
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(ctx->bb, b);

            return ap_pass_brigade(f->next, ctx->bb);
        }

        if (APR_BUCKET_IS_METADATA(b))
            continue;

        status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
        if (status != APR_SUCCESS)
            break;

        /* The largest buffer we should need is 0.1% larger than the
           compressed data, + 12 bytes. This info comes from zlib.h. */
        buf_size = len + (len / 1000) + 13;
        apr_pool_create(&subpool, r->pool);
        write_buf = apr_palloc(subpool, buf_size);

        ctx->zstr.next_in = (Bytef *)data;  /* Casting away const! */
        ctx->zstr.avail_in = (uInt) len;

        zerr = Z_OK;
        while (ctx->zstr.avail_in > 0 && zerr != Z_STREAM_END) {
            ctx->zstr.next_out = write_buf;
            ctx->zstr.avail_out = (uInt) buf_size;

            zerr = deflate(&ctx->zstr, Z_FINISH);
            if (zerr < 0) {
                /* BUGFIX: was `return -1`, which is not an apr_status_t,
                 * and leaked the subpool. */
                apr_pool_destroy(subpool);
                return APR_EGENERAL;
            }

            write_len = buf_size - ctx->zstr.avail_out;
            if (write_len > 0) {
                apr_bucket *b_out;

                /* BUGFIX: bucket was created with the INPUT length `len`;
                 * it must carry the compressed length `write_len`.
                 * NULL free-func => the heap bucket copies write_buf, so
                 * destroying the subpool afterwards is safe. */
                b_out = apr_bucket_heap_create(write_buf, write_len, NULL,
                                               f->c->bucket_alloc);
                APR_BRIGADE_INSERT_TAIL(ctx->bb, b_out);
                /* Send what we have right now to the next filter. */
                status = ap_pass_brigade(f->next, ctx->bb);
                if (status != APR_SUCCESS) {
                    apr_pool_destroy(subpool);
                    return status;
                }
            }
        }
        /* BUGFIX: the subpool was destroyed INSIDE the deflate loop, so a
         * second iteration wrote into freed memory and destroyed the pool
         * twice.  Destroy it once per bucket, after the loop. */
        apr_pool_destroy(subpool);
    }

    return status;
}
/*
 * Pass the content of brigade bb to the external filter process and
 * collect whatever output the child has produced so far back into bb.
 * On EOS: close the child's stdin, switch its stdout pipe to a blocking
 * timeout, drain until EOF, then append a fresh EOS bucket.
 * Returns APR_SUCCESS or the first error encountered.
 */
static int ef_unified_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    ef_ctx_t *ctx = f->ctx;
    apr_bucket *b;
    ef_dir_t *dc;
    apr_size_t len;
    const char *data;
    apr_status_t rv;
    char buf[4096];
    apr_bucket *eos = NULL;
    apr_bucket_brigade *bb_tmp;

    dc = ctx->dc;
    bb_tmp = apr_brigade_create(r->pool, c->bucket_alloc);

    /* Feed every data bucket to the child; remember (but don't cross) EOS. */
    for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb); b = APR_BUCKET_NEXT(b)) {
        if (APR_BUCKET_IS_EOS(b)) {
            eos = b;
            break;
        }

        rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
        if (rv != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, "apr_bucket_read()");
            return rv;
        }

        /* Good cast, we just tested len isn't negative */
        if (len > 0 &&
            (rv = pass_data_to_filter(f, data, (apr_size_t)len, bb_tmp))
                != APR_SUCCESS) {
            return rv;
        }
    }

    /* Replace bb's contents with whatever pass_data_to_filter produced. */
    apr_brigade_cleanup(bb);
    APR_BRIGADE_CONCAT(bb, bb_tmp);
    apr_brigade_destroy(bb_tmp);

    if (eos) {
        /* close the child's stdin to signal that no more data is coming;
         * that will cause the child to finish generating output
         */
        if ((rv = apr_file_close(ctx->proc->in)) != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                          "apr_file_close(child input)");
            return rv;
        }
        /* since we've seen eos and closed the child's stdin, set the proper pipe
         * timeout; we don't care if we don't return from apr_file_read() for a while...
         */
        rv = apr_file_pipe_timeout_set(ctx->proc->out,
                                       r->server->timeout);
        if (rv) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                          "apr_file_pipe_timeout_set(child output)");
            return rv;
        }
    }

    /* Drain the child's stdout; NULL free-func makes the heap bucket copy
     * the stack buffer, so reusing buf each iteration is safe. */
    do {
        len = sizeof(buf);
        rv = apr_file_read(ctx->proc->out, buf, &len);
        if ((rv && !APR_STATUS_IS_EOF(rv) && !APR_STATUS_IS_EAGAIN(rv)) ||
            dc->debug >= DBGLVL_GORY) {
            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r,
                          "apr_file_read(child output), len %" APR_SIZE_T_FMT,
                          !rv ? len : -1);
        }
        if (APR_STATUS_IS_EAGAIN(rv)) {
            if (eos) {
                /* should not occur, because we have an APR timeout in place */
                AP_DEBUG_ASSERT(1 != 1);
            }
            return APR_SUCCESS;
        }
        if (rv == APR_SUCCESS) {
            b = apr_bucket_heap_create(buf, len, NULL, c->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(bb, b);
        }
    } while (rv == APR_SUCCESS);

    /* EOF is the expected way out of the loop; anything else is an error. */
    if (!APR_STATUS_IS_EOF(rv)) {
        return rv;
    }

    if (eos) {
        b = apr_bucket_eos_create(c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bb, b);
        ctx->hit_eos = 1;
    }

    return APR_SUCCESS;
}
/*
 * Per-listener accept thread for the WinNT MPM.
 *
 * Depending on the AcceptFilter configured for the listener's protocol
 * ('data', 'connect' or 'none'), connections are accepted either via
 * overlapped AcceptEx() (accf > 0) or plain accept() (accf == 0).  Each
 * accepted connection is wrapped in a completion context and posted to
 * ThreadDispatchIOCP for a worker thread to pick up.  Runs until
 * shutdown_in_progress is set or an irrecoverable error occurs.
 * Returns 0 on normal exit, 1 on early fatal setup failure.
 */
static unsigned int __stdcall winnt_accept(void *lr_)
{
    ap_listen_rec *lr = (ap_listen_rec *)lr_;
    apr_os_sock_info_t sockinfo;
    winnt_conn_ctx_t *context = NULL;
    DWORD BytesRead;
    SOCKET nlsd;
    core_server_config *core_sconf;
    const char *accf_name;
    int rv;
    int accf;                 /* 2 = 'data', 1 = 'connect', 0 = 'none' */
    int err_count = 0;
    HANDLE events[3];
#if APR_HAVE_IPV6
    SOCKADDR_STORAGE ss_listen;
    int namelen = sizeof(ss_listen);
#endif
    u_long zero = 0;

    core_sconf = ap_get_core_module_config(ap_server_conf->module_config);
    accf_name = apr_table_get(core_sconf->accf_map, lr->protocol);

    if (strcmp(accf_name, "data") == 0)
        accf = 2;
    else if (strcmp(accf_name, "connect") == 0)
        accf = 1;
    else if (strcmp(accf_name, "none") == 0)
        accf = 0;
    else {
        accf = 0;
        accf_name = "none";
        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ap_server_conf,
                     APLOGNO(00331) "winnt_accept: unrecognized AcceptFilter '%s', "
                     "only 'data', 'connect' or 'none' are valid. "
                     "Using 'none' instead", accf_name);
    }

    apr_os_sock_get(&nlsd, lr->sd);

#if APR_HAVE_IPV6
    /* Needed to learn the address family for creating accept sockets. */
    if (getsockname(nlsd, (struct sockaddr *)&ss_listen, &namelen) == SOCKET_ERROR) {
        ap_log_error(APLOG_MARK, APLOG_ERR, apr_get_netos_error(), ap_server_conf,
                     APLOGNO(00332)
                     "winnt_accept: getsockname error on listening socket, "
                     "is IPv6 available?");
        return 1;
    }
#endif

    if (accf > 0) /* 'data' or 'connect' */
    {
        /* first, high priority event is an already accepted connection */
        events[1] = exit_event;
        events[2] = max_requests_per_child_event;
    }
    else /* accf == 0, 'none' */
    {
reinit: /* target of data or connect upon too many AcceptEx failures */
        /* last, low priority event is a not yet accepted connection */
        events[0] = exit_event;
        events[1] = max_requests_per_child_event;
        events[2] = CreateEvent(NULL, FALSE, FALSE, NULL);

        /* The event needs to be removed from the accepted socket,
         * if not removed from the listen socket prior to accept(),
         */
        rv = WSAEventSelect(nlsd, events[2], FD_ACCEPT);
        if (rv) {
            ap_log_error(APLOG_MARK, APLOG_ERR, apr_get_netos_error(),
                         ap_server_conf, APLOGNO(00333)
                         "WSAEventSelect() failed.");
            CloseHandle(events[2]);
            return 1;
        }
    }

    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00334)
                 "Child: Accept thread listening on %pI using AcceptFilter %s",
                 lr->bind_addr, accf_name);

    while (!shutdown_in_progress) {
        if (!context) {
            int timeout;

            context = mpm_get_completion_context(&timeout);
            if (!context) {
                if (!timeout) {
                    /* Hopefully a temporary condition in the provider? */
                    ++err_count;
                    if (err_count > MAX_ACCEPTEX_ERR_COUNT) {
                        ap_log_error(APLOG_MARK, APLOG_CRIT, 0, ap_server_conf,
                                     APLOGNO(00335)
                                     "winnt_accept: Too many failures grabbing a "
                                     "connection ctx. Aborting.");
                        break;
                    }
                }
                Sleep(100);
                continue;
            }
        }

        if (accf > 0) /* Either 'connect' or 'data' */
        {
            DWORD len;
            char *buf;

            /* Create and initialize the accept socket */
#if APR_HAVE_IPV6
            if (context->accept_socket == INVALID_SOCKET) {
                context->accept_socket = socket(ss_listen.ss_family,
                                                SOCK_STREAM, IPPROTO_TCP);
                context->socket_family = ss_listen.ss_family;
            }
            else if (context->socket_family != ss_listen.ss_family) {
                closesocket(context->accept_socket);
                context->accept_socket = socket(ss_listen.ss_family,
                                                SOCK_STREAM, IPPROTO_TCP);
                context->socket_family = ss_listen.ss_family;
            }
#else
            if (context->accept_socket == INVALID_SOCKET)
                context->accept_socket = socket(AF_INET, SOCK_STREAM,
                                                IPPROTO_TCP);
#endif

            if (context->accept_socket == INVALID_SOCKET) {
                ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_netos_error(),
                             ap_server_conf, APLOGNO(00336)
                             "winnt_accept: Failed to allocate an accept socket. "
                             "Temporary resource constraint? Try again.");
                Sleep(100);
                continue;
            }

            if (accf == 2) { /* 'data': receive initial bytes into a bucket buffer */
                len = APR_BUCKET_BUFF_SIZE;
                buf = apr_bucket_alloc(len, context->ba);
                len -= PADDED_ADDR_SIZE * 2;  /* AcceptEx appends both addresses */
            }
            else /* (accf == 1) 'connect' */
            {
                len = 0;
                buf = context->buff;
            }

            /* AcceptEx on the completion context. The completion context will be
             * signaled when a connection is accepted.
             */
            if (!AcceptEx(nlsd, context->accept_socket, buf, len,
                          PADDED_ADDR_SIZE, PADDED_ADDR_SIZE, &BytesRead,
                          &context->overlapped)) {
                rv = apr_get_netos_error();
                if ((rv == APR_FROM_OS_ERROR(WSAECONNRESET)) ||
                    (rv == APR_FROM_OS_ERROR(WSAEACCES))) {
                    /* We can get here when:
                     * 1) the client disconnects early
                     * 2) handshake was incomplete
                     */
                    if (accf == 2)
                        apr_bucket_free(buf);
                    closesocket(context->accept_socket);
                    context->accept_socket = INVALID_SOCKET;
                    continue;
                }
                else if ((rv == APR_FROM_OS_ERROR(WSAEINVAL)) ||
                         (rv == APR_FROM_OS_ERROR(WSAENOTSOCK))) {
                    /* We can get here when:
                     * 1) TransmitFile does not properly recycle the accept socket (typically
                     *    because the client disconnected)
                     * 2) there is VPN or Firewall software installed with
                     *    buggy WSAAccept or WSADuplicateSocket implementation
                     * 3) the dynamic address / adapter has changed
                     * Give five chances, then fall back on AcceptFilter 'none'
                     */
                    if (accf == 2)
                        apr_bucket_free(buf);
                    closesocket(context->accept_socket);
                    context->accept_socket = INVALID_SOCKET;
                    ++err_count;
                    if (err_count > MAX_ACCEPTEX_ERR_COUNT) {
                        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                                     APLOGNO(00337)
                                     "Child: Encountered too many AcceptEx "
                                     "faults accepting client connections. "
                                     "Possible causes: dynamic address renewal, "
                                     "or incompatible VPN or firewall software. ");
                        ap_log_error(APLOG_MARK, APLOG_NOTICE, rv, ap_server_conf,
                                     APLOGNO(00338)
                                     "winnt_mpm: falling back to "
                                     "'AcceptFilter none'.");
                        err_count = 0;
                        accf = 0;
                    }
                    continue;
                }
                else if ((rv != APR_FROM_OS_ERROR(ERROR_IO_PENDING)) &&
                         (rv != APR_FROM_OS_ERROR(WSA_IO_PENDING))) {
                    /* Any other error that isn't just "I/O still pending". */
                    if (accf == 2)
                        apr_bucket_free(buf);
                    closesocket(context->accept_socket);
                    context->accept_socket = INVALID_SOCKET;
                    ++err_count;
                    if (err_count > MAX_ACCEPTEX_ERR_COUNT) {
                        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                                     APLOGNO(00339)
                                     "Child: Encountered too many AcceptEx "
                                     "faults accepting client connections.");
                        ap_log_error(APLOG_MARK, APLOG_NOTICE, rv, ap_server_conf,
                                     APLOGNO(00340)
                                     "winnt_mpm: falling back to "
                                     "'AcceptFilter none'.");
                        err_count = 0;
                        accf = 0;
                        goto reinit;
                    }
                    continue;
                }

                /* I/O is pending: wait for completion, exit, or child limit. */
                err_count = 0;
                events[0] = context->overlapped.hEvent;

                do {
                    rv = WaitForMultipleObjectsEx(3, events, FALSE, INFINITE, TRUE);
                } while (rv == WAIT_IO_COMPLETION);

                if (rv == WAIT_OBJECT_0) {
                    if ((context->accept_socket != INVALID_SOCKET) &&
                        !GetOverlappedResult((HANDLE)context->accept_socket,
                                             &context->overlapped,
                                             &BytesRead, FALSE)) {
                        ap_log_error(APLOG_MARK, APLOG_WARNING,
                                     apr_get_os_error(), ap_server_conf,
                                     APLOGNO(00341)
                                     "winnt_accept: Asynchronous AcceptEx failed.");
                        closesocket(context->accept_socket);
                        context->accept_socket = INVALID_SOCKET;
                    }
                }
                else {
                    /* exit_event triggered or event handle was closed */
                    closesocket(context->accept_socket);
                    context->accept_socket = INVALID_SOCKET;
                    if (accf == 2)
                        apr_bucket_free(buf);
                    break;
                }

                if (context->accept_socket == INVALID_SOCKET) {
                    if (accf == 2)
                        apr_bucket_free(buf);
                    continue;
                }
            }
            err_count = 0;

            /* Potential optimization; consider handing off to the worker */

            /* Inherit the listen socket settings. Required for
             * shutdown() to work
             */
            if (setsockopt(context->accept_socket, SOL_SOCKET,
                           SO_UPDATE_ACCEPT_CONTEXT, (char *)&nlsd,
                           sizeof(nlsd))) {
                ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_netos_error(),
                             ap_server_conf, APLOGNO(00342)
                             "setsockopt(SO_UPDATE_ACCEPT_CONTEXT) failed.");
                /* Not a failure condition. Keep running. */
            }

            /* Get the local & remote address
             * TODO; error check
             */
            GetAcceptExSockaddrs(buf, len, PADDED_ADDR_SIZE, PADDED_ADDR_SIZE,
                                 &context->sa_server, &context->sa_server_len,
                                 &context->sa_client, &context->sa_client_len);

            /* For 'data', craft a bucket for our data result
             * and pass to worker_main as context->overlapped.Pointer
             */
            if (accf == 2 && BytesRead) {
                apr_bucket *b;

                b = apr_bucket_heap_create(buf, APR_BUCKET_BUFF_SIZE,
                                           apr_bucket_free, context->ba);
                /* Adjust the bucket to refer to the actual bytes read */
                b->length = BytesRead;
                context->overlapped.Pointer = b;
            }
            else
                context->overlapped.Pointer = NULL;
        }
        else /* (accf = 0) e.g. 'none' */
        {
            /* There is no socket reuse without AcceptEx() */
            if (context->accept_socket != INVALID_SOCKET)
                closesocket(context->accept_socket);

            /* This could be a persistent event per-listener rather than
             * per-accept. However, the event needs to be removed from
             * the target socket if not removed from the listen socket
             * prior to accept(), or the event select is inherited.
             * and must be removed from the accepted socket.
             */

            do {
                rv = WaitForMultipleObjectsEx(3, events, FALSE, INFINITE, TRUE);
            } while (rv == WAIT_IO_COMPLETION);

            if (rv != WAIT_OBJECT_0 + 2) {
                /* not FD_ACCEPT;
                 * exit_event triggered or event handle was closed
                 */
                break;
            }

            /* Split context->buff in half: server address, then client. */
            context->sa_server = (void *) context->buff;
            context->sa_server_len = sizeof(context->buff) / 2;
            context->sa_client_len = context->sa_server_len;
            context->sa_client = (void *) (context->buff + context->sa_server_len);

            context->accept_socket = accept(nlsd, context->sa_server,
                                            &context->sa_server_len);

            if (context->accept_socket == INVALID_SOCKET) {
                rv = apr_get_netos_error();
                if (   rv == APR_FROM_OS_ERROR(WSAECONNRESET)
                    || rv == APR_FROM_OS_ERROR(WSAEINPROGRESS)
                    || rv == APR_FROM_OS_ERROR(WSAEWOULDBLOCK) ) {
                    ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, ap_server_conf,
                                 APLOGNO(00343)
                                 "accept() failed, retrying.");
                    continue;
                }

                /* A more serious error than 'retry', log it */
                ap_log_error(APLOG_MARK, APLOG_WARNING, rv, ap_server_conf,
                             APLOGNO(00344)
                             "accept() failed.");

                if (   rv == APR_FROM_OS_ERROR(WSAEMFILE)
                    || rv == APR_FROM_OS_ERROR(WSAENOBUFS) ) {
                    /* Hopefully a temporary condition in the provider? */
                    Sleep(100);
                    ++err_count;
                    if (err_count > MAX_ACCEPTEX_ERR_COUNT) {
                        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                                     APLOGNO(00345)
                                     "Child: Encountered too many accept() "
                                     "resource faults, aborting.");
                        break;
                    }
                    continue;
                }
                break;
            }

            /* Per MSDN, cancel the inherited association of this socket
             * to the WSAEventSelect API, and restore the state corresponding
             * to apr_os_sock_make's default assumptions (really, a flaw within
             * os_sock_make and os_sock_put that it does not query).
             */
            WSAEventSelect(context->accept_socket, 0, 0);
            context->overlapped.Pointer = NULL;
            err_count = 0;

            context->sa_server_len = sizeof(context->buff) / 2;
            if (getsockname(context->accept_socket, context->sa_server,
                            &context->sa_server_len) == SOCKET_ERROR) {
                ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_netos_error(),
                             ap_server_conf, APLOGNO(00346)
                             "getsockname failed");
                continue;
            }
            if ((getpeername(context->accept_socket, context->sa_client,
                             &context->sa_client_len)) == SOCKET_ERROR) {
                ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_netos_error(),
                             ap_server_conf, APLOGNO(00347)
                             "getpeername failed");
                memset(&context->sa_client, '\0', sizeof(context->sa_client));
            }
        }

        /* Common path: package the accepted socket for the dispatcher. */
        sockinfo.os_sock = &context->accept_socket;
        sockinfo.local = context->sa_server;
        sockinfo.remote = context->sa_client;
        sockinfo.family = context->sa_server->sa_family;
        sockinfo.type = SOCK_STREAM;
        sockinfo.protocol = IPPROTO_TCP;

        /* Restore the state corresponding to apr_os_sock_make's default
         * assumption of timeout -1 (really, a flaw of os_sock_make and
         * os_sock_put that it does not query to determine ->timeout).
         * XXX: Upon a fix to APR, these three statements should disappear.
         */
        ioctlsocket(context->accept_socket, FIONBIO, &zero);
        setsockopt(context->accept_socket, SOL_SOCKET, SO_RCVTIMEO,
                   (char *) &zero, sizeof(zero));
        setsockopt(context->accept_socket, SOL_SOCKET, SO_SNDTIMEO,
                   (char *) &zero, sizeof(zero));

        apr_os_sock_make(&context->sock, &sockinfo, context->ptrans);

        /* When a connection is received, send an io completion notification
         * to the ThreadDispatchIOCP.
         */
        PostQueuedCompletionStatus(ThreadDispatchIOCP, BytesRead,
                                   IOCP_CONNECTION_ACCEPTED,
                                   &context->overlapped);
        context = NULL;
    }
    if (!accf)
        CloseHandle(events[2]);

    if (!shutdown_in_progress) {
        /* Yow, hit an irrecoverable error! Tell the child to die. */
        SetEvent(exit_event);
    }

    ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, ap_server_conf,
                 APLOGNO(00348) "Child: Accept thread exiting.");
    return 0;
}
/*
 * Input filter: translates the request body from charset_source to
 * charset_default via xlate_brigade().  Untranslated leftovers (e.g. a
 * partial multibyte sequence) are stashed in ctx->bb for the next call.
 * If no conversion context was prepared for this request, the filter
 * marks itself a no-op and simply forwards the brigade.
 */
static apr_status_t xlate_in_filter(ap_filter_t *f, apr_bucket_brigade *bb,
                                    ap_input_mode_t mode, apr_read_type_e block,
                                    apr_off_t readbytes)
{
    apr_status_t rv;
    charset_req_t *reqinfo = ap_get_module_config(f->r->request_config,
                                                  &charset_lite_module);
    charset_dir_t *dc = ap_get_module_config(f->r->per_dir_config,
                                             &charset_lite_module);
    charset_filter_ctx_t *ctx = f->ctx;
    apr_size_t buffer_size;
    int hit_eos;

    if (!ctx) {
        /* this is SetInputFilter path; grab the preallocated context,
         * if any; note that if we decided not to do anything in an earlier
         * handler, we won't even have a reqinfo
         */
        if (reqinfo) {
            ctx = f->ctx = reqinfo->input_ctx;
            reqinfo->input_ctx = NULL; /* prevent SNAFU if user coded us twice
                                        * in the filter chain; we can't have two
                                        * instances using the same context
                                        */
        }
        if (!ctx) {
            /* no idea how to translate; don't do anything */
            ctx = f->ctx = apr_pcalloc(f->r->pool,
                                       sizeof(charset_filter_ctx_t));
            ctx->dc = dc;
            ctx->noop = 1;
        }
    }

    ap_log_rerror(APLOG_MARK, APLOG_TRACE6, 0, f->r,
                  "xlate_in_filter() - "
                  "charset_source: %s charset_default: %s",
                  dc && dc->charset_source ? dc->charset_source : "(none)",
                  dc && dc->charset_default ? dc->charset_default : "(none)");

    if (!ctx->ran) {  /* filter never ran before */
        chk_filter_chain(f);
        ctx->ran = 1;
        if (!ctx->noop && !ctx->is_sb
            && apr_table_get(f->r->headers_in, "Content-Length")) {
            /* A Content-Length header is present, but it won't be valid after
             * conversion because we're not converting between two single-byte
             * charsets.  This will affect most CGI scripts and may affect
             * some modules.
             * Content-Length can't be unset here because that would break
             * being able to read the request body.
             * Processing of chunked request bodies is not impacted by this
             * filter since the the length was not declared anyway.
             */
            ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, f->r,
                          "Request body length may change, resulting in "
                          "misprocessing by some modules or scripts");
        }
    }

    if (ctx->noop) {
        return ap_get_brigade(f->next, bb, mode, block, readbytes);
    }

    /* Only pull new data when no leftovers remain from the previous call. */
    if (APR_BRIGADE_EMPTY(ctx->bb)) {
        if ((rv = ap_get_brigade(f->next, bb, mode, block,
                                 readbytes)) != APR_SUCCESS) {
            return rv;
        }
    }
    else {
        APR_BRIGADE_PREPEND(bb, ctx->bb); /* first use the leftovers */
    }

    buffer_size = INPUT_XLATE_BUF_SIZE;
    rv = xlate_brigade(ctx, bb, ctx->tmp, &buffer_size, &hit_eos);
    if (rv == APR_SUCCESS) {
        if (!hit_eos) {
            /* move anything leftover into our context for next time;
             * we don't currently "set aside" since the data came from
             * down below, but I suspect that for long-term we need to
             * do that
             */
            APR_BRIGADE_CONCAT(ctx->bb, bb);
        }
        if (buffer_size < INPUT_XLATE_BUF_SIZE) { /* do we have output? */
            apr_bucket *e;

            /* NULL free-func => the heap bucket copies ctx->tmp, so the
             * scratch buffer can be reused on the next invocation. */
            e = apr_bucket_heap_create(ctx->tmp,
                                       INPUT_XLATE_BUF_SIZE - buffer_size,
                                       NULL, f->r->connection->bucket_alloc);
            /* make sure we insert at the head, because there may be
             * an eos bucket already there, and the eos bucket should
             * come after the data
             */
            APR_BRIGADE_INSERT_HEAD(bb, e);
        }
        else {
            /* XXX need to get some more data... what if the last brigade
             * we got had only the first byte of a multibyte char?  we need
             * to grab more data from the network instead of returning an
             * empty brigade
             */
        }
        /* If we have any metadata at the head of ctx->bb, go ahead and move it
         * onto the end of bb to be returned to our caller.
         */
        if (!APR_BRIGADE_EMPTY(ctx->bb)) {
            apr_bucket *b = APR_BRIGADE_FIRST(ctx->bb);
            while (b != APR_BRIGADE_SENTINEL(ctx->bb)
                   && APR_BUCKET_IS_METADATA(b)) {
                APR_BUCKET_REMOVE(b);
                APR_BRIGADE_INSERT_TAIL(bb, b);
                b = APR_BRIGADE_FIRST(ctx->bb);
            }
        }
    }
    else {
        log_xlate_error(f, rv);
    }

    return rv;
}
// Output filter processing static apr_status_t NonceFilterOutFilter(ap_filter_t *filter, apr_bucket_brigade *bb_in) { // Get the request record request_rec *rec = filter->r; // Get the connection record conn_rec *con = rec->connection; apr_bucket *b_in; apr_bucket_brigade *bb_out; // Create the output bucket brigade bb_out = apr_brigade_create(rec->pool, con->bucket_alloc); // Set the Content Security Policy in the response header srandom(time(NULL)); char nonce[20], value[40]; sprintf(nonce, "%ld", random()); strcpy(value,"script-nonce "); strcat(value, nonce); apr_table_setn(rec->headers_out, "Content-Security-Policy", value); // Loop on all the buckets in the input bucket brigade for (b_in = APR_BRIGADE_FIRST(bb_in); b_in != APR_BRIGADE_SENTINEL(bb_in); b_in = APR_BUCKET_NEXT(b_in)) { char *old_buff; apr_size_t len; char *new_buff; apr_bucket *b_out; if(APR_BUCKET_IS_EOS(b_in)) { apr_bucket *b_eos = apr_bucket_eos_create(con->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bb_out, b_eos); continue; } // read data from the bucket apr_bucket_read(b_in, &old_buff, &len, APR_BLOCK_READ); new_buff = apr_bucket_alloc(len, con->bucket_alloc); // search for the nonce char *str = NULL; char *s = old_buff; int i = 0, j = 0; while( (str = strstr(s,"nonce")) != NULL) { char *p = str - 1; str = str + 5; if(p[0] == '-') while(str[0] == ' ') str++; if(p[0] == ' ') { size_t sz = strcspn(str, "\"") ; str = str + sz + 1; } while(s != str) { new_buff[j++] = old_buff[i++]; s++; } char *nonce_str = NULL; // check if there is a valid placeholder, replace that if( (nonce_str = strstr(str,"aaaaaaaaaa")) == str ) { sprintf(&new_buff[j], "%s", nonce); j = j + strlen(nonce); size_t sz = strcspn(str, "\""); s = s + sz; i = i + sz; } // if no valid placeholder, just copy else { while(old_buff[i] != '"') { new_buff[j++] = old_buff[i++]; s++; } } str = NULL; } while(i<len) new_buff[j++] = old_buff[i++]; new_buff[j] = '\0'; // create the output bucket b_out = apr_bucket_heap_create(new_buff, len, 
apr_bucket_free, con->bucket_alloc); //insert the bucket at the end of the output bucket brigade APR_BRIGADE_INSERT_TAIL(bb_out, b_out); } // cleanup the input bucket brigade apr_brigade_cleanup(bb_in); // pass the new bucket brigade to the next filter in the chain return ap_pass_brigade(filter->next, bb_out); }
static int zevent_process_connection(conn_state_t *cs) { /* * code for your app,this just an example for echo test. */ apr_bucket *b; char *msg; apr_size_t len=0; int olen = 0; const char *buf; apr_status_t rv; cs->pfd->reqevents = APR_POLLIN; if(cs->pfd->rtnevents & APR_POLLIN){ len = 4096; msg = (char *)apr_bucket_alloc(len,cs->baout); if (msg == NULL) { return -1; } rv = apr_socket_recv(cs->pfd->desc.s,msg,&len); if(rv != APR_SUCCESS) { zevent_log_error(APLOG_MARK,NULL,"close socket!"); return -1; } zevent_log_error(APLOG_MARK,NULL,"recv:%s",msg); b = apr_bucket_heap_create(msg,len,NULL,cs->baout); apr_bucket_free(msg); APR_BRIGADE_INSERT_TAIL(cs->bbout,b); cs->pfd->reqevents |= APR_POLLOUT; } else { if(cs->bbout){ for (b = APR_BRIGADE_FIRST(cs->bbout); b != APR_BRIGADE_SENTINEL(cs->bbout); b = APR_BUCKET_NEXT(b)) { apr_bucket_read(b,&buf,&len,APR_BLOCK_READ); olen = len; //apr_brigade_flatten(cs->bbout,buf,&len); rv = apr_socket_send(cs->pfd->desc.s,buf,&len); if((rv == APR_SUCCESS) && (len>=olen)) { // zevent_log_error(APLOG_MARK,NULL,"send:%d bytes\n", // len); apr_bucket_delete(b); } if((rv == APR_SUCCESS && len < olen) || (rv != APR_SUCCESS)) { if(rv == APR_SUCCESS){ apr_bucket_split(b,len); apr_bucket *bucket = APR_BUCKET_NEXT(b); apr_bucket_delete(b); b = bucket; } break; } } if(b != APR_BRIGADE_SENTINEL(cs->bbout)) cs->pfd->reqevents |= APR_POLLOUT; } } apr_pollset_add(cs->pollset,cs->pfd); return 0; }