static apr_status_t serf_limit_readline(serf_bucket_t *bucket, int acceptable, int *found, const char **data, apr_size_t *len) { limit_context_t *ctx = bucket->data; apr_status_t status; if (!ctx->remaining) { *len = 0; return APR_EOF; } status = serf_bucket_readline(ctx->stream, acceptable, found, data, len); if (!SERF_BUCKET_READ_ERROR(status)) { ctx->remaining -= *len; } /* If we have met our limit and don't have a status, return EOF. */ if (!ctx->remaining && !status) { status = APR_EOF; } return status; }
/* Ensure the buffer is prepared for reading. Will return APR_SUCCESS, * APR_EOF, or some failure code. *len is only set for EOF. */ static apr_status_t common_databuf_prep(serf_databuf_t *databuf, apr_size_t *len) { apr_size_t readlen; apr_status_t status; /* if there is data in the buffer, then we're happy. */ if (databuf->remaining > 0) return APR_SUCCESS; /* if we already hit EOF, then keep returning that. */ if (APR_STATUS_IS_EOF(databuf->status)) { /* *data = NULL; ?? */ *len = 0; return APR_EOF; } /* refill the buffer */ status = (*databuf->read)(databuf->read_baton, sizeof(databuf->buf), databuf->buf, &readlen); if (SERF_BUCKET_READ_ERROR(status)) { return status; } databuf->current = databuf->buf; databuf->remaining = readlen; databuf->status = status; return APR_SUCCESS; }
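/* A minimal sketch (not serf's actual code) of how a bucket read function
   can sit on top of common_databuf_prep(): refill the buffer, then hand out
   at most `requested` bytes from databuf->current and advance the window.
   Only the serf_databuf_t fields referenced above are used; the function
   name is illustrative. */
#include "serf.h"
#include "serf_bucket_util.h"

static apr_status_t databuf_read_sketch(serf_databuf_t *databuf,
                                        apr_size_t requested,
                                        const char **data,
                                        apr_size_t *len)
{
    apr_status_t status = common_databuf_prep(databuf, len);
    if (status)
        return status;                 /* APR_EOF (with *len == 0) or an error */

    *len = (requested < databuf->remaining) ? requested : databuf->remaining;
    *data = databuf->current;

    databuf->current += *len;
    databuf->remaining -= *len;

    /* Only surface the stored status once the buffered data is drained. */
    return databuf->remaining ? APR_SUCCESS : databuf->status;
}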
static apr_status_t serf_limit_read(serf_bucket_t *bucket, apr_size_t requested, const char **data, apr_size_t *len) { limit_context_t *ctx = bucket->data; apr_status_t status; if (!ctx->remaining) { *len = 0; return APR_EOF; } if (requested == SERF_READ_ALL_AVAIL || requested > ctx->remaining) { if (ctx->remaining <= REQUESTED_MAX) { requested = (apr_size_t) ctx->remaining; } else { requested = REQUESTED_MAX; } } status = serf_bucket_read(ctx->stream, requested, data, len); if (!SERF_BUCKET_READ_ERROR(status)) { ctx->remaining -= *len; } /* If we have met our limit and don't have a status, return EOF. */ if (!ctx->remaining && !status) { status = APR_EOF; } return status; }
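/* Every read function above follows the same caller contract:
   SERF_BUCKET_READ_ERROR() treats APR_EAGAIN and APR_EOF as non-errors,
   data/len stay valid even when EOF accompanies the final chunk, and any
   non-success status means "stop and come back later".  A hypothetical
   drain loop built only on that contract (process_data() is assumed to be
   supplied by the caller): */
#include "serf.h"

static apr_status_t drain_bucket_sketch(serf_bucket_t *bucket,
                                        void (*process_data)(const char *data,
                                                             apr_size_t len))
{
    while (1) {
        const char *data;
        apr_size_t len;
        apr_status_t status = serf_bucket_read(bucket, SERF_READ_ALL_AVAIL,
                                               &data, &len);

        if (SERF_BUCKET_READ_ERROR(status))
            return status;              /* a real error */

        if (len)
            process_data(data, len);    /* valid until the next read */

        if (status)
            return status;              /* APR_EOF: drained; APR_EAGAIN: retry later */

        /* APR_SUCCESS: more data may be available right now; keep reading. */
    }
}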
static apr_status_t serf_aggregate_readline(serf_bucket_t *bucket, int acceptable, int *found, const char **data, apr_size_t *len) { aggregate_context_t *ctx = bucket->data; apr_status_t status; cleanup_aggregate(ctx, bucket->allocator); do { serf_bucket_t *head; *len = 0; if (!ctx->list) { if (ctx->hold_open) { return ctx->hold_open(ctx->hold_open_baton, bucket); } else { return APR_EOF; } } head = ctx->list->bucket; status = serf_bucket_readline(head, acceptable, found, data, len); if (SERF_BUCKET_READ_ERROR(status)) return status; if (status == APR_EOF) { bucket_list_t *next_list; /* head bucket is empty, move to to-be-cleaned-up list. */ next_list = ctx->list->next; ctx->list->next = ctx->done; ctx->done = ctx->list; ctx->list = next_list; /* If we have no more in our list, return EOF. */ if (!ctx->list) { if (ctx->hold_open) { return ctx->hold_open(ctx->hold_open_baton, bucket); } else { return APR_EOF; } } /* we read something, so bail out and let the appl. read again. */ if (*len) status = APR_SUCCESS; } /* continue with APR_SUCCESS or APR_EOF and no data read yet. */ } while (!*len && status != APR_EAGAIN); return status; }
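/* A small, hypothetical usage sketch for the aggregate bucket: append two
   copied fragments and read the result back one CRLF-terminated line at a
   time.  All constructors and constants are the ones already used above. */
#include "serf.h"

static apr_status_t read_lines_from_aggregate_sketch(serf_bucket_alloc_t *alloc)
{
    serf_bucket_t *agg = serf_bucket_aggregate_create(alloc);
    apr_status_t status;

    serf_bucket_aggregate_append(agg,
        serf_bucket_simple_copy_create("HTTP/1.1 200 OK\r\n", 17, alloc));
    serf_bucket_aggregate_append(agg,
        serf_bucket_simple_copy_create("Content-Length: 0\r\n", 19, alloc));

    do {
        const char *data;
        apr_size_t len;
        int found;

        status = serf_bucket_readline(agg, SERF_NEWLINE_CRLF, &found,
                                      &data, &len);
        if (SERF_BUCKET_READ_ERROR(status))
            return status;

        /* `found` reports whether this fragment ends a line; a partial
           line (SERF_NEWLINE_NONE) simply continues on the next read.
           Note that data may accompany the final APR_EOF. */
    } while (!status);

    serf_bucket_destroy(agg);
    return APR_SUCCESS;
}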
/* Helper function, runs the client and server context loops and validates that no errors were encountered, and all messages were sent and received. */ apr_status_t test_helper_run_requests_no_check(CuTest *tc, test_baton_t *tb, int num_requests, handler_baton_t handler_ctx[], apr_pool_t *pool) { apr_pool_t *iter_pool; int i, done = 0; apr_status_t status; apr_pool_create(&iter_pool, pool); while (!done) { apr_pool_clear(iter_pool); /* run server event loop */ status = run_test_server(tb->serv_ctx, 0, iter_pool); if (!APR_STATUS_IS_TIMEUP(status) && SERF_BUCKET_READ_ERROR(status)) return status; /* run proxy event loop */ if (tb->proxy_ctx) { status = run_test_server(tb->proxy_ctx, 0, iter_pool); if (!APR_STATUS_IS_TIMEUP(status) && SERF_BUCKET_READ_ERROR(status)) return status; } /* run client event loop */ status = serf_context_run(tb->context, 0, iter_pool); if (!APR_STATUS_IS_TIMEUP(status) && SERF_BUCKET_READ_ERROR(status)) return status; done = 1; for (i = 0; i < num_requests; i++) done &= handler_ctx[i].done; } apr_pool_destroy(iter_pool); return APR_SUCCESS; }
/* Returns the amount written. */ static int bio_apr_socket_write(BIO *bio, const char *in, int inlen) { apr_size_t len = inlen; serv_ctx_t *serv_ctx = bio->ptr; apr_status_t status = apr_socket_send(serv_ctx->client_sock, in, &len); serf__log_skt(TEST_VERBOSE, __FILE__, serv_ctx->client_sock, "Wrote %d of %d bytes to socket with status %d.\n", len, inlen, status); if (SERF_BUCKET_READ_ERROR(status)) return -1; return len; }
apr_status_t run_client_and_mock_servers_loops(test_baton_t *tb, int num_requests, handler_baton_t handler_ctx[], apr_pool_t *pool) { apr_pool_t *iter_pool; int i, done = 0; MockHTTP *mh = tb->mh; apr_status_t status; apr_time_t finish_time = apr_time_now() + apr_time_from_sec(15); apr_pool_create(&iter_pool, pool); while (!done) { mhError_t err; apr_pool_clear(iter_pool); /* run server event loop */ err = mhRunServerLoop(mh); /* Even if the mock server returned an error, it may have written something to the client. So process that data first, handle the error later. */ /* run client event loop */ status = serf_context_run(tb->context, 0, iter_pool); if (!APR_STATUS_IS_TIMEUP(status) && SERF_BUCKET_READ_ERROR(status)) return status; done = 1; for (i = 0; i < num_requests; i++) done &= handler_ctx[i].done; if (!done && (apr_time_now() > finish_time)) return APR_ETIMEDOUT; if (err == MOCKHTTP_TEST_FAILED) return SERF_ERROR_ISSUE_IN_TESTSUITE; } apr_pool_destroy(iter_pool); return APR_SUCCESS; }
/* This code should be replaced with header buckets. */ static apr_status_t fetch_headers(serf_bucket_t *bkt, response_context_t *ctx) { apr_status_t status; /* RFC 2616 says that CRLF is the only line ending, but we can easily * accept any kind of line ending. */ status = fetch_line(ctx, SERF_NEWLINE_ANY); if (SERF_BUCKET_READ_ERROR(status)) { return status; } /* Something was read. Process it. */ if (ctx->linebuf.state == SERF_LINEBUF_READY && ctx->linebuf.used) { const char *end_key; const char *c; end_key = c = memchr(ctx->linebuf.line, ':', ctx->linebuf.used); if (!c) { /* Bad headers? */ return SERF_ERROR_BAD_HTTP_RESPONSE; } /* Skip over the initial ':' */ c++; /* And skip all whitespace. */ for (; c < ctx->linebuf.line + ctx->linebuf.used; c++) { if (!apr_isspace(*c)) { break; } } /* Always copy the headers (from the linebuf into new mem). */ /* ### we should be able to optimize some mem copies */ serf_bucket_headers_setx( ctx->headers, ctx->linebuf.line, end_key - ctx->linebuf.line, 1, c, ctx->linebuf.line + ctx->linebuf.used - c, 1); } return status; }
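/* Once fetch_headers() has stored the key/value pairs with
   serf_bucket_headers_setx(), consumers usually walk them with
   serf_bucket_headers_do() (see the copy_headers_out callback used further
   down in this section).  A hypothetical callback, assuming the usual
   (baton, key, value) shape with a non-zero return stopping iteration: */
#include <stdio.h>
#include "serf.h"

static int print_header_sketch(void *baton, const char *key, const char *value)
{
    (void)baton;
    printf("%s: %s\n", key, value);
    return 0;   /* keep iterating */
}

/* Usage: serf_bucket_headers_do(ctx->headers, print_header_sketch, NULL); */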
/* Returns the amount read. */ static int bio_bucket_read(BIO *bio, char *in, int inlen) { serf_ssl_context_t *ctx = bio->ptr; const char *data; apr_status_t status; apr_size_t len; #ifdef SSL_VERBOSE printf("bio_bucket_read called for %d bytes\n", inlen); #endif if (ctx->encrypt.status == SERF_ERROR_WAIT_CONN && BIO_should_read(ctx->bio)) { #ifdef SSL_VERBOSE printf("bio_bucket_read waiting: (%d %d %d)\n", BIO_should_retry(ctx->bio), BIO_should_read(ctx->bio), BIO_get_retry_flags(ctx->bio)); #endif /* Falling back... */ ctx->encrypt.exhausted_reset = 1; BIO_clear_retry_flags(bio); } status = serf_bucket_read(ctx->decrypt.pending, inlen, &data, &len); ctx->decrypt.status = status; #ifdef SSL_VERBOSE printf("bio_bucket_read received %d bytes (%d)\n", len, status); #endif if (!SERF_BUCKET_READ_ERROR(status)) { /* Oh suck. */ if (len) { memcpy(in, data, len); return len; } if (APR_STATUS_IS_EOF(status)) { BIO_set_retry_read(bio); return -1; } } return -1; }
static apr_status_t serf_mock_peek(serf_bucket_t *bucket, const char **data, apr_size_t *len) { mockbkt_context_t *ctx = bucket->data; mockbkt_action *action; apr_status_t status; status = next_action(ctx); if (status) return status; action = &ctx->actions[ctx->current_action]; *len = ctx->remaining_data; *data = ctx->current_data; /* peek only returns an error, APR_EOF or APR_SUCCESS. APR_EAGAIN is returned as APR_SUCCESS. */ if (SERF_BUCKET_READ_ERROR(action->status)) return action->status; return action->status == APR_EOF ? APR_EOF : APR_SUCCESS; }
/* Returns the amount read. */ static int bio_file_read(BIO *bio, char *in, int inlen) { apr_file_t *file = bio->ptr; apr_status_t status; apr_size_t len; BIO_clear_retry_flags(bio); len = inlen; status = apr_file_read(file, in, &len); if (!SERF_BUCKET_READ_ERROR(status)) { /* Oh suck. */ if (APR_STATUS_IS_EOF(status)) { BIO_set_retry_read(bio); return -1; } else { return len; } } return -1; }
apr_status_t handle_response(serf_request_t *request, serf_bucket_t *response, void *handler_baton, apr_pool_t *pool) { handler_baton_t *ctx = handler_baton; if (! response) { serf_connection_request_create(ctx->tb->connection, setup_request, ctx); return APR_SUCCESS; } while (1) { apr_status_t status; const char *data; apr_size_t len; status = serf_bucket_read(response, 2048, &data, &len); if (SERF_BUCKET_READ_ERROR(status)) return status; if (APR_STATUS_IS_EOF(status)) { APR_ARRAY_PUSH(ctx->handled_requests, int) = ctx->req_id; ctx->done = TRUE; return APR_EOF; } if (APR_STATUS_IS_EAGAIN(status)) { return status; } } return APR_SUCCESS; }
/* Returns the amount read. */ static int bio_apr_socket_read(BIO *bio, char *in, int inlen) { apr_size_t len = inlen; serv_ctx_t *serv_ctx = bio->ptr; apr_status_t status; BIO_clear_retry_flags(bio); status = apr_socket_recv(serv_ctx->client_sock, in, &len); serv_ctx->bio_read_status = status; serf__log_skt(TEST_VERBOSE, __FILE__, serv_ctx->client_sock, "Read %d bytes from socket with status %d.\n", len, status); if (status == APR_EAGAIN) { BIO_set_retry_read(bio); if (len == 0) return -1; } if (SERF_BUCKET_READ_ERROR(status)) return -1; return len; }
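/* The bio_apr_socket_read/write callbacks above are plugged into OpenSSL
   through a BIO_METHOD table.  The sketch below uses the pre-OpenSSL-1.1
   static-struct style implied by the direct bio->ptr access above; the
   ctrl/create/destroy slots are left NULL here purely for illustration
   (real code installs callbacks there), and newer OpenSSL would use
   BIO_meth_new()/BIO_meth_set_read()/BIO_meth_set_write() instead. */
#include <openssl/bio.h>

static BIO_METHOD bio_apr_socket_method_sketch = {
    BIO_TYPE_SOCKET,
    "APR socket",
    bio_apr_socket_write,    /* bwrite */
    bio_apr_socket_read,     /* bread */
    NULL,                    /* bputs */
    NULL,                    /* bgets */
    NULL,                    /* ctrl - real code installs one */
    NULL,                    /* create */
    NULL,                    /* destroy */
    NULL                     /* callback_ctrl */
};

/* Later, roughly:  BIO *bio = BIO_new(&bio_apr_socket_method_sketch);
                    bio->ptr = serv_ctx;  */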
/* This function reads a decrypted stream and returns an encrypted stream. */ static apr_status_t ssl_encrypt(void *baton, apr_size_t bufsize, char *buf, apr_size_t *len) { const char *data; apr_size_t interim_bufsize; serf_ssl_context_t *ctx = baton; apr_status_t status; #ifdef SSL_VERBOSE printf("ssl_encrypt: begin %d\n", bufsize); #endif /* Try to read already encrypted but unread data first. */ status = serf_bucket_read(ctx->encrypt.pending, bufsize, &data, len); if (SERF_BUCKET_READ_ERROR(status)) { return status; } /* Aha, we read something. Return that now. */ if (*len) { memcpy(buf, data, *len); if (APR_STATUS_IS_EOF(status)) { status = APR_SUCCESS; } #ifdef SSL_VERBOSE printf("ssl_encrypt: %d %d %d (quick read)\n", status, *len, BIO_get_retry_flags(ctx->bio)); #endif return status; } if (BIO_should_retry(ctx->bio) && BIO_should_write(ctx->bio)) { #ifdef SSL_VERBOSE printf("ssl_encrypt: %d %d %d (should write exit)\n", status, *len, BIO_get_retry_flags(ctx->bio)); #endif return APR_EAGAIN; } /* If we were previously blocked, unblock ourselves now. */ if (BIO_should_read(ctx->bio)) { #ifdef SSL_VERBOSE printf("ssl_encrypt: reset %d %d (%d %d %d)\n", status, ctx->encrypt.status, BIO_should_retry(ctx->bio), BIO_should_read(ctx->bio), BIO_get_retry_flags(ctx->bio)); #endif ctx->encrypt.status = APR_SUCCESS; ctx->encrypt.exhausted_reset = 0; } /* Oh well, read from our stream now. */ interim_bufsize = bufsize; do { apr_size_t interim_len; if (!ctx->encrypt.status) { struct iovec vecs[64]; int vecs_read; status = serf_bucket_read_iovec(ctx->encrypt.stream, interim_bufsize, 64, vecs, &vecs_read); if (!SERF_BUCKET_READ_ERROR(status) && vecs_read) { char *vecs_data; int i, cur, vecs_data_len; int ssl_len; /* Combine the buffers of the iovec into one buffer, as that is with SSL_write requires. */ vecs_data_len = 0; for (i = 0; i < vecs_read; i++) { vecs_data_len += vecs[i].iov_len; } vecs_data = serf_bucket_mem_alloc(ctx->allocator, vecs_data_len); cur = 0; for (i = 0; i < vecs_read; i++) { memcpy(vecs_data + cur, vecs[i].iov_base, vecs[i].iov_len); cur += vecs[i].iov_len; } interim_bufsize -= vecs_data_len; interim_len = vecs_data_len; #ifdef SSL_VERBOSE printf("ssl_encrypt: bucket read %d bytes; status %d\n", interim_len, status); printf("---\n%s\n-(%d)-\n", vecs_data, interim_len); #endif /* Stash our status away. */ ctx->encrypt.status = status; ssl_len = SSL_write(ctx->ssl, vecs_data, interim_len); #ifdef SSL_VERBOSE printf("ssl_encrypt: SSL write: %d\n", ssl_len); #endif /* We're done. */ serf_bucket_mem_free(ctx->allocator, vecs_data); /* If we failed to write... */ if (ssl_len < 0) { int ssl_err; /* Ah, bugger. We need to put that data back. */ serf_bucket_aggregate_prepend_iovec(ctx->encrypt.stream, vecs, vecs_read); ssl_err = SSL_get_error(ctx->ssl, ssl_len); #ifdef SSL_VERBOSE printf("ssl_encrypt: SSL write error: %d\n", ssl_err); #endif if (ssl_err == SSL_ERROR_SYSCALL) { status = ctx->encrypt.status; if (SERF_BUCKET_READ_ERROR(status)) { return status; } } else { /* Oh, no. */ if (ssl_err == SSL_ERROR_WANT_READ) { status = SERF_ERROR_WAIT_CONN; } else { status = APR_EGENERAL; } } #ifdef SSL_VERBOSE printf("ssl_encrypt: SSL write error: %d %d\n", status, *len); #endif } } } else { interim_len = 0; *len = 0; status = ctx->encrypt.status; } } while (!status && interim_bufsize); /* Okay, we exhausted our underlying stream. */ if (!SERF_BUCKET_READ_ERROR(status)) { apr_status_t agg_status; struct iovec vecs[64]; int vecs_read, i; /* We read something! 
*/ agg_status = serf_bucket_read_iovec(ctx->encrypt.pending, bufsize, 64, vecs, &vecs_read); *len = 0; for (i = 0; i < vecs_read; i++) { memcpy(buf + *len, vecs[i].iov_base, vecs[i].iov_len); *len += vecs[i].iov_len; } #ifdef SSL_VERBOSE printf("ssl_encrypt read agg: %d %d %d %d\n", status, agg_status, ctx->encrypt.status, *len); #endif if (!agg_status) { status = agg_status; } } if (status == SERF_ERROR_WAIT_CONN && BIO_should_retry(ctx->bio) && BIO_should_read(ctx->bio)) { ctx->encrypt.exhausted = ctx->encrypt.status; ctx->encrypt.status = SERF_ERROR_WAIT_CONN; } #ifdef SSL_VERBOSE printf("ssl_encrypt finished: %d %d (%d %d %d)\n", status, *len, BIO_should_retry(ctx->bio), BIO_should_read(ctx->bio), BIO_get_retry_flags(ctx->bio)); #endif return status; }
/* This function reads an encrypted stream and returns the decrypted stream. */ static apr_status_t ssl_decrypt(void *baton, apr_size_t bufsize, char *buf, apr_size_t *len) { serf_ssl_context_t *ctx = baton; apr_size_t priv_len; apr_status_t status; const char *data; int ssl_len; #ifdef SSL_VERBOSE printf("ssl_decrypt: begin %d\n", bufsize); #endif /* Is there some data waiting to be read? */ ssl_len = SSL_read(ctx->ssl, buf, bufsize); if (ssl_len > 0) { #ifdef SSL_VERBOSE printf("ssl_decrypt: %d bytes (%d); status: %d; flags: %d\n", ssl_len, bufsize, ctx->decrypt.status, BIO_get_retry_flags(ctx->bio)); #endif *len = ssl_len; return APR_SUCCESS; } status = serf_bucket_read(ctx->decrypt.stream, bufsize, &data, &priv_len); if (!SERF_BUCKET_READ_ERROR(status) && priv_len) { serf_bucket_t *tmp; #ifdef SSL_VERBOSE printf("ssl_decrypt: read %d bytes (%d); status: %d\n", priv_len, bufsize, status); #endif tmp = serf_bucket_simple_copy_create(data, priv_len, ctx->decrypt.pending->allocator); serf_bucket_aggregate_append(ctx->decrypt.pending, tmp); ssl_len = SSL_read(ctx->ssl, buf, bufsize); if (ssl_len < 0) { int ssl_err; ssl_err = SSL_get_error(ctx->ssl, ssl_len); switch (ssl_err) { case SSL_ERROR_SYSCALL: *len = 0; status = ctx->decrypt.status; break; case SSL_ERROR_WANT_READ: *len = 0; status = APR_EAGAIN; break; case SSL_ERROR_SSL: *len = 0; status = ctx->pending_err ? ctx->pending_err : APR_EGENERAL; ctx->pending_err = 0; break; default: *len = 0; status = APR_EGENERAL; break; } } else { *len = ssl_len; #ifdef SSL_VERBOSE printf("---\n%s\n-(%d)-\n", buf, *len); #endif } } else { *len = 0; } #ifdef SSL_VERBOSE printf("ssl_decrypt: %d %d %d\n", status, *len, BIO_get_retry_flags(ctx->bio)); #endif return status; }
static apr_status_t handle_response(serf_request_t *request, serf_bucket_t *response, void *vbaton, apr_pool_t *pool) { apr_status_t rv; s_baton_t *ctx = vbaton; const char *data; apr_size_t len; serf_status_line sl; if (response == NULL) { ctx->rstatus = HTTP_INTERNAL_SERVER_ERROR; return APR_EGENERAL; } /* XXXXXXX: Create better error message. */ rv = serf_bucket_response_status(response, &sl); if (rv) { if (APR_STATUS_IS_EAGAIN(rv)) { return APR_SUCCESS; } ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, ctx->r, APLOGNO(01121) "serf_bucket_response_status..."); ctx->rstatus = HTTP_INTERNAL_SERVER_ERROR; if (mpm_supprts_serf) { ap_mpm_register_timed_callback(apr_time_from_msec(1), timed_cleanup_callback, ctx); } return rv; } /** * XXXXX: If I understood serf buckets better, it might be possible to not * copy all of the data here, and better stream it to the client. **/ do { apr_brigade_cleanup(ctx->tmpbb); rv = serf_bucket_read(response, AP_IOBUFSIZE, &data, &len); if (SERF_BUCKET_READ_ERROR(rv)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, ctx->r, APLOGNO(01122) "serf_bucket_read(response)"); return rv; } if (!ctx->done_headers) { serf_bucket_t *hdrs; serf_status_line line; /* TODO: improve */ serf_bucket_response_status(response, &line); ctx->r->status = line.code; hdrs = serf_bucket_response_get_headers(response); serf_bucket_headers_do(hdrs, copy_headers_out, ctx); ctx->done_headers = 1; } if (len > 0) { /* TODO: make APR bucket <-> serf bucket stuff more magical. */ apr_brigade_write(ctx->tmpbb, NULL, NULL, data, len); } if (APR_STATUS_IS_EOF(rv)) { ctx->keep_reading = 0; ctx->rstatus = ap_pass_brigade(ctx->r->output_filters, ctx->tmpbb); if (mpm_supprts_serf) { ap_mpm_register_timed_callback(apr_time_from_msec(1), timed_cleanup_callback, ctx); } return APR_EOF; } ctx->rstatus = ap_pass_brigade(ctx->r->output_filters, ctx->tmpbb); /* XXXX: Should we send a flush now? */ if (APR_STATUS_IS_EAGAIN(rv)) { return APR_SUCCESS; } } while (1); }
static apr_status_t read_aggregate(serf_bucket_t *bucket, apr_size_t requested, int vecs_size, struct iovec *vecs, int *vecs_used) { aggregate_context_t *ctx = bucket->data; int cur_vecs_used; apr_status_t status; *vecs_used = 0; if (!ctx->list) { if (ctx->hold_open) { return ctx->hold_open(ctx->hold_open_baton, bucket); } else { return APR_EOF; } } status = APR_SUCCESS; while (requested) { serf_bucket_t *head = ctx->list->bucket; status = serf_bucket_read_iovec(head, requested, vecs_size, vecs, &cur_vecs_used); if (SERF_BUCKET_READ_ERROR(status)) return status; /* Add the number of vecs we read to our running total. */ *vecs_used += cur_vecs_used; if (cur_vecs_used > 0 || status) { bucket_list_t *next_list; /* If we got SUCCESS (w/bytes) or EAGAIN, we want to return now * as it isn't safe to read more without returning to our caller. */ if (!status || APR_STATUS_IS_EAGAIN(status) || status == SERF_ERROR_WAIT_CONN) { return status; } /* However, if we read EOF, we can stash this bucket in a * to-be-freed list and move on to the next bucket. This ensures * that the bucket stays alive (so as not to violate our read * semantics). We'll destroy this list of buckets the next time * we are asked to perform a read operation - thus ensuring the * proper read lifetime. */ next_list = ctx->list->next; ctx->list->next = ctx->done; ctx->done = ctx->list; ctx->list = next_list; /* If we have no more in our list, return EOF. */ if (!ctx->list) { if (ctx->hold_open) { return ctx->hold_open(ctx->hold_open_baton, bucket); } else { return APR_EOF; } } /* At this point, it safe to read the next bucket - if we can. */ /* If the caller doesn't want ALL_AVAIL, decrement the size * of the items we just read from the list. */ if (requested != SERF_READ_ALL_AVAIL) { int i; for (i = 0; i < cur_vecs_used; i++) requested -= vecs[i].iov_len; } /* Adjust our vecs to account for what we just read. */ vecs_size -= cur_vecs_used; vecs += cur_vecs_used; /* We reached our max. Oh well. */ if (!requested || !vecs_size) { return APR_SUCCESS; } } } return status; }
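/* read_aggregate() hands the caller up to vecs_size iovecs per call.  A
   hypothetical consumer that flattens one such read into a contiguous
   buffer, the same trick ssl_encrypt uses below before calling SSL_write
   (the function name and the 32-vector limit are illustrative): */
#define APR_WANT_IOVEC
#include <apr_want.h>
#include <string.h>
#include "serf.h"
#include "serf_bucket_util.h"

static apr_status_t read_flattened_sketch(serf_bucket_t *bucket,
                                          serf_bucket_alloc_t *alloc,
                                          char **out, apr_size_t *out_len)
{
    struct iovec vecs[32];
    int vecs_used, i;
    apr_size_t total = 0, offset = 0;
    apr_status_t status;

    status = serf_bucket_read_iovec(bucket, SERF_READ_ALL_AVAIL,
                                    32, vecs, &vecs_used);
    if (SERF_BUCKET_READ_ERROR(status))
        return status;

    for (i = 0; i < vecs_used; i++)
        total += vecs[i].iov_len;

    *out = NULL;
    *out_len = total;
    if (total) {
        *out = serf_bucket_mem_alloc(alloc, total);
        for (i = 0; i < vecs_used; i++) {
            memcpy(*out + offset, vecs[i].iov_base, vecs[i].iov_len);
            offset += vecs[i].iov_len;
        }
    }

    return status;   /* APR_SUCCESS, APR_EAGAIN or APR_EOF */
}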
static apr_status_t s_handle_response(serf_request_t *UNUSED(request), serf_bucket_t *response, void *handler_ctx, apr_pool_t *UNUSED(pool)) { const char *data; apr_size_t len; serf_status_line sl; apr_status_t rv; handler_ctx_t *ctx = handler_ctx; rv = serf_bucket_response_status(response, &sl); if (rv != APR_SUCCESS) { if (APR_STATUS_IS_EAGAIN(rv)) { return rv; } ctx->rv = rv; apr_atomic_dec32(&ctx->requests_outstanding); return rv; } ctx->reason = sl.reason; ctx->response_code = sl.code; while (1) { rv = serf_bucket_read(response, 2048, &data, &len); if (SERF_BUCKET_READ_ERROR(rv)) { ctx->rv = rv; apr_atomic_dec32(&ctx->requests_outstanding); DBG(ctx->r, "REQ[%X] (ERROR)", TO_ADDR(ctx->r)); DBG(ctx->r,"REQ[%X] end %s()",TO_ADDR(ctx->r),__func__); return rv; } if (APR_STATUS_IS_EAGAIN(rv)) { /* 0 byte return if EAGAIN returned. */ DBG(ctx->r,"REQ[%X] (EAGAIN) len:[%d]", TO_ADDR(ctx->r), (int)len); DBG(ctx->r,"REQ[%X] end %s()",TO_ADDR(ctx->r),__func__); return rv; } if (len > 0) { if (! ctx->response) { ctx->response = apr_palloc(ctx->pool, len); ctx->response[0] = 0; ctx->response_len = 0; } else { char *tmp = apr_palloc(ctx->pool, ctx->response_len); memcpy(tmp, ctx->response, ctx->response_len); ctx->response = apr_palloc(ctx->pool, ctx->response_len + len); memcpy(ctx->response, tmp, ctx->response_len); } memcpy(&ctx->response[ctx->response_len], data, len); ctx->response_len += len; ctx->response[ctx->response_len] = 0; } if (APR_STATUS_IS_EOF(rv)) { serf_bucket_t *hdrs; char *tmp_headers = ""; hdrs = serf_bucket_response_get_headers(response); while (1) { rv = serf_bucket_read(hdrs, 2048, &data, &len); if (SERF_BUCKET_READ_ERROR(rv)) return rv; tmp_headers = apr_pstrcat(ctx->pool, tmp_headers, apr_psprintf(ctx->pool , "%.*s", (unsigned int)len, data), NULL); if (APR_STATUS_IS_EOF(rv)) { break; } } ctx->headers_out = apr_table_make(ctx->pool, 0); char *pstat; char *pair = NULL; for (;;) { pair = apr_strtok(tmp_headers, "\n", &pstat); if (!pair) break; tmp_headers = NULL; char *key; char *val; char *tpair = apr_pstrdup(ctx->pool, pair); key = tpair; val = strchr(tpair, ':'); if (val) { *val = 0; val++; key = qs_trim_string(ctx->pool, key); val = qs_trim_string(ctx->pool, val); DBG(ctx->r,"REQ[%X] key:[%s], val:[%s]", TO_ADDR(ctx->r),key, val); apr_table_add(ctx->headers_out, key, val); } } ctx->rv = APR_SUCCESS; apr_atomic_dec32(&ctx->requests_outstanding); DBG(ctx->r,"REQ[%X] (NORMAL)", TO_ADDR(ctx->r)); DBG(ctx->r,"REQ[%X] end %s()",TO_ADDR(ctx->r),__func__); return APR_EOF; } if (APR_STATUS_IS_EAGAIN(rv)) { DBG(ctx->r,"REQ[%X] (EAGAIN)", TO_ADDR(ctx->r)); DBG(ctx->r,"REQ[%X] end %s()",TO_ADDR(ctx->r),__func__); return rv; } } }
static apr_status_t handle_response(serf_request_t *request, serf_bucket_t *response, void *handler_baton, apr_pool_t *pool) { const char *data; apr_size_t len; serf_status_line sl; apr_status_t status; handler_baton_t *ctx = handler_baton; if (!response) { /* Oh no! We've been cancelled! */ abort(); } status = serf_bucket_response_status(response, &sl); if (status) { if (APR_STATUS_IS_EAGAIN(status)) { return APR_SUCCESS; } abort(); } while (1) { status = serf_bucket_read(response, 2048, &data, &len); if (SERF_BUCKET_READ_ERROR(status)) return status; /*fwrite(data, 1, len, stdout);*/ if (!ctx->hdr_read) { serf_bucket_t *hdrs; const char *val; printf("Processing %s\n", ctx->path); hdrs = serf_bucket_response_get_headers(response); val = serf_bucket_headers_get(hdrs, "Content-Type"); /* FIXME: This check isn't quite right because Content-Type could * be decorated; ideally strcasestr would be correct. */ if (val && strcasecmp(val, "text/html") == 0) { ctx->is_html = 1; apr_pool_create(&ctx->parser_pool, NULL); ctx->parser = apr_xml_parser_create(ctx->parser_pool); } else { ctx->is_html = 0; } ctx->hdr_read = 1; } if (ctx->is_html) { apr_status_t xs; xs = apr_xml_parser_feed(ctx->parser, data, len); /* Uh-oh. */ if (xs) { #ifdef SERF_VERBOSE printf("XML parser error (feed): %d\n", xs); #endif ctx->is_html = 0; } } /* are we done yet? */ if (APR_STATUS_IS_EOF(status)) { if (ctx->is_html) { apr_xml_doc *xmld; apr_status_t xs; doc_path_t *dup; xs = apr_xml_parser_done(ctx->parser, &xmld); if (xs) { #ifdef SERF_VERBOSE printf("XML parser error (done): %d\n", xs); #endif return xs; } dup = (doc_path_t*) serf_bucket_mem_alloc(ctx->doc_queue_alloc, sizeof(doc_path_t)); dup->doc = xmld; dup->path = (char*)serf_bucket_mem_alloc(ctx->doc_queue_alloc, ctx->path_len); memcpy(dup->path, ctx->path, ctx->path_len); dup->pool = ctx->parser_pool; *(doc_path_t **)apr_array_push(ctx->doc_queue) = dup; apr_thread_cond_signal(ctx->doc_queue_condvar); } apr_atomic_dec32(ctx->requests_outstanding); serf_bucket_mem_free(ctx->allocator, ctx->path); if (ctx->query) { serf_bucket_mem_free(ctx->allocator, ctx->query); serf_bucket_mem_free(ctx->allocator, ctx->full_path); } if (ctx->fragment) { serf_bucket_mem_free(ctx->allocator, ctx->fragment); } serf_bucket_mem_free(ctx->allocator, ctx); return APR_EOF; } /* have we drained the response so far? */ if (APR_STATUS_IS_EAGAIN(status)) return APR_SUCCESS; /* loop to read some more. */ } /* NOTREACHED */ }
static apr_status_t create_chunk(serf_bucket_t *bucket) { chunk_context_t *ctx = bucket->data; serf_bucket_t *simple_bkt; apr_size_t chunk_len; apr_size_t stream_len; struct iovec vecs[66]; /* 64 + chunk trailer + EOF trailer = 66 */ int vecs_read; int i; if (ctx->state != STATE_FETCH) { return APR_SUCCESS; } ctx->last_status = serf_bucket_read_iovec(ctx->stream, SERF_READ_ALL_AVAIL, 64, vecs, &vecs_read); if (SERF_BUCKET_READ_ERROR(ctx->last_status)) { /* Uh-oh. */ return ctx->last_status; } /* Count the length of the data we read. */ stream_len = 0; for (i = 0; i < vecs_read; i++) { stream_len += vecs[i].iov_len; } /* assert: stream_len in hex < sizeof(ctx->chunk_hdr) */ /* Inserting a 0 byte chunk indicates a terminator, which already happens * during the EOF handler below. Adding another one here will cause the * EOF chunk to be interpreted by the server as a new request. So, * we'll only do this if we have something to write. */ if (stream_len) { /* Build the chunk header. */ chunk_len = apr_snprintf(ctx->chunk_hdr, sizeof(ctx->chunk_hdr), "%" APR_UINT64_T_HEX_FMT CRLF, (apr_uint64_t)stream_len); /* Create a copy of the chunk header so we can have multiple chunks * in the pipeline at the same time. */ simple_bkt = serf_bucket_simple_copy_create(ctx->chunk_hdr, chunk_len, bucket->allocator); serf_bucket_aggregate_append(ctx->chunk, simple_bkt); /* Insert the chunk footer. */ vecs[vecs_read].iov_base = CRLF; vecs[vecs_read++].iov_len = sizeof(CRLF) - 1; } /* We've reached the end of the line for the stream. */ if (APR_STATUS_IS_EOF(ctx->last_status)) { /* Insert the chunk footer. */ vecs[vecs_read].iov_base = "0" CRLF CRLF; vecs[vecs_read++].iov_len = sizeof("0" CRLF CRLF) - 1; ctx->state = STATE_EOF; } else { /* Okay, we can return data. */ ctx->state = STATE_CHUNK; } serf_bucket_aggregate_append_iovec(ctx->chunk, vecs, vecs_read); return APR_SUCCESS; }
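/* Concretely, the framing above produces "<hex-size>" CRLF, the payload,
   a CRLF chunk footer, and finally "0" CRLF CRLF; e.g. a 26-byte payload
   gets the header "1a\r\n".  A tiny standalone check of just the header
   formatting (the helper name is illustrative): */
#include <apr.h>
#include <apr_strings.h>

static int format_chunk_header_sketch(char *hdr, apr_size_t hdr_size,
                                      apr_uint64_t payload_len)
{
    /* For payload_len == 26 this writes "1a\r\n" and returns 4. */
    return apr_snprintf(hdr, hdr_size,
                        "%" APR_UINT64_T_HEX_FMT "\r\n", payload_len);
}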
static apr_status_t serf_deflate_read(serf_bucket_t *bucket, apr_size_t requested, const char **data, apr_size_t *len) { deflate_context_t *ctx = bucket->data; apr_status_t status; const char *private_data; apr_size_t private_len; int zRC; while (1) { switch (ctx->state) { case STATE_READING_HEADER: case STATE_READING_VERIFY: status = serf_bucket_read(ctx->stream, ctx->stream_left, &private_data, &private_len); if (SERF_BUCKET_READ_ERROR(status)) { return status; } memcpy(ctx->hdr_buffer + (ctx->stream_size - ctx->stream_left), private_data, private_len); ctx->stream_left -= private_len; if (ctx->stream_left == 0) { ctx->state++; if (APR_STATUS_IS_EAGAIN(status)) { *len = 0; return status; } } else if (status) { *len = 0; return status; } break; case STATE_HEADER: if (ctx->hdr_buffer[0] != deflate_magic[0] || ctx->hdr_buffer[1] != deflate_magic[1]) { return SERF_ERROR_DECOMPRESSION_FAILED; } if (ctx->hdr_buffer[3] != 0) { return SERF_ERROR_DECOMPRESSION_FAILED; } ctx->state++; break; case STATE_VERIFY: { unsigned long compCRC, compLen, actualLen; /* Do the checksum computation. */ compCRC = getLong((unsigned char*)ctx->hdr_buffer); if (ctx->crc != compCRC) { return SERF_ERROR_DECOMPRESSION_FAILED; } compLen = getLong((unsigned char*)ctx->hdr_buffer + 4); /* The length in the trailer is module 2^32, so do the same for the actual length. */ actualLen = ctx->zstream.total_out; actualLen &= 0xFFFFFFFF; if (actualLen != compLen) { return SERF_ERROR_DECOMPRESSION_FAILED; } ctx->state++; break; } case STATE_INIT: zRC = inflateInit2(&ctx->zstream, ctx->windowSize); if (zRC != Z_OK) { return SERF_ERROR_DECOMPRESSION_FAILED; } ctx->zstream.next_out = ctx->buffer; ctx->zstream.avail_out = ctx->bufferSize; ctx->state++; break; case STATE_FINISH: inflateEnd(&ctx->zstream); serf_bucket_aggregate_prepend(ctx->stream, ctx->inflate_stream); ctx->inflate_stream = 0; ctx->state++; break; case STATE_INFLATE: /* Do we have anything already uncompressed to read? */ status = serf_bucket_read(ctx->inflate_stream, requested, data, len); if (SERF_BUCKET_READ_ERROR(status)) { return status; } /* Hide EOF. */ if (APR_STATUS_IS_EOF(status)) { status = ctx->stream_status; if (APR_STATUS_IS_EOF(status)) { /* We've read all of the data from our stream, but we * need to continue to iterate until we flush * out the zlib buffer. */ status = APR_SUCCESS; } } if (*len != 0) { return status; } /* We tried; but we have nothing buffered. Fetch more. */ /* It is possible that we maxed out avail_out before * exhausting avail_in; therefore, continue using the * previous buffer. Otherwise, fetch more data from * our stream bucket. */ if (ctx->zstream.avail_in == 0) { /* When we empty our inflated stream, we'll return this * status - this allow us to eventually pass up EAGAINs. */ ctx->stream_status = serf_bucket_read(ctx->stream, ctx->bufferSize, &private_data, &private_len); if (SERF_BUCKET_READ_ERROR(ctx->stream_status)) { return ctx->stream_status; } if (!private_len && APR_STATUS_IS_EAGAIN(ctx->stream_status)) { *len = 0; status = ctx->stream_status; ctx->stream_status = APR_SUCCESS; return status; } ctx->zstream.next_in = (unsigned char*)private_data; ctx->zstream.avail_in = private_len; } while (1) { zRC = inflate(&ctx->zstream, Z_NO_FLUSH); /* We're full or zlib requires more space. Either case, clear out our buffer, reset, and return. 
*/ if (zRC == Z_BUF_ERROR || ctx->zstream.avail_out == 0) { serf_bucket_t *tmp; ctx->zstream.next_out = ctx->buffer; private_len = ctx->bufferSize - ctx->zstream.avail_out; ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, private_len); /* FIXME: There probably needs to be a free func. */ tmp = SERF_BUCKET_SIMPLE_STRING_LEN((char *)ctx->buffer, private_len, bucket->allocator); serf_bucket_aggregate_append(ctx->inflate_stream, tmp); ctx->zstream.avail_out = ctx->bufferSize; break; } if (zRC == Z_STREAM_END) { serf_bucket_t *tmp; private_len = ctx->bufferSize - ctx->zstream.avail_out; ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, private_len); /* FIXME: There probably needs to be a free func. */ tmp = SERF_BUCKET_SIMPLE_STRING_LEN((char *)ctx->buffer, private_len, bucket->allocator); serf_bucket_aggregate_append(ctx->inflate_stream, tmp); ctx->zstream.avail_out = ctx->bufferSize; /* Push back the remaining data to be read. */ tmp = serf_bucket_aggregate_create(bucket->allocator); serf_bucket_aggregate_prepend(tmp, ctx->stream); ctx->stream = tmp; /* We now need to take the remaining avail_in and * throw it in ctx->stream so our next read picks it up. */ tmp = SERF_BUCKET_SIMPLE_STRING_LEN( (const char*)ctx->zstream.next_in, ctx->zstream.avail_in, bucket->allocator); serf_bucket_aggregate_prepend(ctx->stream, tmp); switch (ctx->format) { case SERF_DEFLATE_GZIP: ctx->stream_left = ctx->stream_size = DEFLATE_VERIFY_SIZE; ctx->state++; break; case SERF_DEFLATE_DEFLATE: /* Deflate does not have a verify footer. */ ctx->state = STATE_FINISH; break; default: /* Not reachable */ return APR_EGENERAL; } break; } /* Any other error? */ if (zRC != Z_OK) { return SERF_ERROR_DECOMPRESSION_FAILED; } /* As long as zRC == Z_OK, just keep looping. */ } /* Okay, we've inflated. Try to read. */ status = serf_bucket_read(ctx->inflate_stream, requested, data, len); /* Hide EOF. */ if (APR_STATUS_IS_EOF(status)) { status = ctx->stream_status; /* If the inflation wasn't finished, return APR_SUCCESS. */ if (zRC != Z_STREAM_END) return APR_SUCCESS; /* If our stream is finished too and all data was inflated, * return SUCCESS so we'll iterate one more time. */ if (APR_STATUS_IS_EOF(status)) { /* No more data to read from the stream, and everything inflated. If all data was received correctly, state should have been advanced to STATE_READING_VERIFY or STATE_FINISH. If not, then the data was incomplete and we have an error. */ if (ctx->state != STATE_INFLATE) return APR_SUCCESS; else return SERF_ERROR_DECOMPRESSION_FAILED; } } return status; case STATE_DONE: /* We're done inflating. Use our finished buffer. */ return serf_bucket_read(ctx->stream, requested, data, len); default: /* Not reachable */ return APR_EGENERAL; } } /* NOTREACHED */ }
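/* Callers normally never drive this state machine directly: they wrap the
   compressed stream once and then read plain bytes out of the wrapper.  A
   hypothetical setup, assuming serf's usual
   serf_bucket_deflate_create(stream, alloc, format) constructor and the
   SERF_DEFLATE_GZIP format referenced above: */
#include "serf.h"

static serf_bucket_t *wrap_gzip_body_sketch(serf_bucket_t *raw_body,
                                            serf_bucket_alloc_t *alloc)
{
    /* Reads from the returned bucket yield decompressed data; the drain
       loop sketched earlier in this section applies unchanged. */
    return serf_bucket_deflate_create(raw_body, alloc, SERF_DEFLATE_GZIP);
}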
/* Verify received requests and take the necessary actions (return a response, kill the connection ...) */ static apr_status_t replay(serv_ctx_t *servctx, apr_int16_t rtnevents, apr_pool_t *pool) { apr_status_t status = APR_SUCCESS; test_server_action_t *action; if (rtnevents & APR_POLLIN) { if (servctx->message_list == NULL) { /* we're not expecting any requests to reach this server! */ serf__log(TEST_VERBOSE, __FILE__, "Received request where none was expected.\n"); return SERF_ERROR_ISSUE_IN_TESTSUITE; } if (servctx->cur_action >= servctx->action_count) { char buf[128]; apr_size_t len = sizeof(buf); status = servctx->read(servctx, buf, &len); if (! APR_STATUS_IS_EAGAIN(status)) { /* we're out of actions! */ serf__log(TEST_VERBOSE, __FILE__, "Received more requests than expected.\n"); return SERF_ERROR_ISSUE_IN_TESTSUITE; } return status; } action = &servctx->action_list[servctx->cur_action]; serf__log(TEST_VERBOSE, __FILE__, "POLLIN while replaying action %d, kind: %d.\n", servctx->cur_action, action->kind); /* Read the remaining data from the client and kill the socket. */ if (action->kind == SERVER_IGNORE_AND_KILL_CONNECTION) { char buf[128]; apr_size_t len = sizeof(buf); status = servctx->read(servctx, buf, &len); if (status == APR_EOF) { serf__log(TEST_VERBOSE, __FILE__, "Killing this connection.\n"); apr_socket_close(servctx->client_sock); servctx->client_sock = NULL; next_action(servctx); return APR_SUCCESS; } return status; } else if (action->kind == SERVER_RECV || (action->kind == SERVER_RESPOND && servctx->outstanding_responses == 0)) { apr_size_t msg_len, len; char buf[128]; test_server_message_t *message; message = &servctx->message_list[servctx->cur_message]; msg_len = strlen(message->text); do { len = msg_len - servctx->message_buf_pos; if (len > sizeof(buf)) len = sizeof(buf); status = servctx->read(servctx, buf, &len); if (SERF_BUCKET_READ_ERROR(status)) return status; if (status == APR_EOF) { serf__log(TEST_VERBOSE, __FILE__, "Server: Client hung up the connection.\n"); break; } if (servctx->options & TEST_SERVER_DUMP) fwrite(buf, len, 1, stdout); if (strncmp(buf, message->text + servctx->message_buf_pos, len) != 0) { /* ## TODO: Better diagnostics. */ printf("Expected: (\n"); fwrite(message->text + servctx->message_buf_pos, len, 1, stdout); printf(")\n"); printf("Actual: (\n"); fwrite(buf, len, 1, stdout); printf(")\n"); return SERF_ERROR_ISSUE_IN_TESTSUITE; } servctx->message_buf_pos += len; if (servctx->message_buf_pos >= msg_len) { next_message(servctx); servctx->message_buf_pos -= msg_len; if (action->kind == SERVER_RESPOND) servctx->outstanding_responses++; if (action->kind == SERVER_RECV) next_action(servctx); break; } } while (!status); } else if (action->kind == PROXY_FORWARD) { apr_size_t len; char buf[BUFSIZE]; serf_bucket_t *tmp; /* Read all incoming data from the client to forward it to the server later. */ do { len = BUFSIZE; status = servctx->read(servctx, buf, &len); if (SERF_BUCKET_READ_ERROR(status)) return status; serf__log(TEST_VERBOSE, __FILE__, "proxy: reading %d bytes %.*s from client with " "status %d.\n", len, len, buf, status); if (status == APR_EOF) { serf__log(TEST_VERBOSE, __FILE__, "Proxy: client hung up the connection. Reset the " "connection to the server.\n"); /* We have to stop forwarding, if a new connection opens the CONNECT request should not be forwarded to the server. 
*/ next_action(servctx); } if (!servctx->servstream) servctx->servstream = serf__bucket_stream_create( servctx->allocator, detect_eof,servctx); if (len) { tmp = serf_bucket_simple_copy_create(buf, len, servctx->allocator); serf_bucket_aggregate_append(servctx->servstream, tmp); } } while (!status); } } if (rtnevents & APR_POLLOUT) { action = &servctx->action_list[servctx->cur_action]; serf__log(TEST_VERBOSE, __FILE__, "POLLOUT when replaying action %d, kind: %d.\n", servctx->cur_action, action->kind); if (action->kind == SERVER_RESPOND && servctx->outstanding_responses) { apr_size_t msg_len; apr_size_t len; msg_len = strlen(action->text); len = msg_len - servctx->action_buf_pos; status = servctx->send(servctx, action->text + servctx->action_buf_pos, &len); if (status != APR_SUCCESS) return status; if (servctx->options & TEST_SERVER_DUMP) fwrite(action->text + servctx->action_buf_pos, len, 1, stdout); servctx->action_buf_pos += len; if (servctx->action_buf_pos >= msg_len) { next_action(servctx); servctx->outstanding_responses--; } } else if (action->kind == SERVER_KILL_CONNECTION || action->kind == SERVER_IGNORE_AND_KILL_CONNECTION) { serf__log(TEST_VERBOSE, __FILE__, "Killing this connection.\n"); apr_socket_close(servctx->client_sock); servctx->client_sock = NULL; next_action(servctx); } else if (action->kind == PROXY_FORWARD) { apr_size_t len; char *buf; if (!servctx->proxy_client_sock) { serf__log(TEST_VERBOSE, __FILE__, "Proxy: setting up connection " "to server.\n"); status = create_client_socket(&servctx->proxy_client_sock, servctx, action->text); if (!servctx->clientstream) servctx->clientstream = serf__bucket_stream_create( servctx->allocator, detect_eof,servctx); } /* Send all data received from the server to the client. */ do { apr_size_t readlen; readlen = BUFSIZE; status = serf_bucket_read(servctx->clientstream, readlen, &buf, &readlen); if (SERF_BUCKET_READ_ERROR(status)) return status; if (!readlen) break; len = readlen; serf__log(TEST_VERBOSE, __FILE__, "proxy: sending %d bytes to client.\n", len); status = servctx->send(servctx, buf, &len); if (status != APR_SUCCESS) { return status; } if (len != readlen) /* abort for now, return buf to aggregate if not everything could be sent. */ return APR_EGENERAL; } while (!status); } } else if (rtnevents & APR_POLLIN) { /* ignore */ } else { printf("Unknown rtnevents: %d\n", rtnevents); abort(); } return status; }
/* Exchange data between proxy and server */ static apr_status_t proxy_replay(serv_ctx_t *servctx, apr_int16_t rtnevents, apr_pool_t *pool) { apr_status_t status; if (rtnevents & APR_POLLIN) { apr_size_t len; char buf[BUFSIZE]; serf_bucket_t *tmp; serf__log(TEST_VERBOSE, __FILE__, "proxy_replay: POLLIN\n"); /* Read all incoming data from the server to forward it to the client later. */ do { len = BUFSIZE; status = apr_socket_recv(servctx->proxy_client_sock, buf, &len); if (SERF_BUCKET_READ_ERROR(status)) return status; serf__log(TEST_VERBOSE, __FILE__, "proxy: reading %d bytes %.*s from server.\n", len, len, buf); tmp = serf_bucket_simple_copy_create(buf, len, servctx->allocator); serf_bucket_aggregate_append(servctx->clientstream, tmp); } while (!status); } if (rtnevents & APR_POLLOUT) { apr_size_t len; char *buf; serf__log(TEST_VERBOSE, __FILE__, "proxy_replay: POLLOUT\n"); /* Send all data received from the client to the server. */ do { apr_size_t readlen; readlen = BUFSIZE; if (!servctx->servstream) servctx->servstream = serf__bucket_stream_create( servctx->allocator, detect_eof,servctx); status = serf_bucket_read(servctx->servstream, BUFSIZE, &buf, &readlen); if (SERF_BUCKET_READ_ERROR(status)) return status; if (!readlen) break; len = readlen; serf__log(TEST_VERBOSE, __FILE__, "proxy: sending %d bytes %.*s to server.\n", len, len, buf); status = apr_socket_send(servctx->proxy_client_sock, buf, &len); if (status != APR_SUCCESS) { return status; } if (len != readlen) /* abort for now */ return APR_EGENERAL; } while (!status); } else if (rtnevents & APR_POLLIN) { /* ignore */ } else { printf("Unknown rtnevents: %d\n", rtnevents); abort(); } return status; }
/* internal test for the mock buckets */ static void test_basic_mock_bucket(CuTest *tc) { serf_bucket_t *mock_bkt; apr_pool_t *test_pool = tc->testBaton; serf_bucket_alloc_t *alloc = serf_bucket_allocator_create(test_pool, NULL, NULL); /* read one line */ { mockbkt_action actions[]= { { 1, "HTTP/1.1 200 OK" CRLF, APR_EOF }, }; mock_bkt = serf_bucket_mock_create(actions, 1, alloc); read_and_check_bucket(tc, mock_bkt, "HTTP/1.1 200 OK" CRLF); mock_bkt = serf_bucket_mock_create(actions, 1, alloc); readlines_and_check_bucket(tc, mock_bkt, SERF_NEWLINE_CRLF, "HTTP/1.1 200 OK" CRLF, 1); } /* read one line, character per character */ { apr_status_t status; const char *expected = "HTTP/1.1 200 OK" CRLF; mockbkt_action actions[]= { { 1, "HTTP/1.1 200 OK" CRLF, APR_EOF }, }; mock_bkt = serf_bucket_mock_create(actions, 1, alloc); do { const char *data; apr_size_t len; status = serf_bucket_read(mock_bkt, 1, &data, &len); CuAssert(tc, "Got error during bucket reading.", !SERF_BUCKET_READ_ERROR(status)); CuAssert(tc, "Read more data than expected.", strlen(expected) >= len); CuAssert(tc, "Read data is not equal to expected.", strncmp(expected, data, len) == 0); CuAssert(tc, "Read more data than requested.", len <= 1); expected += len; } while(!APR_STATUS_IS_EOF(status)); CuAssert(tc, "Read less data than expected.", strlen(expected) == 0); } /* read multiple lines */ { mockbkt_action actions[]= { { 1, "HTTP/1.1 200 OK" CRLF, APR_SUCCESS }, { 1, "Content-Type: text/plain" CRLF, APR_EOF }, }; mock_bkt = serf_bucket_mock_create(actions, 2, alloc); readlines_and_check_bucket(tc, mock_bkt, SERF_NEWLINE_CRLF, "HTTP/1.1 200 OK" CRLF "Content-Type: text/plain" CRLF, 2); } /* read empty line */ { mockbkt_action actions[]= { { 1, "HTTP/1.1 200 OK" CRLF, APR_SUCCESS }, { 1, "", APR_EAGAIN }, { 1, "Content-Type: text/plain" CRLF, APR_EOF }, }; mock_bkt = serf_bucket_mock_create(actions, 3, alloc); read_and_check_bucket(tc, mock_bkt, "HTTP/1.1 200 OK" CRLF "Content-Type: text/plain" CRLF); mock_bkt = serf_bucket_mock_create(actions, 3, alloc); readlines_and_check_bucket(tc, mock_bkt, SERF_NEWLINE_CRLF, "HTTP/1.1 200 OK" CRLF "Content-Type: text/plain" CRLF, 2); } /* read empty line */ { mockbkt_action actions[]= { { 1, "HTTP/1.1 200 OK" CR, APR_SUCCESS }, { 1, "", APR_EAGAIN }, { 1, LF, APR_EOF }, }; mock_bkt = serf_bucket_mock_create(actions, sizeof(actions)/sizeof(actions[0]), alloc); read_and_check_bucket(tc, mock_bkt, "HTTP/1.1 200 OK" CRLF); mock_bkt = serf_bucket_mock_create(actions, sizeof(actions)/sizeof(actions[0]), alloc); readlines_and_check_bucket(tc, mock_bkt, SERF_NEWLINE_CRLF, "HTTP/1.1 200 OK" CRLF, 1); } /* test more_data_arrived */ { apr_status_t status; const char *data; apr_size_t len; int i; mockbkt_action actions[]= { { 1, "", APR_EAGAIN }, { 1, "blabla", APR_EOF }, }; mock_bkt = serf_bucket_mock_create(actions, sizeof(actions)/sizeof(actions[0]), alloc); for (i = 0; i < 5; i++) { status = serf_bucket_peek(mock_bkt, &data, &len); CuAssertIntEquals(tc, APR_SUCCESS, status); CuAssertIntEquals(tc, 0, len); CuAssertIntEquals(tc, '\0', *data); } serf_bucket_mock_more_data_arrived(mock_bkt); status = serf_bucket_peek(mock_bkt, &data, &len); CuAssertIntEquals(tc, APR_EOF, status); CuAssertIntEquals(tc, 6, len); CuAssert(tc, "Read data is not equal to expected.", strncmp("blabla", data, len) == 0); } }
static apr_status_t handle_response(serf_request_t *request, serf_bucket_t *response, void *handler_baton, apr_pool_t *pool) { serf_status_line sl; apr_status_t status; handler_baton_t *ctx = handler_baton; if (!response) { /* A NULL response probably means that the connection was closed while this request was already written. Just requeue it. */ serf_connection_t *conn = serf_request_get_conn(request); serf_connection_request_create(conn, setup_request, handler_baton); return APR_SUCCESS; } status = serf_bucket_response_status(response, &sl); if (status) { return status; } while (1) { struct iovec vecs[64]; int vecs_read; apr_size_t bytes_written; status = serf_bucket_read_iovec(response, 8000, 64, vecs, &vecs_read); if (SERF_BUCKET_READ_ERROR(status)) return status; /* got some data. print it out. */ if (vecs_read) { apr_file_writev(ctx->output_file, vecs, vecs_read, &bytes_written); } /* are we done yet? */ if (APR_STATUS_IS_EOF(status)) { if (ctx->print_headers) { serf_bucket_t *hdrs; hdrs = serf_bucket_response_get_headers(response); while (1) { status = serf_bucket_read_iovec(hdrs, 8000, 64, vecs, &vecs_read); if (SERF_BUCKET_READ_ERROR(status)) return status; if (vecs_read) { apr_file_writev(ctx->output_file, vecs, vecs_read, &bytes_written); } if (APR_STATUS_IS_EOF(status)) { break; } } } apr_atomic_inc32(&ctx->completed_requests); return APR_EOF; } /* have we drained the response so far? */ if (APR_STATUS_IS_EAGAIN(status)) return status; /* loop to read some more. */ } /* NOTREACHED */ }
static apr_status_t serf_dechunk_read(serf_bucket_t *bucket, apr_size_t requested, const char **data, apr_size_t *len) { dechunk_context_t *ctx = bucket->data; apr_status_t status; while (1) { switch (ctx->state) { case STATE_SIZE: /* fetch a line terminated by CRLF */ status = serf_linebuf_fetch(&ctx->linebuf, ctx->stream, SERF_NEWLINE_CRLF); if (SERF_BUCKET_READ_ERROR(status)) return status; /* if a line was read, then parse it. */ if (ctx->linebuf.state == SERF_LINEBUF_READY) { /* NUL-terminate the line. if it filled the entire buffer, then just assume the thing is too large. */ if (ctx->linebuf.used == sizeof(ctx->linebuf.line)) return APR_FROM_OS_ERROR(ERANGE); ctx->linebuf.line[ctx->linebuf.used] = '\0'; /* convert from HEX digits. */ ctx->body_left = apr_strtoi64(ctx->linebuf.line, NULL, 16); if (errno == ERANGE) { return APR_FROM_OS_ERROR(ERANGE); } if (ctx->body_left == 0) { /* Just read the last-chunk marker. We're DONE. */ ctx->state = STATE_DONE; status = APR_EOF; } else { /* Got a size, so we'll start reading the chunk now. */ ctx->state = STATE_CHUNK; } /* If we can read more, then go do so. */ if (!status) continue; } /* assert: status != 0 */ /* Note that we didn't actually read anything, so our callers * don't get confused. */ *len = 0; return status; case STATE_CHUNK: if (requested > ctx->body_left) { requested = ctx->body_left; } /* Delegate to the stream bucket to do the read. */ status = serf_bucket_read(ctx->stream, requested, data, len); if (SERF_BUCKET_READ_ERROR(status)) return status; /* Some data was read, so decrement the amount left and see * if we're done reading this chunk. */ ctx->body_left -= *len; if (!ctx->body_left) { ctx->state = STATE_TERM; ctx->body_left = 2; /* CRLF */ } /* Return the data we just read. */ return status; case STATE_TERM: /* Delegate to the stream bucket to do the read. */ status = serf_bucket_read(ctx->stream, ctx->body_left, data, len); if (SERF_BUCKET_READ_ERROR(status)) return status; /* Some data was read, so decrement the amount left and see * if we're done reading the chunk terminator. */ ctx->body_left -= *len; if (!ctx->body_left) { ctx->state = STATE_SIZE; } if (status) return status; break; case STATE_DONE: /* Just keep returning EOF */ return APR_EOF; default: /* Not reachable */ return APR_EGENERAL; } } /* NOTREACHED */ }
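/* The dechunk bucket is the inverse of create_chunk() above: it strips the
   hex size lines and CRLF terminators and yields the original payload.  A
   hypothetical round-trip, assuming serf's usual
   serf_bucket_chunk_create()/serf_bucket_dechunk_create() constructors: */
#include "serf.h"

static serf_bucket_t *chunk_roundtrip_sketch(serf_bucket_t *plain_body,
                                             serf_bucket_alloc_t *alloc)
{
    serf_bucket_t *chunked = serf_bucket_chunk_create(plain_body, alloc);

    /* Reading from the returned bucket yields plain_body's bytes again. */
    return serf_bucket_dechunk_create(chunked, alloc);
}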
apr_status_t serf_linebuf_fetch( serf_linebuf_t *linebuf, serf_bucket_t *bucket, int acceptable) { /* If we had a complete line, then assume the caller has used it, so * we can now reset the state. */ if (linebuf->state == SERF_LINEBUF_READY) { linebuf->state = SERF_LINEBUF_EMPTY; /* Reset the line_used, too, so we don't have to test the state * before using this value. */ linebuf->used = 0; linebuf->line[0] = '\0'; } while (1) { apr_status_t status; const char *data; apr_size_t len; if (linebuf->state == SERF_LINEBUF_CRLF_SPLIT) { /* On the previous read, we received just a CR. The LF might * be present, but the bucket couldn't see it. We need to * examine a single character to determine how to handle the * split CRLF. */ status = serf_bucket_peek(bucket, &data, &len); if (SERF_BUCKET_READ_ERROR(status)) return status; if (len > 0) { if (*data == '\n') { /* We saw the second part of CRLF. We don't need to * save that character, so do an actual read to suck * up that character. */ /* ### check status */ (void) serf_bucket_read(bucket, 1, &data, &len); } /* else: * We saw the first character of the next line. Thus, * the current line is terminated by the CR. Just * ignore whatever we peeked at. The next reader will * see it and handle it as appropriate. */ /* Whatever was read, the line is now ready for use. */ linebuf->state = SERF_LINEBUF_READY; } else { /* no data available, try again later. */ return APR_EAGAIN; } } else { int found; status = serf_bucket_readline(bucket, acceptable, &found, &data, &len); if (SERF_BUCKET_READ_ERROR(status)) { return status; } /* Some bucket types (socket) might need an extra read to find out EOF state, so they'll return no data in that read. This means we're done reading, return what we got. */ if (APR_STATUS_IS_EOF(status) && len == 0) { return status; } if (linebuf->used + len + 1 > sizeof(linebuf->line)) { return SERF_ERROR_LINE_TOO_LONG; } /* Note: our logic doesn't change for SERF_LINEBUF_PARTIAL. That * only affects how we fill the buffer. It is a communication to * our caller on whether the line is ready or not. */ /* If we didn't see a newline, then we should mark the line * buffer as partially complete. */ if (found == SERF_NEWLINE_NONE) { linebuf->state = SERF_LINEBUF_PARTIAL; } else if (found == SERF_NEWLINE_CRLF_SPLIT) { linebuf->state = SERF_LINEBUF_CRLF_SPLIT; /* Toss the partial CR. We won't ever need it. */ if (len > 0) --len; } else { /* We got a newline (of some form). We don't need it * in the line buffer, so back up the length. Then * mark the line as ready. */ len -= 1 + (found == SERF_NEWLINE_CRLF); linebuf->state = SERF_LINEBUF_READY; } /* The C99 standard (7.21.1/2) requires a valid data pointer * even for a zero-length array for all functions unless explicitly * stated otherwise. So don't copy data, even though most memcpy() * implementations have special handling for a zero-length copy. */ if (len > 0) { /* ### it would be nice to avoid this copy if at all possible, ### and just return a data/len pair to the caller. we're ### keeping it simple for now. */ memcpy(&linebuf->line[linebuf->used], data, len); linebuf->line[linebuf->used + len] = '\0'; linebuf->used += len; } } /* If we saw anything besides "success. please read again", then * we should return that status. If the line was completed, then * we should also return. */ if (status || linebuf->state == SERF_LINEBUF_READY) return status; /* We got APR_SUCCESS and the line buffer is not complete. Let's * loop to read some more data. */ } /* NOTREACHED */ }
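/* A hypothetical caller of serf_linebuf_fetch(): collect complete lines
   from a bucket until the bucket runs dry, assuming the usual
   serf_linebuf_init() helper and a caller-supplied on_line() callback
   (both named here for illustration only): */
#include "serf.h"
#include "serf_bucket_util.h"

static apr_status_t read_all_lines_sketch(serf_bucket_t *bucket,
                                          void (*on_line)(const char *line,
                                                          apr_size_t len))
{
    serf_linebuf_t linebuf;
    apr_status_t status;

    serf_linebuf_init(&linebuf);

    do {
        status = serf_linebuf_fetch(&linebuf, bucket, SERF_NEWLINE_ANY);
        if (SERF_BUCKET_READ_ERROR(status))
            return status;

        if (linebuf.state == SERF_LINEBUF_READY && linebuf.used)
            on_line(linebuf.line, linebuf.used);   /* newline already stripped */

        /* A partially filled linebuf survives across calls, which is
           exactly what fetch_headers() above relies on. */
    } while (!status);

    return status;   /* APR_EOF when done, APR_EAGAIN if more data is needed */
}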
/* Read the headers of the response and try the available handlers if authentication or validation is needed. */ apr_status_t serf__handle_auth_response(int *consumed_response, serf_request_t *request, serf_bucket_t *response, void *baton, apr_pool_t *pool) { apr_status_t status; serf_status_line sl; *consumed_response = 0; status = serf_bucket_response_status(response, &sl); if (SERF_BUCKET_READ_ERROR(status)) { return status; } if (!sl.version && (APR_STATUS_IS_EOF(status) || APR_STATUS_IS_EAGAIN(status))) { return status; } status = serf_bucket_response_wait_for_headers(response); if (status) { if (!APR_STATUS_IS_EOF(status)) { return status; } /* If status is APR_EOF, there were no headers to read. This can be ok in some situations, and it definitely means there's no authentication requested now. */ return APR_SUCCESS; } if (sl.code == 401 || sl.code == 407) { /* Authentication requested. */ /* Don't bother handling the authentication request if the response wasn't received completely yet. Serf will call serf__handle_auth_response again when more data is received. */ status = discard_body(response); *consumed_response = 1; /* Discard all response body before processing authentication. */ if (!APR_STATUS_IS_EOF(status)) { return status; } status = dispatch_auth(sl.code, request, response, baton, pool); if (status != APR_SUCCESS) { return status; } /* Requeue the request with the necessary auth headers. */ /* ### Application doesn't know about this request! */ serf_connection_priority_request_create(request->conn, request->setup, request->setup_baton); return APR_EOF; } return APR_SUCCESS; }
/* If a 200 OK was received for the CONNECT request, consider the connection as ready for use. */ static apr_status_t handle_response(serf_request_t *request, serf_bucket_t *response, void *handler_baton, apr_pool_t *pool) { apr_status_t status; serf_status_line sl; req_ctx_t *ctx = handler_baton; serf_connection_t *conn = request->conn; /* CONNECT request was cancelled. Assuming that this is during connection reset, we can safely discard the request as a new one will be created when setting up the next connection. */ if (!response) return APR_SUCCESS; status = serf_bucket_response_status(response, &sl); if (SERF_BUCKET_READ_ERROR(status)) { return status; } if (!sl.version && (APR_STATUS_IS_EOF(status) || APR_STATUS_IS_EAGAIN(status))) { return status; } status = serf_bucket_response_wait_for_headers(response); if (status && !APR_STATUS_IS_EOF(status)) { return status; } /* RFC 2817: Any successful (2xx) response to a CONNECT request indicates that the proxy has established a connection to the requested host and port, and has switched to tunneling the current connection to that server connection. */ if (sl.code >= 200 && sl.code < 300) { serf_bucket_t *hdrs; const char *val; conn->state = SERF_CONN_CONNECTED; /* Body is supposed to be empty. */ apr_pool_destroy(ctx->pool); serf_bucket_destroy(conn->ssltunnel_ostream); serf_bucket_destroy(conn->stream); conn->stream = NULL; ctx = NULL; serf__log_skt(CONN_VERBOSE, __FILE__, conn->skt, "successfully set up ssl tunnel.\n"); /* Fix for issue #123: ignore the "Connection: close" header here; leaving the header in place would make serf's main context loop close this connection immediately after reading the 200 OK response. */ hdrs = serf_bucket_response_get_headers(response); val = serf_bucket_headers_get(hdrs, "Connection"); if (val && strcasecmp("close", val) == 0) { serf__log_skt(CONN_VERBOSE, __FILE__, conn->skt, "Ignore Connection: close header on this response, don't " "close the connection now that the tunnel is set up.\n"); serf__bucket_headers_remove(hdrs, "Connection"); } return APR_EOF; } /* Authentication failures and 2xx responses are handled at this point; the rest are errors. */ return SERF_ERROR_SSLTUNNEL_SETUP_FAILED; }
/* Read the headers of the response and try the available handlers if authentication or validation is needed. */ apr_status_t serf__handle_auth_response(int *consumed_response, serf_request_t *request, serf_bucket_t *response, void *baton, apr_pool_t *pool) { apr_status_t status; serf_status_line sl; *consumed_response = 0; /* TODO: the response bucket was created by the application, not at all guaranteed that this is of type response_bucket!! */ status = serf_bucket_response_status(response, &sl); if (SERF_BUCKET_READ_ERROR(status)) { return status; } if (!sl.version && (APR_STATUS_IS_EOF(status) || APR_STATUS_IS_EAGAIN(status))) { return status; } status = serf_bucket_response_wait_for_headers(response); if (status) { if (!APR_STATUS_IS_EOF(status)) { return status; } /* If status is APR_EOF, there were no headers to read. This can be ok in some situations, and it definitely means there's no authentication requested now. */ return APR_SUCCESS; } if (sl.code == 401 || sl.code == 407) { /* Authentication requested. */ /* Don't bother handling the authentication request if the response wasn't received completely yet. Serf will call serf__handle_auth_response again when more data is received. */ status = discard_body(response); *consumed_response = 1; /* Discard all response body before processing authentication. */ if (!APR_STATUS_IS_EOF(status)) { return status; } status = dispatch_auth(sl.code, request, response, baton, pool); if (status != APR_SUCCESS) { return status; } /* Requeue the request with the necessary auth headers. */ /* ### Application doesn't know about this request! */ if (request->ssltunnel) { serf__ssltunnel_request_create(request->conn, request->setup, request->setup_baton); } else { serf_connection_priority_request_create(request->conn, request->setup, request->setup_baton); } return APR_EOF; } else { serf__validate_response_func_t validate_resp; serf_connection_t *conn = request->conn; serf_context_t *ctx = conn->ctx; serf__authn_info_t *authn_info; apr_status_t resp_status = APR_SUCCESS; /* Validate the response server authn headers. */ authn_info = serf__get_authn_info_for_server(conn); if (authn_info->scheme) { validate_resp = authn_info->scheme->validate_response_func; resp_status = validate_resp(authn_info->scheme, HOST, sl.code, conn, request, response, pool); } /* Validate the response proxy authn headers. */ authn_info = &ctx->proxy_authn_info; if (!resp_status && authn_info->scheme) { validate_resp = authn_info->scheme->validate_response_func; resp_status = validate_resp(authn_info->scheme, PROXY, sl.code, conn, request, response, pool); } if (resp_status) { /* If there was an error in the final step of the authentication, consider the reponse body as invalid and discard it. */ status = discard_body(response); *consumed_response = 1; if (!APR_STATUS_IS_EOF(status)) { return status; } /* The whole body was discarded, now return our error. */ return resp_status; } } return APR_SUCCESS; }
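/* discard_body() is referenced in both variants above but not shown.  Based
   on the read contract used throughout this section, a plausible
   (hypothetical) implementation simply drains the response until EOF or
   EAGAIN: */
#include "serf.h"

static apr_status_t discard_body_sketch(serf_bucket_t *response)
{
    const char *data;
    apr_size_t len;
    apr_status_t status;

    do {
        status = serf_bucket_read(response, SERF_READ_ALL_AVAIL, &data, &len);
        if (SERF_BUCKET_READ_ERROR(status))
            return status;
        /* The body bytes are intentionally ignored. */
    } while (!status);

    return status;   /* APR_EOF once the whole body is consumed */
}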