/* Vectored read for a chunk bucket.
 *
 * Lazily builds the next chunk from the wrapped stream (via create_chunk)
 * and then serves bytes out of the internal aggregate bucket.  While in
 * STATE_CHUNK an EOF from the aggregate only means "this chunk is done",
 * so it is replaced by the stream's last status and the state machine is
 * rewound to fetch another chunk.
 */
static apr_status_t serf_chunk_read_iovec(serf_bucket_t *bucket,
                                          apr_size_t requested,
                                          int vecs_size,
                                          struct iovec *vecs,
                                          int *vecs_used)
{
    chunk_context_t *ctx = bucket->data;
    apr_status_t rv;

    /* Nothing staged yet?  Pull data from the stream and wrap it up. */
    if (ctx->state == STATE_FETCH) {
        rv = create_chunk(bucket);
        if (rv)
            return rv;
    }

    rv = serf_bucket_read_iovec(ctx->chunk, requested, vecs_size,
                                vecs, vecs_used);

    /* The aggregate ran dry mid-stream: surface the stream's status
     * instead of a premature EOF, and arrange to fetch the next chunk. */
    if (APR_STATUS_IS_EOF(rv) && ctx->state == STATE_CHUNK) {
        rv = ctx->last_status;
        ctx->state = STATE_FETCH;
    }

    return rv;
}
/* Vectored read for a barrier bucket: delegates directly to the wrapped
 * stream, passing all arguments through unchanged. */
static apr_status_t serf_barrier_read_iovec(serf_bucket_t *bucket,
                                            apr_size_t requested,
                                            int vecs_size,
                                            struct iovec *vecs,
                                            int *vecs_used)
{
    barrier_context_t *ctx = bucket->data;

    return serf_bucket_read_iovec(ctx->stream, requested, vecs_size,
                                  vecs, vecs_used);
}
static apr_status_t handle_response(serf_request_t *request, serf_bucket_t *response, void *handler_baton, apr_pool_t *pool) { serf_status_line sl; apr_status_t status; handler_baton_t *ctx = handler_baton; if (!response) { /* A NULL response probably means that the connection was closed while this request was already written. Just requeue it. */ serf_connection_t *conn = serf_request_get_conn(request); serf_connection_request_create(conn, setup_request, handler_baton); return APR_SUCCESS; } status = serf_bucket_response_status(response, &sl); if (status) { return status; } while (1) { struct iovec vecs[64]; int vecs_read; apr_size_t bytes_written; status = serf_bucket_read_iovec(response, 8000, 64, vecs, &vecs_read); if (SERF_BUCKET_READ_ERROR(status)) return status; /* got some data. print it out. */ if (vecs_read) { apr_file_writev(ctx->output_file, vecs, vecs_read, &bytes_written); } /* are we done yet? */ if (APR_STATUS_IS_EOF(status)) { if (ctx->print_headers) { serf_bucket_t *hdrs; hdrs = serf_bucket_response_get_headers(response); while (1) { status = serf_bucket_read_iovec(hdrs, 8000, 64, vecs, &vecs_read); if (SERF_BUCKET_READ_ERROR(status)) return status; if (vecs_read) { apr_file_writev(ctx->output_file, vecs, vecs_read, &bytes_written); } if (APR_STATUS_IS_EOF(status)) { break; } } } apr_atomic_inc32(&ctx->completed_requests); return APR_EOF; } /* have we drained the response so far? */ if (APR_STATUS_IS_EAGAIN(status)) return status; /* loop to read some more. */ } /* NOTREACHED */ }
/* This function reads a decrypted stream and returns an encrypted stream.
 *
 * Pull-style encrypt callback: fills BUF (capacity BUFSIZE) with encrypted
 * bytes and sets *LEN to the amount delivered.  Ciphertext produced by
 * SSL_write() accumulates in ctx->encrypt.pending; plaintext is pulled from
 * ctx->encrypt.stream.  Order of operations:
 *   1. Serve any ciphertext still sitting in the pending bucket.
 *   2. Otherwise, read plaintext, push it through SSL_write(), then collect
 *      the freshly produced ciphertext out of pending.
 */
static apr_status_t ssl_encrypt(void *baton, apr_size_t bufsize,
                                char *buf, apr_size_t *len)
{
    const char *data;
    apr_size_t interim_bufsize;
    serf_ssl_context_t *ctx = baton;
    apr_status_t status;

#ifdef SSL_VERBOSE
    /* NOTE(review): %d with an apr_size_t argument is a format mismatch on
     * LP64 platforms; debug-only code. */
    printf("ssl_encrypt: begin %d\n", bufsize);
#endif

    /* Try to read already encrypted but unread data first. */
    status = serf_bucket_read(ctx->encrypt.pending, bufsize, &data, len);
    if (SERF_BUCKET_READ_ERROR(status)) {
        return status;
    }

    /* Aha, we read something. Return that now. */
    if (*len) {
        memcpy(buf, data, *len);
        /* EOF here only means the pending buffer drained, not that the
         * encrypted stream as a whole is finished. */
        if (APR_STATUS_IS_EOF(status)) {
            status = APR_SUCCESS;
        }

#ifdef SSL_VERBOSE
        printf("ssl_encrypt: %d %d %d (quick read)\n", status, *len,
               BIO_get_retry_flags(ctx->bio));
#endif

        return status;
    }

    /* The BIO wants a write retry -- presumably we must not feed more
     * plaintext until that completes.  TODO confirm against OpenSSL docs. */
    if (BIO_should_retry(ctx->bio) && BIO_should_write(ctx->bio)) {
#ifdef SSL_VERBOSE
        printf("ssl_encrypt: %d %d %d (should write exit)\n", status, *len,
               BIO_get_retry_flags(ctx->bio));
#endif
        return APR_EAGAIN;
    }

    /* If we were previously blocked, unblock ourselves now. */
    if (BIO_should_read(ctx->bio)) {
#ifdef SSL_VERBOSE
        printf("ssl_encrypt: reset %d %d (%d %d %d)\n", status,
               ctx->encrypt.status, BIO_should_retry(ctx->bio),
               BIO_should_read(ctx->bio), BIO_get_retry_flags(ctx->bio));
#endif
        ctx->encrypt.status = APR_SUCCESS;
        ctx->encrypt.exhausted_reset = 0;
    }

    /* Oh well, read from our stream now. */
    interim_bufsize = bufsize;
    do {
        apr_size_t interim_len;

        if (!ctx->encrypt.status) {
            struct iovec vecs[64];
            int vecs_read;

            status = serf_bucket_read_iovec(ctx->encrypt.stream,
                                            interim_bufsize, 64, vecs,
                                            &vecs_read);

            if (!SERF_BUCKET_READ_ERROR(status) && vecs_read) {
                char *vecs_data;
                int i, cur, vecs_data_len;
                int ssl_len;

                /* Combine the buffers of the iovec into one contiguous
                 * buffer, as that is what SSL_write() requires. */
                vecs_data_len = 0;
                for (i = 0; i < vecs_read; i++) {
                    vecs_data_len += vecs[i].iov_len;
                }

                vecs_data = serf_bucket_mem_alloc(ctx->allocator,
                                                  vecs_data_len);

                cur = 0;
                for (i = 0; i < vecs_read; i++) {
                    memcpy(vecs_data + cur, vecs[i].iov_base,
                           vecs[i].iov_len);
                    cur += vecs[i].iov_len;
                }

                /* Shrink the remaining plaintext budget for this call. */
                interim_bufsize -= vecs_data_len;
                interim_len = vecs_data_len;

#ifdef SSL_VERBOSE
                printf("ssl_encrypt: bucket read %d bytes; status %d\n",
                       interim_len, status);
                printf("---\n%s\n-(%d)-\n", vecs_data, interim_len);
#endif

                /* Stash our status away. */
                ctx->encrypt.status = status;

                /* Ciphertext produced here lands in the BIO/pending bucket
                 * and is collected after the loop. */
                ssl_len = SSL_write(ctx->ssl, vecs_data, interim_len);

#ifdef SSL_VERBOSE
                printf("ssl_encrypt: SSL write: %d\n", ssl_len);
#endif

                /* We're done with the linearized copy. */
                serf_bucket_mem_free(ctx->allocator, vecs_data);

                /* If we failed to write... */
                if (ssl_len < 0) {
                    int ssl_err;

                    /* Ah, bugger. We need to put that data back.
                     * (vecs still references the original bucket data, so
                     * prepending restores the stream unchanged.) */
                    serf_bucket_aggregate_prepend_iovec(ctx->encrypt.stream,
                                                        vecs, vecs_read);

                    ssl_err = SSL_get_error(ctx->ssl, ssl_len);

#ifdef SSL_VERBOSE
                    printf("ssl_encrypt: SSL write error: %d\n", ssl_err);
#endif

                    if (ssl_err == SSL_ERROR_SYSCALL) {
                        /* Surface the underlying stream's status; only a
                         * real read error aborts immediately. */
                        status = ctx->encrypt.status;
                        if (SERF_BUCKET_READ_ERROR(status)) {
                            return status;
                        }
                    }
                    else {
                        /* Oh, no. */
                        if (ssl_err == SSL_ERROR_WANT_READ) {
                            status = SERF_ERROR_WAIT_CONN;
                        }
                        else {
                            status = APR_EGENERAL;
                        }
                    }

#ifdef SSL_VERBOSE
                    printf("ssl_encrypt: SSL write error: %d %d\n", status,
                           *len);
#endif
                }
            }
        }
        else {
            /* A previous iteration/call left a sticky status: surface it
             * instead of reading more plaintext. */
            interim_len = 0;
            *len = 0;
            status = ctx->encrypt.status;
        }
    } while (!status && interim_bufsize);

    /* Okay, we exhausted our underlying stream. */
    if (!SERF_BUCKET_READ_ERROR(status)) {
        apr_status_t agg_status;
        struct iovec vecs[64];
        int vecs_read, i;

        /* We read something!  Collect the ciphertext that SSL_write()
         * deposited into the pending bucket. */
        agg_status = serf_bucket_read_iovec(ctx->encrypt.pending, bufsize,
                                            64, vecs, &vecs_read);

        *len = 0;
        for (i = 0; i < vecs_read; i++) {
            memcpy(buf + *len, vecs[i].iov_base, vecs[i].iov_len);
            *len += vecs[i].iov_len;
        }

#ifdef SSL_VERBOSE
        printf("ssl_encrypt read agg: %d %d %d %d\n", status, agg_status,
               ctx->encrypt.status, *len);
#endif

        /* Only a clean (APR_SUCCESS) read of the pending data overrides
         * the status the plaintext stream left us with.
         * NOTE(review): EOF/EAGAIN from this pending read are ignored
         * here -- confirm this asymmetry against upstream intent. */
        if (!agg_status) {
            status = agg_status;
        }
    }

    if (status == SERF_ERROR_WAIT_CONN
        && BIO_should_retry(ctx->bio) && BIO_should_read(ctx->bio)) {
        /* Park the real stream status in 'exhausted' and make encrypt
         * reads report WAIT_CONN until the BIO gets the data it needs. */
        ctx->encrypt.exhausted = ctx->encrypt.status;
        ctx->encrypt.status = SERF_ERROR_WAIT_CONN;
    }

#ifdef SSL_VERBOSE
    printf("ssl_encrypt finished: %d %d (%d %d %d)\n", status, *len,
           BIO_should_retry(ctx->bio), BIO_should_read(ctx->bio),
           BIO_get_retry_flags(ctx->bio));
#endif

    return status;
}
/* Build the next HTTP chunk for a chunked-encoding bucket.
 *
 * Pulls whatever is available from the wrapped stream, prefixes it with a
 * hex length header, appends the CRLF chunk footer, and queues it all on
 * ctx->chunk.  On stream EOF the terminating "0 CRLF CRLF" chunk is queued
 * and the state machine moves to STATE_EOF; otherwise to STATE_CHUNK.  The
 * stream's read status is remembered in ctx->last_status.
 */
static apr_status_t create_chunk(serf_bucket_t *bucket)
{
    chunk_context_t *ctx = bucket->data;
    struct iovec vecs[66]; /* 64 + chunk trailer + EOF trailer = 66 */
    serf_bucket_t *hdr_bkt;
    apr_size_t hdr_len;
    apr_size_t payload_len;
    int nvecs;
    int idx;

    /* Only build a chunk when the state machine asks for one. */
    if (ctx->state != STATE_FETCH) {
        return APR_SUCCESS;
    }

    ctx->last_status =
        serf_bucket_read_iovec(ctx->stream, SERF_READ_ALL_AVAIL,
                               64, vecs, &nvecs);

    if (SERF_BUCKET_READ_ERROR(ctx->last_status)) {
        /* Uh-oh. */
        return ctx->last_status;
    }

    /* Total number of payload bytes we just read. */
    payload_len = 0;
    for (idx = 0; idx < nvecs; idx++) {
        payload_len += vecs[idx].iov_len;
    }

    /* assert: payload_len in hex < sizeof(ctx->chunk_hdr) */

    /* A zero-length chunk is the terminator, and the EOF handling below
     * already emits one.  Emitting another here would make the server
     * treat the real terminator as the start of a new request, so only
     * write a header/footer when there is actual payload. */
    if (payload_len) {
        /* Build the chunk header. */
        hdr_len = apr_snprintf(ctx->chunk_hdr, sizeof(ctx->chunk_hdr),
                               "%" APR_UINT64_T_HEX_FMT CRLF,
                               (apr_uint64_t)payload_len);

        /* Copy the header so several chunks can sit in the pipeline at
         * the same time. */
        hdr_bkt = serf_bucket_simple_copy_create(ctx->chunk_hdr, hdr_len,
                                                 bucket->allocator);
        serf_bucket_aggregate_append(ctx->chunk, hdr_bkt);

        /* Insert the chunk footer. */
        vecs[nvecs].iov_base = CRLF;
        vecs[nvecs++].iov_len = sizeof(CRLF) - 1;
    }

    if (APR_STATUS_IS_EOF(ctx->last_status)) {
        /* End of the line for the stream: append the terminating chunk. */
        vecs[nvecs].iov_base = "0" CRLF CRLF;
        vecs[nvecs++].iov_len = sizeof("0" CRLF CRLF) - 1;
        ctx->state = STATE_EOF;
    }
    else {
        /* Okay, we can return data. */
        ctx->state = STATE_CHUNK;
    }

    serf_bucket_aggregate_append_iovec(ctx->chunk, vecs, nvecs);

    return APR_SUCCESS;
}
/* Vectored read across an aggregate bucket's list of child buckets.
 *
 * Reads from the head child and, when the child reports EOF, parks it on
 * the "done" list (kept alive so data already handed out stays valid) and
 * continues with the next child -- until the caller's iovec array or
 * REQUESTED is exhausted, or a child reports data, EAGAIN or WAIT_CONN.
 * With no children left, defers to the hold_open callback if one is set,
 * otherwise reports EOF.
 */
static apr_status_t read_aggregate(serf_bucket_t *bucket,
                                   apr_size_t requested,
                                   int vecs_size, struct iovec *vecs,
                                   int *vecs_used)
{
    aggregate_context_t *ctx = bucket->data;
    apr_status_t rv = APR_SUCCESS;
    int head_used;

    *vecs_used = 0;

    if (!ctx->list) {
        /* Nothing queued: either the owner keeps us open, or it's EOF. */
        if (ctx->hold_open)
            return ctx->hold_open(ctx->hold_open_baton, bucket);
        return APR_EOF;
    }

    while (requested) {
        serf_bucket_t *head = ctx->list->bucket;

        rv = serf_bucket_read_iovec(head, requested, vecs_size,
                                    vecs, &head_used);
        if (SERF_BUCKET_READ_ERROR(rv))
            return rv;

        /* Fold this child's vecs into the caller's running total. */
        *vecs_used += head_used;

        if (head_used > 0 || rv) {
            bucket_list_t *next_list;

            /* SUCCESS (with bytes), EAGAIN or WAIT_CONN must be returned
             * right away; it isn't safe to read more before handing
             * control back to the caller. */
            if (!rv || APR_STATUS_IS_EAGAIN(rv)
                || rv == SERF_ERROR_WAIT_CONN) {
                return rv;
            }

            /* EOF from the head child: stash it on the to-be-freed list
             * so the bucket stays alive (read-data lifetime semantics).
             * The done list is destroyed on a later read operation. */
            next_list = ctx->list->next;
            ctx->list->next = ctx->done;
            ctx->done = ctx->list;
            ctx->list = next_list;

            /* No children left: hold open or report EOF. */
            if (!ctx->list) {
                if (ctx->hold_open)
                    return ctx->hold_open(ctx->hold_open_baton, bucket);
                return APR_EOF;
            }

            /* At this point it is safe to read the next child. */

            /* Unless the caller asked for ALL_AVAIL, deduct what this
             * child just delivered from the remaining budget. */
            if (requested != SERF_READ_ALL_AVAIL) {
                int i;

                for (i = 0; i < head_used; i++)
                    requested -= vecs[i].iov_len;
            }

            /* Advance the caller's iovec window past the filled slots. */
            vecs_size -= head_used;
            vecs += head_used;

            /* Either limit reached: report what we have so far. */
            if (!requested || !vecs_size) {
                return APR_SUCCESS;
            }
        }
    }

    return rv;
}