/* Destroy the encrypt-side wrapper bucket and its share of the SSL state.
 *
 * Only tears down the shared encrypt machinery when this bucket's stream
 * is the one currently being encrypted; if the stream has not been handed
 * to the encrypt side yet, the bucket data must be left untouched and no
 * teardown happens (early return).  On teardown, the active stream is
 * advanced to the next queued stream, if any. */
static void serf_ssl_encrypt_destroy_and_data(serf_bucket_t *bucket)
{
    ssl_context_t *ctx = bucket->data;
    serf_ssl_context_t *ssl_ctx = ctx->ssl_ctx;

    if (ssl_ctx->encrypt.stream == *ctx->our_stream) {
        serf_bucket_destroy(*ctx->our_stream);
        serf_bucket_destroy(ssl_ctx->encrypt.pending);

        /* Reset our encrypted status and databuf. */
        ssl_ctx->encrypt.status = APR_SUCCESS;
        ssl_ctx->encrypt.databuf.status = APR_SUCCESS;

        /* Advance to the next stream - if we have one. */
        if (ssl_ctx->encrypt.stream_next == NULL) {
            ssl_ctx->encrypt.stream = NULL;
            ssl_ctx->encrypt.pending = NULL;
        }
        else {
            bucket_list_t *cur;

            /* Pop the head of the queued-stream list: it becomes the
             * active encrypt stream, with a fresh pending aggregate
             * allocated from that stream's allocator. */
            cur = ssl_ctx->encrypt.stream_next;
            ssl_ctx->encrypt.stream = cur->bucket;
            ssl_ctx->encrypt.pending =
                serf_bucket_aggregate_create(cur->bucket->allocator);
            ssl_ctx->encrypt.stream_next = cur->next;
            serf_bucket_mem_free(ssl_ctx->allocator, cur);
        }
    }
    else {
        /* Ah, darn. We haven't sent this one along yet. */
        return;
    }
    serf_ssl_destroy_and_data(bucket);
}
/* Tear down a chunk bucket: release the wrapped stream, the current
 * chunk-header bucket, and finally the bucket's own context/data. */
static void serf_chunk_destroy(serf_bucket_t *bucket)
{
    chunk_context_t *chunk_ctx = bucket->data;

    serf_bucket_destroy(chunk_ctx->stream);
    serf_bucket_destroy(chunk_ctx->chunk);

    serf_default_destroy_and_data(bucket);
}
/* Tear down a deflate bucket: release the inflate stream (when we still
 * own it), the wrapped stream, and the bucket's own context/data. */
static void serf_deflate_destroy_and_data(serf_bucket_t *bucket)
{
    deflate_context_t *deflate_ctx = bucket->data;

    /* inflate_stream may have been appended into the stream bucket, in
     * which case the stream owns it; only destroy it here while we still
     * hold a reference, so it is not freed twice. */
    if (deflate_ctx->inflate_stream != NULL) {
        serf_bucket_destroy(deflate_ctx->inflate_stream);
    }

    serf_bucket_destroy(deflate_ctx->stream);

    serf_default_destroy_and_data(bucket);
}
/* Tear down a response bucket: free the parsed reason phrase (when one
 * was allocated), the wrapped stream, the optional body bucket, the
 * headers bucket, and the bucket's own context/data. */
static void serf_response_destroy_and_data(serf_bucket_t *bucket)
{
    response_context_t *resp_ctx = bucket->data;

    /* The reason phrase is only allocated once parsing has moved past
     * the status line; before that there is nothing to free. */
    if (resp_ctx->state != STATE_STATUS_LINE) {
        serf_bucket_mem_free(bucket->allocator, (void*)resp_ctx->sl.reason);
    }

    serf_bucket_destroy(resp_ctx->stream);
    if (resp_ctx->body != NULL) {
        serf_bucket_destroy(resp_ctx->body);
    }
    serf_bucket_destroy(resp_ctx->headers);

    serf_default_destroy_and_data(bucket);
}
/* Tear down a decrypt-side SSL bucket: release the wrapped stream, then
 * let the shared SSL destroy routine handle the rest. */
static void serf_ssl_decrypt_destroy_and_data(serf_bucket_t *bucket)
{
    ssl_context_t *ssl_bucket_ctx = bucket->data;

    serf_bucket_destroy(*ssl_bucket_ctx->our_stream);

    serf_ssl_destroy_and_data(bucket);
}
/* Tear down a limit bucket: release the wrapped stream and then the
 * bucket's own context/data. */
static void serf_limit_destroy(serf_bucket_t *bucket)
{
    limit_context_t *limit_ctx = bucket->data;

    serf_bucket_destroy(limit_ctx->stream);

    serf_default_destroy_and_data(bucket);
}
/* Release every resource owned by an SSL context: the pending buckets
 * (if they were ever created), the OpenSSL objects, the context struct
 * itself, and finally its pool.  Always returns APR_SUCCESS. */
static apr_status_t ssl_free_context(
    serf_ssl_context_t *ssl_ctx)
{
    apr_pool_t *pool;

    /* If we never had the pending buckets, don't try to free them. */
    if (ssl_ctx->decrypt.pending != NULL) {
        serf_bucket_destroy(ssl_ctx->decrypt.pending);
    }
    if (ssl_ctx->encrypt.pending != NULL) {
        serf_bucket_destroy(ssl_ctx->encrypt.pending);
    }

    /* SSL_free implicitly frees the underlying BIO. */
    SSL_free(ssl_ctx->ssl);
    SSL_CTX_free(ssl_ctx->ctx);

    /* Grab the pool before freeing the struct that stores it, then
     * destroy the pool last. */
    pool = ssl_ctx->pool;
    serf_bucket_mem_free(ssl_ctx->allocator, ssl_ctx);
    apr_pool_destroy(pool);

    return APR_SUCCESS;
}
/* Tear down an aggregate bucket: drain the queued-bucket list
 * (destroying each bucket only when we own it), flush the done list via
 * cleanup_aggregate, then free the bucket's own context/data. */
static void serf_aggregate_destroy_and_data(serf_bucket_t *bucket)
{
    aggregate_context_t *agg_ctx = bucket->data;

    while (agg_ctx->list != NULL) {
        bucket_list_t *node = agg_ctx->list;

        if (agg_ctx->bucket_owner) {
            serf_bucket_destroy(node->bucket);
        }
        agg_ctx->list = node->next;
        serf_bucket_mem_free(bucket->allocator, node);
    }

    cleanup_aggregate(agg_ctx, bucket->allocator);

    serf_default_destroy_and_data(bucket);
}
/* Dispose of buckets that were fully consumed during a previous read:
 * destroy each one (only when we own it) and free its list node. */
static void cleanup_aggregate(aggregate_context_t *ctx,
                              serf_bucket_alloc_t *allocator)
{
    while (ctx->done != NULL) {
        bucket_list_t *node = ctx->done;
        bucket_list_t *next = node->next;

        if (ctx->bucket_owner) {
            serf_bucket_destroy(node->bucket);
        }
        serf_bucket_mem_free(allocator, node);
        ctx->done = next;
    }
}
/* If a 200 OK was received for the CONNECT request, consider the connection
   as ready for use.

   Response handler for the proxy CONNECT request.  Returns APR_EOF once the
   tunnel is established, passes transient statuses (EAGAIN / incomplete
   headers) back to the caller, and returns
   SERF_ERROR_SSLTUNNEL_SETUP_FAILED for any non-2xx reply. */
static apr_status_t handle_response(serf_request_t *request,
                                    serf_bucket_t *response,
                                    void *handler_baton,
                                    apr_pool_t *pool)
{
    apr_status_t status;
    serf_status_line sl;
    req_ctx_t *ctx = handler_baton;
    serf_connection_t *conn = request->conn;

    /* CONNECT request was cancelled. Assuming that this is during connection
       reset, we can safely discard the request as a new one will be created
       when setting up the next connection. */
    if (!response)
        return APR_SUCCESS;

    status = serf_bucket_response_status(response, &sl);
    if (SERF_BUCKET_READ_ERROR(status)) {
        return status;
    }

    /* No complete status line yet: let the caller retry. */
    if (!sl.version && (APR_STATUS_IS_EOF(status) ||
                        APR_STATUS_IS_EAGAIN(status))) {
        return status;
    }

    status = serf_bucket_response_wait_for_headers(response);
    if (status && !APR_STATUS_IS_EOF(status)) {
        return status;
    }

    /* RFC 2817: Any successful (2xx) response to a CONNECT request indicates
       that the proxy has established a connection to the requested host and
       port, and has switched to tunneling the current connection to that
       server connection. */
    if (sl.code >= 200 && sl.code < 300) {
        serf_bucket_t *hdrs;
        const char *val;

        conn->state = SERF_CONN_CONNECTED;

        /* Body is supposed to be empty. */
        apr_pool_destroy(ctx->pool);
        serf_bucket_destroy(conn->ssltunnel_ostream);
        serf_bucket_destroy(conn->stream);
        conn->stream = NULL;
        ctx = NULL;

        serf__log_skt(CONN_VERBOSE, __FILE__, conn->skt,
                      "successfully set up ssl tunnel.\n");

        /* Fix for issue #123: ignore the "Connection: close" header here,
           leaving the header in place would make the serf's main context
           loop close this connection immediately after reading the 200 OK
           response. */
        hdrs = serf_bucket_response_get_headers(response);
        val = serf_bucket_headers_get(hdrs, "Connection");
        if (val && strcasecmp("close", val) == 0) {
            /* Typo fix: "reponse" -> "response" in the log message. */
            serf__log_skt(CONN_VERBOSE, __FILE__, conn->skt,
                          "Ignore Connection: close header on this response, "
                          "don't close the connection now that the tunnel is "
                          "set up.\n");
            serf__bucket_headers_remove(hdrs, "Connection");
        }

        return APR_EOF;
    }

    /* Authentication failure and 2xx Ok are handled at this point, the
       rest are errors. */
    return SERF_ERROR_SSLTUNNEL_SETUP_FAILED;
}