static serf_ssl_context_t *ssl_init_context(void)
{
    serf_ssl_context_t *ssl_ctx;
    apr_pool_t *pool;
    serf_bucket_alloc_t *allocator;

    init_ssl_libraries();

    apr_pool_create(&pool, NULL);
    allocator = serf_bucket_allocator_create(pool, NULL, NULL);

    ssl_ctx = serf_bucket_mem_alloc(allocator, sizeof(*ssl_ctx));
    ssl_ctx->refcount = 0;
    ssl_ctx->pool = pool;
    ssl_ctx->allocator = allocator;

    ssl_ctx->ctx = SSL_CTX_new(SSLv23_client_method());
    SSL_CTX_set_client_cert_cb(ssl_ctx->ctx, ssl_need_client_cert);
    ssl_ctx->cached_cert = 0;
    ssl_ctx->cached_cert_pw = 0;
    ssl_ctx->pending_err = APR_SUCCESS;

    SSL_CTX_set_verify(ssl_ctx->ctx, SSL_VERIFY_PEER,
                       validate_server_certificate);
    SSL_CTX_set_options(ssl_ctx->ctx, SSL_OP_ALL);

    ssl_ctx->ssl = SSL_new(ssl_ctx->ctx);
    ssl_ctx->bio = BIO_new(&bio_bucket_method);
    ssl_ctx->bio->ptr = ssl_ctx;

    SSL_set_bio(ssl_ctx->ssl, ssl_ctx->bio, ssl_ctx->bio);

    SSL_set_connect_state(ssl_ctx->ssl);

    SSL_set_app_data(ssl_ctx->ssl, ssl_ctx);

    ssl_ctx->encrypt.stream = NULL;
    ssl_ctx->encrypt.stream_next = NULL;
    ssl_ctx->encrypt.pending = serf_bucket_aggregate_create(allocator);
    ssl_ctx->encrypt.status = APR_SUCCESS;
    serf_databuf_init(&ssl_ctx->encrypt.databuf);
    ssl_ctx->encrypt.databuf.read = ssl_encrypt;
    ssl_ctx->encrypt.databuf.read_baton = ssl_ctx;

    ssl_ctx->decrypt.stream = NULL;
    ssl_ctx->decrypt.pending = serf_bucket_aggregate_create(allocator);
    ssl_ctx->decrypt.status = APR_SUCCESS;
    serf_databuf_init(&ssl_ctx->decrypt.databuf);
    ssl_ctx->decrypt.databuf.read = ssl_decrypt;
    ssl_ctx->decrypt.databuf.read_baton = ssl_ctx;

    return ssl_ctx;
}
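/* Teardown sketch (an assumption, not part of the code above): serf's real
 * cleanup path is not shown here, so this only illustrates the ownership
 * rules implied by the wiring in ssl_init_context().  SSL_free() also
 * releases the BIO pair installed with SSL_set_bio(), so the BIO must not
 * be freed separately; destroying the pool then reclaims the allocator and
 * the context memory. */
static void ssl_destroy_context(serf_ssl_context_t *ssl_ctx)
{
    apr_pool_t *pool = ssl_ctx->pool;

    SSL_free(ssl_ctx->ssl);        /* frees ssl_ctx->bio as well */
    SSL_CTX_free(ssl_ctx->ctx);

    /* ssl_ctx itself came from the bucket allocator, which lives in POOL,
     * so destroying the pool reclaims everything that is left. */
    apr_pool_destroy(pool);
}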
static test_baton_t *initTestCtx(apr_pool_t *pool)
{
    test_baton_t *tb;

    tb = apr_pcalloc(pool, sizeof(*tb));
    tb->pool = pool;
    tb->bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);
    tb->accepted_requests = apr_array_make(pool, 10, sizeof(int));
    tb->sent_requests = apr_array_make(pool, 10, sizeof(int));
    tb->handled_requests = apr_array_make(pool, 10, sizeof(int));

    return tb;
}
static apr_status_t setup(test_baton_t **tb_p,
                          serf_connection_setup_t conn_setup,
                          int use_proxy,
                          apr_pool_t *pool)
{
    apr_status_t status;
    test_baton_t *tb;
    apr_uri_t url;

    tb = apr_pcalloc(pool, sizeof(*tb));
    *tb_p = tb;

    tb->pool = pool;
    tb->context = serf_context_create(pool);
    tb->bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);

    status = default_server_address(&tb->serv_addr, pool);
    if (status != APR_SUCCESS)
        return status;

    if (use_proxy) {
        status = default_proxy_address(&tb->proxy_addr, pool);
        if (status != APR_SUCCESS)
            return status;

        /* Configure serf to use the proxy server */
        serf_config_proxy(tb->context, tb->proxy_addr);
    }

    status = apr_uri_parse(pool, SERV_URL, &url);
    if (status != APR_SUCCESS)
        return status;

    status = serf_connection_create2(&tb->connection, tb->context, url,
                                     conn_setup ? conn_setup :
                                                  default_conn_setup,
                                     tb,
                                     default_closed_connection, tb,
                                     pool);

    return status;
}
/* Set up the client context, ready to connect and send requests to a
   server. */
static apr_status_t setup(test_baton_t **tb_p,
                          serf_connection_setup_t conn_setup,
                          const char *serv_url,
                          int use_proxy,
                          apr_size_t message_count,
                          apr_pool_t *pool)
{
    test_baton_t *tb;
    apr_status_t status;

    tb = apr_pcalloc(pool, sizeof(*tb));
    *tb_p = tb;

    tb->pool = pool;
    tb->context = serf_context_create(pool);
    tb->bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);

    tb->accepted_requests = apr_array_make(pool, message_count, sizeof(int));
    tb->sent_requests = apr_array_make(pool, message_count, sizeof(int));
    tb->handled_requests = apr_array_make(pool, message_count, sizeof(int));

    tb->serv_url = serv_url;
    tb->conn_setup = conn_setup;

    status = default_server_address(&tb->serv_addr, pool);
    if (status != APR_SUCCESS)
        return status;

    if (use_proxy) {
        status = default_proxy_address(&tb->proxy_addr, pool);
        if (status != APR_SUCCESS)
            return status;

        /* Configure serf to use the proxy server */
        serf_config_proxy(tb->context, tb->proxy_addr);
    }

    status = use_new_connection(tb, pool);

    return status;
}
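/* Hypothetical caller of the setup() helper above.  The URL literal and
 * message count are placeholders, and a NULL conn_setup is assumed to fall
 * back to default_conn_setup as in the other setup() variant. */
static apr_status_t example_setup_client(apr_pool_t *pool)
{
    test_baton_t *tb;

    return setup(&tb,
                 NULL,                       /* use the default conn setup */
                 "http://localhost:12345",   /* placeholder server URL */
                 0,                          /* no proxy */
                 10,                         /* expected message count */
                 pool);
}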
/* Test setting up the OpenSSL library. */
static void test_ssl_init(CuTest *tc)
{
    serf_bucket_t *bkt, *stream;
    serf_ssl_context_t *ssl_context;
    apr_status_t status;
    apr_pool_t *test_pool = test_setup();
    serf_bucket_alloc_t *alloc = serf_bucket_allocator_create(test_pool, NULL,
                                                              NULL);

    stream = SERF_BUCKET_SIMPLE_STRING("", alloc);

    bkt = serf_bucket_ssl_decrypt_create(stream, NULL, alloc);
    ssl_context = serf_bucket_ssl_decrypt_context_get(bkt);

    bkt = serf_bucket_ssl_encrypt_create(stream, ssl_context, alloc);

    status = serf_ssl_use_default_certificates(ssl_context);
    CuAssertIntEquals(tc, APR_SUCCESS, status);

    test_teardown(test_pool);
}
/* Set up the context needed to start a TCP server on address.
   message_list is a list of expected requests.
   action_list is the list of responses to be returned in order. */
void setup_test_server(serv_ctx_t **servctx_p,
                       apr_sockaddr_t *address,
                       test_server_message_t *message_list,
                       apr_size_t message_count,
                       test_server_action_t *action_list,
                       apr_size_t action_count,
                       apr_int32_t options,
                       apr_pool_t *pool)
{
    serv_ctx_t *servctx;

    servctx = apr_pcalloc(pool, sizeof(*servctx));
    apr_pool_cleanup_register(pool, servctx,
                              cleanup_server,
                              apr_pool_cleanup_null);
    *servctx_p = servctx;

    servctx->serv_addr = address;
    servctx->options = options;
    servctx->pool = pool;
    servctx->allocator = serf_bucket_allocator_create(pool, NULL, NULL);
    servctx->message_list = message_list;
    servctx->message_count = message_count;
    servctx->action_list = action_list;
    servctx->action_count = action_count;

    /* Start replay from first action. */
    servctx->cur_action = 0;
    servctx->action_buf_pos = 0;
    servctx->outstanding_responses = 0;

    servctx->read = socket_read;
    servctx->send = socket_write;

    *servctx_p = servctx;
}
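/* Usage sketch (hypothetical): start the mock server with empty request and
 * response scripts.  Only the signature shown above is relied on; a real
 * test would pass populated message_list/action_list arrays instead of
 * NULLs. */
static void example_start_server(apr_sockaddr_t *address, apr_pool_t *pool)
{
    serv_ctx_t *servctx;

    setup_test_server(&servctx, address,
                      NULL, 0,    /* no expected requests scripted yet */
                      NULL, 0,    /* no canned responses scripted yet */
                      0,          /* options */
                      pool);
}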
int main(int argc, const char **argv)
{
    apr_status_t status;
    apr_pool_t *pool;
    apr_sockaddr_t *address;
    serf_context_t *context;
    serf_connection_t *connection;
    app_baton_t app_ctx;
    handler_baton_t *handler_ctx;
    apr_uri_t url;
    const char *raw_url, *method;
    int count;
    apr_getopt_t *opt;
    char opt_c;
    char *authn = NULL;
    const char *opt_arg;

    /* For the parser threads */
    apr_thread_t *thread[3];
    apr_threadattr_t *tattr;
    apr_status_t parser_status;
    parser_baton_t *parser_ctx;

    apr_initialize();
    atexit(apr_terminate);

    apr_pool_create(&pool, NULL);
    apr_atomic_init(pool);
    /* serf_initialize(); */

    /* Default to one round of fetching. */
    count = 1;
    /* Default to GET. */
    method = "GET";

    apr_getopt_init(&opt, pool, argc, argv);
    while ((status = apr_getopt(opt, "a:hv", &opt_c, &opt_arg)) ==
           APR_SUCCESS) {
        int srclen, enclen;

        switch (opt_c) {
        case 'a':
            srclen = strlen(opt_arg);
            enclen = apr_base64_encode_len(srclen);
            authn = apr_palloc(pool, enclen + 6);
            strcpy(authn, "Basic ");
            (void) apr_base64_encode(&authn[6], opt_arg, srclen);
            break;
        case 'h':
            print_usage(pool);
            exit(0);
            break;
        case 'v':
            puts("Serf version: " SERF_VERSION_STRING);
            exit(0);
        default:
            break;
        }
    }

    if (opt->ind != opt->argc - 1) {
        print_usage(pool);
        exit(-1);
    }

    raw_url = argv[opt->ind];

    apr_uri_parse(pool, raw_url, &url);
    if (!url.port) {
        url.port = apr_uri_port_of_scheme(url.scheme);
    }
    if (!url.path) {
        url.path = "/";
    }

    if (strcasecmp(url.scheme, "https") == 0) {
        app_ctx.using_ssl = 1;
    }
    else {
        app_ctx.using_ssl = 0;
    }

    status = apr_sockaddr_info_get(&address,
                                   url.hostname, APR_UNSPEC, url.port, 0,
                                   pool);
    if (status) {
        printf("Error creating address: %d\n", status);
        exit(1);
    }

    context = serf_context_create(pool);

    /* ### Connection or Context should have an allocator? */
    app_ctx.bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);
    app_ctx.ssl_ctx = NULL;
    app_ctx.authn = authn;

    connection = serf_connection_create(context, address,
                                        conn_setup, &app_ctx,
                                        closed_connection, &app_ctx,
                                        pool);

    handler_ctx = (handler_baton_t*)serf_bucket_mem_alloc(
        app_ctx.bkt_alloc, sizeof(handler_baton_t));
    handler_ctx->allocator = app_ctx.bkt_alloc;
    handler_ctx->doc_queue = apr_array_make(pool, 1, sizeof(doc_path_t*));
    handler_ctx->doc_queue_alloc = app_ctx.bkt_alloc;

    handler_ctx->requests_outstanding =
        (apr_uint32_t*)serf_bucket_mem_alloc(app_ctx.bkt_alloc,
                                             sizeof(apr_uint32_t));
    apr_atomic_set32(handler_ctx->requests_outstanding, 0);
    handler_ctx->hdr_read = 0;

    parser_ctx = (void*)serf_bucket_mem_alloc(app_ctx.bkt_alloc,
                                              sizeof(parser_baton_t));

    parser_ctx->requests_outstanding = handler_ctx->requests_outstanding;
    parser_ctx->connection = connection;
    parser_ctx->app_ctx = &app_ctx;
    parser_ctx->doc_queue = handler_ctx->doc_queue;
    parser_ctx->doc_queue_alloc = handler_ctx->doc_queue_alloc;
    /* Restrict ourselves to this host. */
    parser_ctx->hostinfo = url.hostinfo;

    status = apr_thread_mutex_create(&parser_ctx->mutex,
                                     APR_THREAD_MUTEX_DEFAULT, pool);
    if (status) {
        printf("Couldn't create mutex %d\n", status);
        return status;
    }

    status = apr_thread_cond_create(&parser_ctx->condvar, pool);
    if (status) {
        printf("Couldn't create condvar: %d\n", status);
        return status;
    }

    /* Let the handler know which condvar to use. */
    handler_ctx->doc_queue_condvar = parser_ctx->condvar;

    apr_threadattr_create(&tattr, pool);

    /* Start the parser thread. */
    apr_thread_create(&thread[0], tattr, parser_thread, parser_ctx, pool);

    /* Deliver the first request. */
    create_request(url.hostinfo, url.path, NULL, NULL, parser_ctx, pool);

    /* Go run our normal thread. */
    while (1) {
        int tries = 0;

        status = serf_context_run(context, SERF_DURATION_FOREVER, pool);
        if (APR_STATUS_IS_TIMEUP(status))
            continue;
        if (status) {
            char buf[200];

            printf("Error running context: (%d) %s\n", status,
                   apr_strerror(status, buf, sizeof(buf)));
            exit(1);
        }

        /* We run this check to allow our parser threads to add more
         * requests to our queue.
         */
        for (tries = 0; tries < 3; tries++) {
            if (!apr_atomic_read32(handler_ctx->requests_outstanding)) {
#ifdef SERF_VERBOSE
                printf("Waiting...");
#endif
                apr_sleep(100000);
#ifdef SERF_VERBOSE
                printf("Done\n");
#endif
            }
            else {
                break;
            }
        }
        if (tries >= 3) {
            break;
        }

        /* Debugging purposes only! */
        serf_debug__closed_conn(app_ctx.bkt_alloc);
    }

    printf("Quitting...\n");
    serf_connection_close(connection);

    /* wake up the parser via condvar signal */
    apr_thread_cond_signal(parser_ctx->condvar);

    status = apr_thread_join(&parser_status, thread[0]);
    if (status) {
        printf("Error joining thread: %d\n", status);
        return status;
    }

    serf_bucket_mem_free(app_ctx.bkt_alloc, handler_ctx->requests_outstanding);
    serf_bucket_mem_free(app_ctx.bkt_alloc, parser_ctx);

    apr_pool_destroy(pool);
    return 0;
}
int main(int argc, const char **argv)
{
    apr_status_t status;
    apr_pool_t *pool;
    serf_bucket_alloc_t *bkt_alloc;
    serf_context_t *context;
    serf_connection_t **connections;
    app_baton_t app_ctx;
    handler_baton_t handler_ctx;
    serf_bucket_t *req_hdrs = NULL;
    apr_uri_t url;
    const char *proxy = NULL;
    const char *raw_url, *method, *req_body_path = NULL;
    int count, inflight, conn_count;
    int i;
    int print_headers, debug;
    const char *username = NULL;
    const char *password = "";
    const char *pem_path = NULL, *pem_pwd = NULL;
    apr_getopt_t *opt;
    int opt_c;
    const char *opt_arg;

    apr_initialize();
    atexit(apr_terminate);

    apr_pool_create(&pool, NULL);
    /* serf_initialize(); */
    bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);

    /* Default to one round of fetching with no limit to max inflight reqs. */
    count = 1;
    inflight = 0;
    conn_count = 1;
    /* Default to GET. */
    method = "GET";
    /* Do not print headers by default. */
    print_headers = 0;
    /* Do not debug by default. */
    debug = 0;

    apr_getopt_init(&opt, pool, argc, argv);
    while ((status = apr_getopt_long(opt, options, &opt_c, &opt_arg)) ==
           APR_SUCCESS) {

        switch (opt_c) {
        case 'U':
            username = opt_arg;
            break;
        case 'P':
            password = opt_arg;
            break;
        case 'd':
            debug = 1;
            break;
        case 'f':
            req_body_path = opt_arg;
            break;
        case 'h':
            print_usage(pool);
            exit(0);
            break;
        case 'H':
            print_headers = 1;
            break;
        case 'm':
            method = opt_arg;
            break;
        case 'n':
            errno = 0;
            count = apr_strtoi64(opt_arg, NULL, 10);
            if (errno) {
                printf("Problem converting number of times to fetch URL (%d)\n",
                       errno);
                return errno;
            }
            break;
        case 'c':
            errno = 0;
            conn_count = apr_strtoi64(opt_arg, NULL, 10);
            if (errno) {
                printf("Problem converting number of concurrent connections to use (%d)\n",
                       errno);
                return errno;
            }

            if (conn_count <= 0) {
                printf("Invalid number of concurrent connections to use (%d)\n",
                       conn_count);
                return 1;
            }
            break;
        case 'x':
            errno = 0;
            inflight = apr_strtoi64(opt_arg, NULL, 10);
            if (errno) {
                printf("Problem converting number of requests to have outstanding (%d)\n",
                       errno);
                return errno;
            }
            break;
        case 'p':
            proxy = opt_arg;
            break;
        case 'r':
            {
                char *sep;
                char *hdr_val;

                if (req_hdrs == NULL) {
                    /* first request header, allocate bucket */
                    req_hdrs = serf_bucket_headers_create(bkt_alloc);
                }
                sep = strchr(opt_arg, ':');
                if ((sep == NULL) || (sep == opt_arg) || (strlen(sep) <= 1)) {
                    printf("Invalid request header string (%s)\n", opt_arg);
                    return EINVAL;
                }
                hdr_val = sep + 1;
                while (*hdr_val == ' ') {
                    hdr_val++;
                }
                serf_bucket_headers_setx(req_hdrs, opt_arg, (sep - opt_arg), 1,
                                         hdr_val, strlen(hdr_val), 1);
            }
            break;
        case CERTFILE:
            pem_path = opt_arg;
            break;
        case CERTPWD:
            pem_pwd = opt_arg;
            break;
        case 'v':
            puts("Serf version: " SERF_VERSION_STRING);
            exit(0);
        default:
            break;
        }
    }

    if (opt->ind != opt->argc - 1) {
        print_usage(pool);
        exit(-1);
    }

    raw_url = argv[opt->ind];

    apr_uri_parse(pool, raw_url, &url);
    if (!url.port) {
        url.port = apr_uri_port_of_scheme(url.scheme);
    }
    if (!url.path) {
        url.path = "/";
    }

    if (strcasecmp(url.scheme, "https") == 0) {
        app_ctx.using_ssl = 1;
    }
    else {
        app_ctx.using_ssl = 0;
    }

    if (strcasecmp(method, "HEAD") == 0) {
        app_ctx.head_request = 1;
    }
    else {
        app_ctx.head_request = 0;
    }

    app_ctx.hostinfo = url.hostinfo;
    app_ctx.pem_path = pem_path;
    app_ctx.pem_pwd = pem_pwd;

    context = serf_context_create(pool);
    app_ctx.serf_ctx = context;

    if (proxy) {
        apr_sockaddr_t *proxy_address = NULL;
        apr_port_t proxy_port;
        char *proxy_host;
        char *proxy_scope;

        status = apr_parse_addr_port(&proxy_host, &proxy_scope,
                                     &proxy_port, proxy, pool);
        if (status) {
            printf("Cannot parse proxy hostname/port: %d\n", status);
            apr_pool_destroy(pool);
            exit(1);
        }

        if (!proxy_host) {
            printf("Proxy hostname must be specified\n");
            apr_pool_destroy(pool);
            exit(1);
        }

        if (!proxy_port) {
            printf("Proxy port must be specified\n");
            apr_pool_destroy(pool);
            exit(1);
        }

        status = apr_sockaddr_info_get(&proxy_address, proxy_host,
                                       APR_UNSPEC, proxy_port, 0,
                                       pool);
        if (status) {
            printf("Cannot resolve proxy address '%s': %d\n",
                   proxy_host, status);
            apr_pool_destroy(pool);
            exit(1);
        }

        serf_config_proxy(context, proxy_address);
    }

    if (username) {
        serf_config_authn_types(context, SERF_AUTHN_ALL);
    }
    else {
        serf_config_authn_types(context,
                                SERF_AUTHN_NTLM | SERF_AUTHN_NEGOTIATE);
    }

    serf_config_credentials_callback(context, credentials_callback);

    /* Setup debug logging */
    if (debug) {
        serf_log_output_t *output;
        apr_status_t status;

        status = serf_logging_create_stream_output(&output,
                                                   context,
                                                   SERF_LOG_DEBUG,
                                                   SERF_LOGCOMP_ALL_MSG,
                                                   SERF_LOG_DEFAULT_LAYOUT,
                                                   stderr,
                                                   pool);
        if (!status)
            serf_logging_add_output(context, output);
    }

    /* ### Connection or Context should have an allocator? */
    app_ctx.bkt_alloc = bkt_alloc;

    connections = apr_pcalloc(pool, conn_count * sizeof(serf_connection_t*));
    for (i = 0; i < conn_count; i++) {
        conn_baton_t *conn_ctx = apr_pcalloc(pool, sizeof(*conn_ctx));
        conn_ctx->app = &app_ctx;
        conn_ctx->ssl_ctx = NULL;

        status = serf_connection_create2(&connections[i], context, url,
                                         conn_setup, conn_ctx,
                                         closed_connection, conn_ctx,
                                         pool);
        if (status) {
            printf("Error creating connection: %d\n", status);
            apr_pool_destroy(pool);
            exit(1);
        }

        serf_connection_set_max_outstanding_requests(connections[i], inflight);
    }

    handler_ctx.completed_requests = 0;
    handler_ctx.print_headers = print_headers;

#if APR_VERSION_AT_LEAST(1, 3, 0)
    apr_file_open_flags_stdout(&handler_ctx.output_file, APR_BUFFERED, pool);
#else
    apr_file_open_stdout(&handler_ctx.output_file, pool);
#endif

    handler_ctx.host = url.hostinfo;
    handler_ctx.method = method;
    handler_ctx.path = apr_pstrcat(pool,
                                   url.path,
                                   url.query ? "?" : "",
                                   url.query ? url.query : "",
                                   NULL);
    handler_ctx.username = username;
    handler_ctx.password = password;
    handler_ctx.auth_attempts = 0;

    handler_ctx.req_body_path = req_body_path;

    handler_ctx.acceptor = accept_response;
    handler_ctx.acceptor_baton = &app_ctx;
    handler_ctx.handler = handle_response;
    handler_ctx.req_hdrs = req_hdrs;

    for (i = 0; i < count; i++) {
        /* We don't need the returned request here. */
        serf_connection_request_create(connections[i % conn_count],
                                       setup_request, &handler_ctx);
    }

    while (1) {
        status = serf_context_run(context, SERF_DURATION_FOREVER, pool);
        if (APR_STATUS_IS_TIMEUP(status))
            continue;
        if (status) {
            char buf[200];
            const char *err_string;
            err_string = serf_error_string(status);
            if (!err_string) {
                err_string = apr_strerror(status, buf, sizeof(buf));
            }

            printf("Error running context: (%d) %s\n", status, err_string);
            apr_pool_destroy(pool);
            exit(1);
        }
        if (apr_atomic_read32(&handler_ctx.completed_requests) >= count) {
            break;
        }
        /* Debugging purposes only! */
        serf_debug__closed_conn(app_ctx.bkt_alloc);
    }

    apr_file_close(handler_ctx.output_file);

    for (i = 0; i < conn_count; i++) {
        serf_connection_close(connections[i]);
    }

    apr_pool_destroy(pool);
    return 0;
}
/* internal test for the mock buckets */
static void test_basic_mock_bucket(CuTest *tc)
{
    serf_bucket_t *mock_bkt;
    apr_pool_t *test_pool = tc->testBaton;
    serf_bucket_alloc_t *alloc = serf_bucket_allocator_create(test_pool, NULL,
                                                              NULL);

    /* read one line */
    {
        mockbkt_action actions[]= {
            { 1, "HTTP/1.1 200 OK" CRLF, APR_EOF },
        };
        mock_bkt = serf_bucket_mock_create(actions, 1, alloc);
        read_and_check_bucket(tc, mock_bkt,
                              "HTTP/1.1 200 OK" CRLF);

        mock_bkt = serf_bucket_mock_create(actions, 1, alloc);
        readlines_and_check_bucket(tc, mock_bkt, SERF_NEWLINE_CRLF,
                                   "HTTP/1.1 200 OK" CRLF, 1);
    }
    /* read one line, character per character */
    {
        apr_status_t status;
        const char *expected = "HTTP/1.1 200 OK" CRLF;
        mockbkt_action actions[]= {
            { 1, "HTTP/1.1 200 OK" CRLF, APR_EOF },
        };
        mock_bkt = serf_bucket_mock_create(actions, 1, alloc);
        do
        {
            const char *data;
            apr_size_t len;

            status = serf_bucket_read(mock_bkt, 1, &data, &len);
            CuAssert(tc, "Got error during bucket reading.",
                     !SERF_BUCKET_READ_ERROR(status));
            CuAssert(tc, "Read more data than expected.",
                     strlen(expected) >= len);
            CuAssert(tc, "Read data is not equal to expected.",
                     strncmp(expected, data, len) == 0);
            CuAssert(tc, "Read more data than requested.",
                     len <= 1);

            expected += len;
        } while(!APR_STATUS_IS_EOF(status));

        CuAssert(tc, "Read less data than expected.",
                 strlen(expected) == 0);
    }
    /* read multiple lines */
    {
        mockbkt_action actions[]= {
            { 1, "HTTP/1.1 200 OK" CRLF, APR_SUCCESS },
            { 1, "Content-Type: text/plain" CRLF, APR_EOF },
        };
        mock_bkt = serf_bucket_mock_create(actions, 2, alloc);
        readlines_and_check_bucket(tc, mock_bkt, SERF_NEWLINE_CRLF,
                                   "HTTP/1.1 200 OK" CRLF
                                   "Content-Type: text/plain" CRLF, 2);
    }
    /* read with an empty chunk (EAGAIN) in the middle */
    {
        mockbkt_action actions[]= {
            { 1, "HTTP/1.1 200 OK" CRLF, APR_SUCCESS },
            { 1, "", APR_EAGAIN },
            { 1, "Content-Type: text/plain" CRLF, APR_EOF },
        };
        mock_bkt = serf_bucket_mock_create(actions, 3, alloc);
        read_and_check_bucket(tc, mock_bkt,
                              "HTTP/1.1 200 OK" CRLF
                              "Content-Type: text/plain" CRLF);

        mock_bkt = serf_bucket_mock_create(actions, 3, alloc);
        readlines_and_check_bucket(tc, mock_bkt, SERF_NEWLINE_CRLF,
                                   "HTTP/1.1 200 OK" CRLF
                                   "Content-Type: text/plain" CRLF, 2);
    }
    /* read a line with its CRLF split over multiple chunks */
    {
        mockbkt_action actions[]= {
            { 1, "HTTP/1.1 200 OK" CR, APR_SUCCESS },
            { 1, "", APR_EAGAIN },
            { 1, LF, APR_EOF },
        };
        mock_bkt = serf_bucket_mock_create(actions,
                                           sizeof(actions)/sizeof(actions[0]),
                                           alloc);
        read_and_check_bucket(tc, mock_bkt,
                              "HTTP/1.1 200 OK" CRLF);

        mock_bkt = serf_bucket_mock_create(actions,
                                           sizeof(actions)/sizeof(actions[0]),
                                           alloc);
        readlines_and_check_bucket(tc, mock_bkt, SERF_NEWLINE_CRLF,
                                   "HTTP/1.1 200 OK" CRLF, 1);
    }
    /* test more_data_arrived */
    {
        apr_status_t status;
        const char *data;
        apr_size_t len;
        int i;

        mockbkt_action actions[]= {
            { 1, "", APR_EAGAIN },
            { 1, "blabla", APR_EOF },
        };
        mock_bkt = serf_bucket_mock_create(actions,
                                           sizeof(actions)/sizeof(actions[0]),
                                           alloc);

        for (i = 0; i < 5; i++) {
            status = serf_bucket_peek(mock_bkt, &data, &len);
            CuAssertIntEquals(tc, APR_SUCCESS, status);
            CuAssertIntEquals(tc, 0, len);
            CuAssertIntEquals(tc, '\0', *data);
        }

        serf_bucket_mock_more_data_arrived(mock_bkt);

        status = serf_bucket_peek(mock_bkt, &data, &len);
        CuAssertIntEquals(tc, APR_EOF, status);
        CuAssertIntEquals(tc, 6, len);
        CuAssert(tc, "Read data is not equal to expected.",
                 strncmp("blabla", data, len) == 0);
    }
}
apr_table_t *
default_chxj_serf_head(request_rec *r, apr_pool_t *ppool, const char *url_path,
                       int *response_code)
{
  apr_pool_t *pool;
  apr_uri_t url;
  apr_status_t rv;
  apr_sockaddr_t *address = NULL;
  serf_context_t *context;
  serf_connection_t *connection;
  app_ctx_t app_ctx;
  handler_ctx_t handler_ctx;
  char *ret;

  DBG(r, "REQ[%X] start %s()", TO_ADDR(r), __func__);

  s_init(ppool, &pool);

  apr_uri_parse(pool, url_path, &url);
  if (!url.port) {
    url.port = apr_uri_port_of_scheme(url.scheme);
  }
  if (!url.port) {
    url.port = 80;
  }
  if (!url.path) {
    url.path = "/";
  }
  if (!url.hostname) {
    url.hostname = "localhost";
  }
  if (url.query) {
    url.path = apr_psprintf(pool, "%s?%s", url.path, url.query);
  }

  rv = apr_sockaddr_info_get(&address, url.hostname, APR_UNSPEC, url.port, 0,
                             pool);
  if (rv != APR_SUCCESS) {
    char buf[256];
    ERR(r, "apr_sockaddr_info_get() failed: rv:[%d|%s]",
        rv, apr_strerror(rv, buf, 256));
    return NULL;
  }

  memset(&app_ctx, 0, sizeof(app_ctx_t));

  app_ctx.bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);
  if (strcasecmp(url.scheme, "https") == 0) {
    app_ctx.ssl_flag = 1;
  }

  context = serf_context_create(pool);
  connection = serf_connection_create(context, address,
                                      s_connection_setup, &app_ctx,
                                      s_connection_closed, &app_ctx,
                                      pool);

  memset(&handler_ctx, 0, sizeof(handler_ctx_t));
  handler_ctx.requests_outstanding = 0;
  handler_ctx.host = url.hostinfo;

  /*==================================================================*/
  /* XXX libserf may not support HEAD requests here, because the      */
  /* response body is awaited by polling, so a GET is issued instead. */
  /*==================================================================*/
  handler_ctx.method = "GET";

  handler_ctx.path = url.path;
  handler_ctx.user_agent = (char *)apr_table_get(r->headers_in,
                                                 CHXJ_HTTP_USER_AGENT);
  if (!handler_ctx.user_agent) {
    handler_ctx.user_agent = (char *)apr_table_get(r->headers_in,
                                                   HTTP_USER_AGENT);
  }

  handler_ctx.post_data = NULL;
  handler_ctx.post_data_len = 0;

  handler_ctx.acceptor = s_accept_response;
  handler_ctx.acceptor_ctx = &app_ctx;
  handler_ctx.handler = s_handle_response;
  handler_ctx.pool = pool;
  handler_ctx.r = r;
  handler_ctx.response_len = 0;
  handler_ctx.response = NULL;

  serf_connection_request_create(connection, s_setup_request, &handler_ctx);

  while (1) {
    rv = serf_context_run(context, SERF_DURATION_FOREVER, pool);
    if (APR_STATUS_IS_TIMEUP(rv))
      continue;
    if (rv) {
      char buf[200];
      ERR(r, "Error running context: (%d) %s\n", rv,
          apr_strerror(rv, buf, sizeof(buf)));
      break;
    }
    if (!apr_atomic_read32(&handler_ctx.requests_outstanding)) {
      if (handler_ctx.rv != APR_SUCCESS) {
        char buf[200];
        ERR(r, "Error running context: (%d) %s\n", handler_ctx.rv,
            apr_strerror(handler_ctx.rv, buf, sizeof(buf)));
      }
      break;
    }
  }

  DBG(r, "REQ[%X] end of serf request", TO_ADDR(r));
  DBG(r, "REQ[%X] response_code:[%d]", TO_ADDR(r), handler_ctx.response_code);
  DBG(r, "REQ[%X] response:[%s][%" APR_SIZE_T_FMT "]",
      TO_ADDR(r), handler_ctx.response, handler_ctx.response_len);

  serf_connection_close(connection);

  if (handler_ctx.response) {
    ret = apr_pstrdup(ppool, handler_ctx.response);
  }
  else {
    ret = apr_pstrdup(ppool, "");
  }
  *response_code = handler_ctx.response_code;

  DBG(r, "REQ[%X] end %s()", TO_ADDR(r), __func__);

  return handler_ctx.headers_out;
}
char *
default_chxj_serf_get(request_rec *r, apr_pool_t *ppool, const char *url_path,
                      int set_headers_flag, apr_size_t *response_len)
{
  apr_pool_t *pool;
  apr_uri_t url;
  apr_status_t rv;
  apr_sockaddr_t *address = NULL;
  serf_context_t *context;
  serf_connection_t *connection;
  app_ctx_t app_ctx;
  handler_ctx_t handler_ctx;
  char *ret;

  s_init(ppool, &pool);

  apr_uri_parse(pool, url_path, &url);
  if (!url.port) {
    url.port = apr_uri_port_of_scheme(url.scheme);
  }
  if (!url.port) {
    url.port = 80;
  }
  if (!url.path) {
    url.path = "/";
  }
  if (!url.hostname) {
    url.hostname = "localhost";
  }
  if (url.query) {
    url.path = apr_psprintf(pool, "%s?%s", url.path, url.query);
  }

  rv = apr_sockaddr_info_get(&address, url.hostname, APR_UNSPEC, url.port, 0,
                             pool);
  if (rv != APR_SUCCESS) {
    char buf[256];
    ERR(r, "REQ[%X] %s:%d apr_sockaddr_info_get() failed: rv:[%d|%s]"
           " - Please check DNS settings.",
        (unsigned int)(apr_size_t)r, __FILE__, __LINE__,
        rv, apr_strerror(rv, buf, 256));
    return NULL;
  }

  memset(&app_ctx, 0, sizeof(app_ctx_t));

  app_ctx.bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);
  if (strcasecmp(url.scheme, "https") == 0) {
    app_ctx.ssl_flag = 1;
  }

  context = serf_context_create(pool);
  connection = serf_connection_create(context, address,
                                      s_connection_setup, &app_ctx,
                                      s_connection_closed, &app_ctx,
                                      pool);

  memset(&handler_ctx, 0, sizeof(handler_ctx_t));
  handler_ctx.requests_outstanding = 0;
  handler_ctx.host = url.hostinfo;
  handler_ctx.method = "GET";
  handler_ctx.path = url.path;
  handler_ctx.user_agent = (char *)apr_table_get(r->headers_in,
                                                 CHXJ_HTTP_USER_AGENT);
  if (!handler_ctx.user_agent) {
    handler_ctx.user_agent = (char *)apr_table_get(r->headers_in,
                                                   HTTP_USER_AGENT);
  }

  handler_ctx.post_data = NULL;
  handler_ctx.post_data_len = 0;

  handler_ctx.acceptor = s_accept_response;
  handler_ctx.acceptor_ctx = &app_ctx;
  handler_ctx.handler = s_handle_response;
  handler_ctx.pool = pool;
  handler_ctx.r = r;
  handler_ctx.response_len = 0;
  handler_ctx.response = NULL;

  serf_connection_request_create(connection, s_setup_request, &handler_ctx);

  while (1) {
    rv = serf_context_run(context, SERF_DURATION_FOREVER, pool);
    if (APR_STATUS_IS_TIMEUP(rv))
      continue;
    if (rv) {
      char buf[200];
      ERR(r, "Error running context: (%d) %s\n", rv,
          apr_strerror(rv, buf, sizeof(buf)));
      break;
    }
    if (!apr_atomic_read32(&handler_ctx.requests_outstanding)) {
      if (handler_ctx.rv != APR_SUCCESS) {
        char buf[200];
        ERR(r, "Error running context: (%d) %s\n", handler_ctx.rv,
            apr_strerror(handler_ctx.rv, buf, sizeof(buf)));
      }
      break;
    }
  }

  serf_connection_close(connection);

  if (handler_ctx.response) {
    ret = apr_palloc(ppool, handler_ctx.response_len + 1);
    memset(ret, 0, handler_ctx.response_len + 1);
    memcpy(ret, handler_ctx.response, handler_ctx.response_len);
  }
  else {
    ret = apr_pstrdup(ppool, "");
  }
  *response_len = handler_ctx.response_len;

  if (set_headers_flag) {
    r->headers_out = apr_table_copy(pool, handler_ctx.headers_out);
    *response_len = handler_ctx.response_len;
    char *contentType = (char *)apr_table_get(handler_ctx.headers_out,
                                              "Content-Type");
    if (contentType) {
      chxj_set_content_type(r, contentType);
    }
  }

  return ret;
}
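/* Hypothetical caller of default_chxj_serf_get(): fetch a body into the
 * request pool and copy the upstream headers into r->headers_out.  The URL
 * is a placeholder. */
static char *example_fetch(request_rec *r)
{
  apr_size_t len = 0;

  return default_chxj_serf_get(r, r->pool,
                               "http://localhost/robots.txt", /* placeholder */
                               1,      /* set_headers_flag */
                               &len);
}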
/* Implements svn_ra__vtable_t.dup_session */
static svn_error_t *
ra_serf_dup_session(svn_ra_session_t *new_session,
                    svn_ra_session_t *old_session,
                    const char *new_session_url,
                    apr_pool_t *result_pool,
                    apr_pool_t *scratch_pool)
{
  svn_ra_serf__session_t *old_sess = old_session->priv;
  svn_ra_serf__session_t *new_sess;
  apr_status_t status;

  new_sess = apr_pmemdup(result_pool, old_sess, sizeof(*new_sess));

  new_sess->pool = result_pool;

  if (new_sess->config)
    SVN_ERR(svn_config_copy_config(&new_sess->config, new_sess->config,
                                   result_pool));

  /* max_connections */
  /* using_ssl */
  /* using_compression */
  /* http10 */
  /* using_chunked_requests */
  /* detect_chunking */

  if (new_sess->useragent)
    new_sess->useragent = apr_pstrdup(result_pool, new_sess->useragent);

  if (new_sess->vcc_url)
    new_sess->vcc_url = apr_pstrdup(result_pool, new_sess->vcc_url);

  new_sess->auth_state = NULL;
  new_sess->auth_attempts = 0;

  /* Callback functions to get info from WC */
  /* wc_callbacks */
  /* wc_callback_baton */

  /* progress_func */
  /* progress_baton */

  /* cancel_func */
  /* cancel_baton */

  /* shim_callbacks */

  new_sess->pending_error = NULL;

  /* authn_types */

  /* Keys and values are static */
  if (new_sess->capabilities)
    new_sess->capabilities = apr_hash_copy(result_pool,
                                           new_sess->capabilities);

  if (new_sess->activity_collection_url)
    {
      new_sess->activity_collection_url
                = apr_pstrdup(result_pool, new_sess->activity_collection_url);
    }

  /* using_proxy */

  if (new_sess->proxy_username)
    {
      new_sess->proxy_username
                = apr_pstrdup(result_pool, new_sess->proxy_username);
    }

  if (new_sess->proxy_password)
    {
      new_sess->proxy_password
                = apr_pstrdup(result_pool, new_sess->proxy_password);
    }

  new_sess->proxy_auth_attempts = 0;

  /* trust_default_ca */

  if (new_sess->ssl_authorities)
    {
      new_sess->ssl_authorities = apr_pstrdup(result_pool,
                                              new_sess->ssl_authorities);
    }

  if (new_sess->uuid)
    new_sess->uuid = apr_pstrdup(result_pool, new_sess->uuid);

  /* timeout */
  /* supports_deadprop_count */

  if (new_sess->me_resource)
    new_sess->me_resource = apr_pstrdup(result_pool, new_sess->me_resource);
  if (new_sess->rev_stub)
    new_sess->rev_stub = apr_pstrdup(result_pool, new_sess->rev_stub);
  if (new_sess->txn_stub)
    new_sess->txn_stub = apr_pstrdup(result_pool, new_sess->txn_stub);
  if (new_sess->txn_root_stub)
    new_sess->txn_root_stub = apr_pstrdup(result_pool,
                                          new_sess->txn_root_stub);
  if (new_sess->vtxn_stub)
    new_sess->vtxn_stub = apr_pstrdup(result_pool, new_sess->vtxn_stub);
  if (new_sess->vtxn_root_stub)
    new_sess->vtxn_root_stub = apr_pstrdup(result_pool,
                                           new_sess->vtxn_root_stub);

  /* Keys and values are static */
  if (new_sess->supported_posts)
    new_sess->supported_posts = apr_hash_copy(result_pool,
                                              new_sess->supported_posts);

  /* ### Can we copy this? */
  SVN_ERR(svn_ra_serf__blncache_create(&new_sess->blncache,
                                       new_sess->pool));

  if (new_sess->server_allows_bulk)
    new_sess->server_allows_bulk = apr_pstrdup(result_pool,
                                               new_sess->server_allows_bulk);

  new_sess->repos_root_str = apr_pstrdup(result_pool,
                                         new_sess->repos_root_str);
  SVN_ERR(svn_ra_serf__uri_parse(&new_sess->repos_root,
                                 new_sess->repos_root_str,
                                 result_pool));

  new_sess->session_url_str = apr_pstrdup(result_pool, new_session_url);
  SVN_ERR(svn_ra_serf__uri_parse(&new_sess->session_url,
                                 new_sess->session_url_str,
                                 result_pool));
  /* svn_boolean_t supports_inline_props */
  /* supports_rev_rsrc_replay */

  new_sess->context = serf_context_create(result_pool);

  SVN_ERR(load_config(new_sess, old_sess->config, result_pool));

  new_sess->conns[0] = apr_pcalloc(result_pool,
                                   sizeof(*new_sess->conns[0]));
  new_sess->conns[0]->bkt_alloc =
          serf_bucket_allocator_create(result_pool, NULL, NULL);
  new_sess->conns[0]->session = new_sess;
  new_sess->conns[0]->last_status_code = -1;

  /* go ahead and tell serf about the connection. */
  status = serf_connection_create2(&new_sess->conns[0]->conn,
                                   new_sess->context,
                                   new_sess->session_url,
                                   svn_ra_serf__conn_setup,
                                   new_sess->conns[0],
                                   svn_ra_serf__conn_closed,
                                   new_sess->conns[0],
                                   result_pool);
  if (status)
    return svn_ra_serf__wrap_err(status, NULL);

  /* Set the progress callback. */
  serf_context_set_progress_cb(new_sess->context, svn_ra_serf__progress,
                               new_sess);

  new_sess->num_conns = 1;
  new_sess->cur_conn = 0;

  new_session->priv = new_sess;

  return SVN_NO_ERROR;
}
/* Implements svn_ra__vtable_t.open_session(). */
static svn_error_t *
svn_ra_serf__open(svn_ra_session_t *session,
                  const char **corrected_url,
                  const char *session_URL,
                  const svn_ra_callbacks2_t *callbacks,
                  void *callback_baton,
                  svn_auth_baton_t *auth_baton,
                  apr_hash_t *config,
                  apr_pool_t *result_pool,
                  apr_pool_t *scratch_pool)
{
  apr_status_t status;
  svn_ra_serf__session_t *serf_sess;
  apr_uri_t url;
  const char *client_string = NULL;
  svn_error_t *err;

  if (corrected_url)
    *corrected_url = NULL;

  serf_sess = apr_pcalloc(result_pool, sizeof(*serf_sess));
  serf_sess->pool = result_pool;
  if (config)
    SVN_ERR(svn_config_copy_config(&serf_sess->config, config, result_pool));
  else
    serf_sess->config = NULL;

  serf_sess->wc_callbacks = callbacks;
  serf_sess->wc_callback_baton = callback_baton;
  serf_sess->auth_baton = auth_baton;
  serf_sess->progress_func = callbacks->progress_func;
  serf_sess->progress_baton = callbacks->progress_baton;
  serf_sess->cancel_func = callbacks->cancel_func;
  serf_sess->cancel_baton = callback_baton;

  /* todo: reuse serf context across sessions */
  serf_sess->context = serf_context_create(serf_sess->pool);

  SVN_ERR(svn_ra_serf__blncache_create(&serf_sess->blncache,
                                       serf_sess->pool));

  SVN_ERR(svn_ra_serf__uri_parse(&url, session_URL, serf_sess->pool));

  if (!url.port)
    {
      url.port = apr_uri_port_of_scheme(url.scheme);
    }
  serf_sess->session_url = url;
  serf_sess->session_url_str = apr_pstrdup(serf_sess->pool, session_URL);
  serf_sess->using_ssl = (svn_cstring_casecmp(url.scheme, "https") == 0);

  serf_sess->supports_deadprop_count = svn_tristate_unknown;

  serf_sess->capabilities = apr_hash_make(serf_sess->pool);

  /* We have to assume that the server only supports HTTP/1.0. Once it's
     clear HTTP/1.1 is supported, we can upgrade. */
  serf_sess->http10 = TRUE;

  /* If we switch to HTTP/1.1, then we will use chunked requests. We may
     disable this, if we find an intervening proxy does not support chunked
     requests. */
  serf_sess->using_chunked_requests = TRUE;

  SVN_ERR(load_config(serf_sess, config, serf_sess->pool));

  serf_sess->conns[0] = apr_pcalloc(serf_sess->pool,
                                    sizeof(*serf_sess->conns[0]));
  serf_sess->conns[0]->bkt_alloc =
          serf_bucket_allocator_create(serf_sess->pool, NULL, NULL);
  serf_sess->conns[0]->session = serf_sess;
  serf_sess->conns[0]->last_status_code = -1;

  /* create the user agent string */
  if (callbacks->get_client_string)
    SVN_ERR(callbacks->get_client_string(callback_baton, &client_string,
                                         scratch_pool));

  if (client_string)
    serf_sess->useragent = apr_pstrcat(result_pool,
                                       get_user_agent_string(scratch_pool),
                                       " ", client_string, SVN_VA_NULL);
  else
    serf_sess->useragent = get_user_agent_string(result_pool);

  /* go ahead and tell serf about the connection. */
  status = serf_connection_create2(&serf_sess->conns[0]->conn,
                                   serf_sess->context,
                                   url,
                                   svn_ra_serf__conn_setup,
                                   serf_sess->conns[0],
                                   svn_ra_serf__conn_closed,
                                   serf_sess->conns[0],
                                   serf_sess->pool);
  if (status)
    return svn_ra_serf__wrap_err(status, NULL);

  /* Set the progress callback. */
  serf_context_set_progress_cb(serf_sess->context, svn_ra_serf__progress,
                               serf_sess);

  serf_sess->num_conns = 1;

  session->priv = serf_sess;

  /* The following code explicitly works around a bug in serf <= r2319 /
     1.3.8 where serf doesn't report the request as failed/cancelled when
     the authorization request handler fails to handle the request.

     As long as we allocate the request in a subpool of the serf connection
     pool, we know that the handler is always cleaned before the connection.

     Luckily our caller now passes us two pools which handle this case.
   */
#if defined(SVN_DEBUG) && !SERF_VERSION_AT_LEAST(1,4,0)
  /* Currently ensured by svn_ra_open4().  If this assertion fails, it
     causes a segfault in basic_tests.py 48, "basic auth test". */
  SVN_ERR_ASSERT((serf_sess->pool != scratch_pool)
                 && apr_pool_is_ancestor(serf_sess->pool, scratch_pool));
#endif

  err = svn_ra_serf__exchange_capabilities(serf_sess, corrected_url,
                                           result_pool, scratch_pool);

  /* serf should produce a usable error code instead of APR_EGENERAL */
  if (err && err->apr_err == APR_EGENERAL)
    err = svn_error_createf(SVN_ERR_RA_DAV_REQUEST_FAILED, err,
                            _("Connection to '%s' failed"), session_URL);
  SVN_ERR(err);

  /* We have set up a useful connection (that doesn't indicate a redirect).
     If we've been told there is possibly a worrisome proxy in our path to
     the server AND we switched to HTTP/1.1 (chunked requests), then probe
     for problems in any proxy. */
  if ((corrected_url == NULL || *corrected_url == NULL)
      && serf_sess->detect_chunking && !serf_sess->http10)
    SVN_ERR(svn_ra_serf__probe_proxy(serf_sess, scratch_pool));

  return SVN_NO_ERROR;
}
apr_status_t test_server_create(test_baton_t **tb_p,
                                test_server_action_t *action_list,
                                apr_size_t action_count,
                                apr_int32_t options,
                                const char *host_url,
                                apr_sockaddr_t *address,
                                serf_connection_setup_t conn_setup,
                                apr_pool_t *pool)
{
    apr_status_t status;
    test_baton_t *tb;

    tb = apr_palloc(pool, sizeof(*tb));
    *tb_p = tb;

    if (address) {
        tb->serv_addr = address;
    }
    else {
        status = get_server_address(&tb->serv_addr, pool);
        if (status != APR_SUCCESS)
            return status;
    }

    tb->pool = pool;
    tb->options = options;
    tb->context = serf_context_create(pool);
    tb->bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);

    if (host_url) {
        apr_uri_t url;

        status = apr_uri_parse(pool, host_url, &url);
        if (status != APR_SUCCESS)
            return status;

        status = serf_connection_create2(&tb->connection, tb->context,
                                         url,
                                         conn_setup ? conn_setup :
                                             default_conn_setup,
                                         tb,
                                         default_closed_connection,
                                         tb,
                                         pool);
        if (status != APR_SUCCESS)
            return status;
    }
    else {
        tb->connection = serf_connection_create(tb->context,
                                                tb->serv_addr,
                                                conn_setup ? conn_setup :
                                                    default_conn_setup,
                                                tb,
                                                default_closed_connection,
                                                tb,
                                                pool);
    }

    tb->action_list = action_list;
    tb->action_count = action_count;

    /* Prepare a server. */
    status = prepare_server(tb, pool);
    if (status != APR_SUCCESS)
        return status;

    return APR_SUCCESS;
}
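/* Usage sketch (hypothetical): create the test server/client pair with an
 * empty action list and drive it once through the usual serf event loop.
 * Only functions that already appear above are used. */
static apr_status_t example_run(apr_pool_t *pool)
{
    test_baton_t *tb;
    apr_status_t status;

    status = test_server_create(&tb, NULL, 0, 0, NULL, NULL, NULL, pool);
    if (status != APR_SUCCESS)
        return status;

    /* One iteration of the event loop; a real test loops until done. */
    status = serf_context_run(tb->context, SERF_DURATION_FOREVER, pool);
    if (APR_STATUS_IS_TIMEUP(status))
        status = APR_SUCCESS;

    return status;
}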