dav_error *dav_repos_deliver_principal_search_property_set(request_rec *r,
                                                           const dav_resource *resource,
                                                           const apr_xml_doc *doc,
                                                           ap_filter_t *output)
{
    apr_bucket_brigade *bb;
    apr_pool_t *pool = resource->pool;

    TRACE();

    bb = apr_brigade_create(pool, output->c->bucket_alloc);

    send_xml(bb, output, "<D:principal-search-property-set xmlns:D=\"DAV:\">" DEBUG_CR);
    send_xml(bb, output, "<D:principal-search-property>" DEBUG_CR);
    send_xml(bb, output, "<D:prop>" DEBUG_CR);
    send_xml(bb, output, "<D:displayname/>" DEBUG_CR);
    send_xml(bb, output, "</D:prop>" DEBUG_CR);
    send_xml(bb, output, "<D:description xml:lang=\"en\">Full name</D:description>" DEBUG_CR);
    send_xml(bb, output, "</D:principal-search-property>" DEBUG_CR);
    send_xml(bb, output, "</D:principal-search-property-set>" DEBUG_CR);

    ap_fflush(output, bb);

    return NULL;
}
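/* The deliver_* handlers in this file rely on a small send_xml() helper that
 * is not part of this excerpt.  The definition below is a minimal sketch for
 * context only: it assumes the helper merely appends a string to the brigade
 * through the output filter stack via ap_fputs(); the real helper may also
 * check and log write failures. */
static void send_xml(apr_bucket_brigade *bb, ap_filter_t *output,
                     const char *str)
{
    (void) ap_fputs(output, bb, str);
}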
static int proxy_wstunnel_transfer(request_rec *r, conn_rec *c_i, conn_rec *c_o,
                                   apr_bucket_brigade *bb, char *name)
{
    int rv;
#ifdef DEBUGGING
    apr_off_t len;
#endif

    do {
        apr_brigade_cleanup(bb);
        rv = ap_get_brigade(c_i->input_filters, bb, AP_MODE_READBYTES,
                            APR_NONBLOCK_READ, AP_IOBUFSIZE);
        if (rv == APR_SUCCESS) {
            if (c_o->aborted) {
                return APR_EPIPE;
            }
            if (APR_BRIGADE_EMPTY(bb)) {
                break;
            }
#ifdef DEBUGGING
            len = -1;
            apr_brigade_length(bb, 0, &len);
            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02440)
                          "read %" APR_OFF_T_FMT " bytes from %s", len, name);
#endif
            rv = ap_pass_brigade(c_o->output_filters, bb);
            if (rv == APR_SUCCESS) {
                ap_fflush(c_o->output_filters, bb);
            }
            else {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02441)
                              "error on %s - ap_pass_brigade", name);
            }
        }
        else if (!APR_STATUS_IS_EAGAIN(rv) && !APR_STATUS_IS_EOF(rv)) {
            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, APLOGNO(02442)
                          "error on %s - ap_get_brigade", name);
        }
    } while (rv == APR_SUCCESS);

    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, rv, r, "wstunnel_transfer complete");

    if (APR_STATUS_IS_EAGAIN(rv)) {
        rv = APR_SUCCESS;
    }

    return rv;
}
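/* For context: proxy_wstunnel_transfer() is driven from a poll loop very much
 * like the CONNECT tunnel below, once per readable direction.  The helper
 * below is an illustrative sketch, not part of mod_proxy_wstunnel; the
 * parameter names and the is_backend_readable flag stand in for the pollset
 * bookkeeping done by the real caller. */
static int tunnel_pump_once(request_rec *r, conn_rec *client_conn,
                            conn_rec *backend_conn, apr_bucket_brigade *bb,
                            int is_backend_readable)
{
    if (is_backend_readable) {
        /* backend -> client */
        return proxy_wstunnel_transfer(r, backend_conn, client_conn, bb, "sock");
    }
    /* client -> backend */
    return proxy_wstunnel_transfer(r, client_conn, backend_conn, bb, "client");
}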
/* CONNECT handler */
static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
                                 proxy_server_conf *conf,
                                 char *url, const char *proxyname,
                                 apr_port_t proxyport)
{
    connect_conf *c_conf =
        ap_get_module_config(r->server->module_config, &proxy_connect_module);

    apr_pool_t *p = r->pool;
    apr_socket_t *sock;
    conn_rec *c = r->connection;
    conn_rec *backconn;

    apr_bucket_brigade *bb = apr_brigade_create(p, c->bucket_alloc);
    apr_status_t rv;
    apr_size_t nbytes;
    char buffer[HUGE_STRING_LEN];
    apr_socket_t *client_socket = ap_get_conn_socket(c);
    int failed, rc;
    int client_error = 0;
    apr_pollset_t *pollset;
    apr_pollfd_t pollfd;
    const apr_pollfd_t *signalled;
    apr_int32_t pollcnt, pi;
    apr_int16_t pollevent;
    apr_sockaddr_t *nexthop;

    apr_uri_t uri;
    const char *connectname;
    int connectport = 0;

    /* is this for us? */
    if (r->method_number != M_CONNECT) {
        ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "declining URL %s", url);
        return DECLINED;
    }
    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "serving URL %s", url);

    /*
     * Step One: Determine Who To Connect To
     *
     * Break up the URL to determine the host to connect to
     */

    /* we break the URL into host, port, uri */
    if (APR_SUCCESS != apr_uri_parse_hostinfo(p, url, &uri)) {
        return ap_proxyerror(r, HTTP_BAD_REQUEST,
                             apr_pstrcat(p, "URI cannot be parsed: ", url,
                                         NULL));
    }

    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01019)
                  "connecting %s to %s:%d", url, uri.hostname, uri.port);

    /* Determine host/port of next hop; from request URI or of a proxy. */
    connectname = proxyname ? proxyname : uri.hostname;
    connectport = proxyname ? proxyport : uri.port;

    /* Do a DNS lookup for the next hop */
    rv = apr_sockaddr_info_get(&nexthop, connectname, APR_UNSPEC,
                               connectport, 0, p);
    if (rv != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02327)
                      "failed to resolve hostname '%s'", connectname);
        return ap_proxyerror(r, HTTP_BAD_GATEWAY,
                             apr_pstrcat(p, "DNS lookup failure for: ",
                                         connectname, NULL));
    }

    /* Check ProxyBlock directive on the hostname/address. */
    if (ap_proxy_checkproxyblock2(r, conf, uri.hostname,
                                  proxyname ? NULL : nexthop) != OK) {
        return ap_proxyerror(r, HTTP_FORBIDDEN,
                             "Connect to remote machine blocked");
    }

    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
                  "connecting to remote proxy %s on port %d",
                  connectname, connectport);

    /* Check if it is an allowed port */
    if (!allowed_port(c_conf, uri.port)) {
        return ap_proxyerror(r, HTTP_FORBIDDEN,
                             "Connect to remote machine blocked");
    }

    /*
     * Step Two: Make the Connection
     *
     * We have determined who to connect to. Now make the connection.
     */

    /*
     * At this point we have a list of one or more IP addresses of
     * the machine to connect to. If configured, reorder this
     * list so that the "best candidate" is tried first. "best
     * candidate" could mean the least loaded server, the fastest
     * responding server, whatever.
     *
     * For now we do nothing, ie we get DNS round robin.
     * XXX FIXME
     */
    failed = ap_proxy_connect_to_backend(&sock, "CONNECT", nexthop,
                                         connectname, conf, r);

    /* handle a permanent error from the above loop */
    if (failed) {
        if (proxyname) {
            return DECLINED;
        }
        else {
            return HTTP_SERVICE_UNAVAILABLE;
        }
    }

    /* setup polling for connection */
    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "setting up poll()");

    if ((rv = apr_pollset_create(&pollset, 2, r->pool, 0)) != APR_SUCCESS) {
        apr_socket_close(sock);
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01020)
                      "error apr_pollset_create()");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Add client side to the poll */
    pollfd.p = r->pool;
    pollfd.desc_type = APR_POLL_SOCKET;
    pollfd.reqevents = APR_POLLIN;
    pollfd.desc.s = client_socket;
    pollfd.client_data = NULL;
    apr_pollset_add(pollset, &pollfd);

    /* Add the server side to the poll */
    pollfd.desc.s = sock;
    apr_pollset_add(pollset, &pollfd);

    /*
     * Step Three: Send the Request
     *
     * Send the HTTP/1.1 CONNECT request to the remote server
     */

    backconn = ap_run_create_connection(c->pool, r->server, sock, c->id,
                                        c->sbh, c->bucket_alloc);
    if (!backconn) {
        /* peer reset */
        ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01021)
                      "an error occurred creating a new connection "
                      "to %pI (%s)", nexthop, connectname);
        apr_socket_close(sock);
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    ap_proxy_ssl_disable(backconn);
    rc = ap_run_pre_connection(backconn, sock);
    if (rc != OK && rc != DONE) {
        backconn->aborted = 1;
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01022)
                      "pre_connection setup failed (%d)", rc);
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
                  "connection complete to %pI (%s)",
                  nexthop, connectname);

    apr_table_setn(r->notes, "proxy-source-port",
                   apr_psprintf(r->pool, "%hu", backconn->local_addr->port));

    /* If we are connecting through a remote proxy, we need to pass
     * the CONNECT request on to it.
     */
    if (proxyport) {
        /* FIXME: Error checking ignored. */
        ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
                      "sending the CONNECT request to the remote proxy");
        ap_fprintf(backconn->output_filters, bb,
                   "CONNECT %s HTTP/1.0" CRLF, r->uri);
        ap_fprintf(backconn->output_filters, bb,
                   "Proxy-agent: %s" CRLF CRLF, ap_get_server_banner());
        ap_fflush(backconn->output_filters, bb);
    }
    else {
        ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "Returning 200 OK");
        nbytes = apr_snprintf(buffer, sizeof(buffer),
                              "HTTP/1.0 200 Connection Established" CRLF);
        ap_xlate_proto_to_ascii(buffer, nbytes);
        ap_fwrite(c->output_filters, bb, buffer, nbytes);
        nbytes = apr_snprintf(buffer, sizeof(buffer),
                              "Proxy-agent: %s" CRLF CRLF,
                              ap_get_server_banner());
        ap_xlate_proto_to_ascii(buffer, nbytes);
        ap_fwrite(c->output_filters, bb, buffer, nbytes);
        ap_fflush(c->output_filters, bb);
#if 0
        /* This is safer code, but it doesn't work yet.  I'm leaving it
         * here so that I can fix it later.
         */
        r->status = HTTP_OK;
        r->header_only = 1;
        apr_table_set(r->headers_out, "Proxy-agent: %s",
                      ap_get_server_banner());
        ap_rflush(r);
#endif
    }

    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "setting up poll()");

    /*
     * Step Four: Handle Data Transfer
     *
     * Handle two way transfer of data over the socket (this is a tunnel).
     */

    /* we are now acting as a tunnel - the input/output filter stacks should
     * not contain any non-connection filters.
     */
    r->output_filters = c->output_filters;
    r->proto_output_filters = c->output_filters;
    r->input_filters = c->input_filters;
    r->proto_input_filters = c->input_filters;
/*    r->sent_bodyct = 1; */

    while (1) { /* Infinite loop until error (one side closes the connection) */
        if ((rv = apr_pollset_poll(pollset, -1, &pollcnt, &signalled))
            != APR_SUCCESS) {
            if (APR_STATUS_IS_EINTR(rv)) {
                continue;
            }
            apr_socket_close(sock);
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01023)
                          "error apr_poll()");
            return HTTP_INTERNAL_SERVER_ERROR;
        }
#ifdef DEBUGGING
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01024)
                      "woke from poll(), i=%d", pollcnt);
#endif

        for (pi = 0; pi < pollcnt; pi++) {
            const apr_pollfd_t *cur = &signalled[pi];

            if (cur->desc.s == sock) {
                pollevent = cur->rtnevents;
                if (pollevent & APR_POLLIN) {
#ifdef DEBUGGING
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01025)
                                  "sock was readable");
#endif
                    rv = proxy_connect_transfer(r, backconn, c, bb, "sock");
                }
                else if ((pollevent & APR_POLLERR)
                         || (pollevent & APR_POLLHUP)) {
                    rv = APR_EPIPE;
                    ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, APLOGNO(01026)
                                  "err/hup on backconn");
                }
                if (rv != APR_SUCCESS)
                    client_error = 1;
            }
            else if (cur->desc.s == client_socket) {
                pollevent = cur->rtnevents;
                if (pollevent & APR_POLLIN) {
#ifdef DEBUGGING
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01027)
                                  "client was readable");
#endif
                    rv = proxy_connect_transfer(r, c, backconn, bb, "client");
                }
            }
            else {
                rv = APR_EBADF;
                ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01028)
                              "unknown socket in pollset");
            }
        }
        if (rv != APR_SUCCESS) {
            break;
        }
    }

    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
                  "finished with poll() - cleaning up");

    /*
     * Step Five: Clean Up
     *
     * Close the socket and clean up
     */

    if (client_error)
        apr_socket_close(sock);
    else
        ap_lingering_close(backconn);

    c->aborted = 1;
    c->keepalive = AP_CONN_CLOSE;

    return OK;
}
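/* Sketch of the allowed_port() check used by the CONNECT handler above: the
 * requested port must fall inside one of the configured ranges (the
 * AllowCONNECT directive).  The type and field names below are assumptions
 * made for illustration; the module's real connect_conf/allowed-port types
 * may differ. */
typedef struct {
    int first;  /* first port of an allowed range */
    int last;   /* last port of an allowed range (== first for a single port) */
} allowed_port_range;

static int allowed_port_sketch(apr_array_header_t *ranges, int port)
{
    int i;
    for (i = 0; i < ranges->nelts; i++) {
        const allowed_port_range *ap =
            &APR_ARRAY_IDX(ranges, i, allowed_port_range);
        if (port >= ap->first && port <= ap->last)
            return 1;
    }
    return 0;
}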
dav_error *
dav_svn__get_locations_report(const dav_resource *resource,
                              const apr_xml_doc *doc,
                              ap_filter_t *output)
{
  svn_error_t *serr;
  dav_error *derr = NULL;
  apr_status_t apr_err;
  apr_bucket_brigade *bb;
  dav_svn__authz_read_baton arb;

  /* The parameters to do the operation on. */
  const char *relative_path = NULL;
  const char *abs_path;
  svn_revnum_t peg_revision = SVN_INVALID_REVNUM;
  apr_array_header_t *location_revisions;

  /* XML Parsing Variables */
  int ns;
  apr_xml_elem *child;

  apr_hash_t *fs_locations;

  location_revisions = apr_array_make(resource->pool, 0,
                                      sizeof(svn_revnum_t));

  /* Sanity check. */
  ns = dav_svn__find_ns(doc->namespaces, SVN_XML_NAMESPACE);
  if (ns == -1)
    {
      return dav_svn__new_error_tag(resource->pool, HTTP_BAD_REQUEST, 0,
                                    "The request does not contain the 'svn:' "
                                    "namespace, so it is not going to have "
                                    "certain required elements.",
                                    SVN_DAV_ERROR_NAMESPACE,
                                    SVN_DAV_ERROR_TAG);
    }

  /* Gather the parameters. */
  for (child = doc->root->first_child; child != NULL; child = child->next)
    {
      /* If this element isn't one of ours, then skip it. */
      if (child->ns != ns)
        continue;

      if (strcmp(child->name, "peg-revision") == 0)
        peg_revision = SVN_STR_TO_REV(dav_xml_get_cdata(child,
                                                        resource->pool, 1));
      else if (strcmp(child->name, "location-revision") == 0)
        {
          svn_revnum_t revision
            = SVN_STR_TO_REV(dav_xml_get_cdata(child, resource->pool, 1));
          APR_ARRAY_PUSH(location_revisions, svn_revnum_t) = revision;
        }
      else if (strcmp(child->name, "path") == 0)
        {
          relative_path = dav_xml_get_cdata(child, resource->pool, 0);
          if ((derr = dav_svn__test_canonical(relative_path, resource->pool)))
            return derr;
        }
    }

  /* Now we should have the parameters ready - let's
     check if they are all present. */
  if (! (relative_path && SVN_IS_VALID_REVNUM(peg_revision)))
    {
      return dav_svn__new_error_tag(resource->pool, HTTP_BAD_REQUEST, 0,
                                    "Not all parameters passed.",
                                    SVN_DAV_ERROR_NAMESPACE,
                                    SVN_DAV_ERROR_TAG);
    }

  /* Append the relative path to the base FS path to get an absolute
     repository path. */
  abs_path = svn_path_join(resource->info->repos_path, relative_path,
                           resource->pool);

  /* Build an authz read baton */
  arb.r = resource->info->r;
  arb.repos = resource->info->repos;

  serr = svn_repos_trace_node_locations(resource->info->repos->fs,
                                        &fs_locations, abs_path, peg_revision,
                                        location_revisions,
                                        dav_svn__authz_read_func(&arb), &arb,
                                        resource->pool);

  if (serr)
    {
      return dav_svn__convert_err(serr, HTTP_INTERNAL_SERVER_ERROR,
                                  serr->message, resource->pool);
    }

  bb = apr_brigade_create(resource->pool, output->c->bucket_alloc);

  apr_err = send_get_locations_report(output, bb, resource, fs_locations);

  if (apr_err)
    derr = dav_svn__convert_err(svn_error_create(apr_err, 0, NULL),
                                HTTP_INTERNAL_SERVER_ERROR,
                                "Error writing REPORT response.",
                                resource->pool);

  /* Flush the contents of the brigade (returning an error only if we
     don't already have one). */
  if (((apr_err = ap_fflush(output, bb))) && (! derr))
    return dav_svn__convert_err(svn_error_create(apr_err, 0, NULL),
                                HTTP_INTERNAL_SERVER_ERROR,
                                "Error flushing brigade.",
                                resource->pool);

  return derr;
}
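/* Illustrative request body for the get-locations REPORT handled above.  The
 * handler keys off the child element names and the svn: namespace, not the
 * root element; the path and revision values here are made up for this
 * example. */
static const char example_get_locations_request[] =
  "<S:get-locations xmlns:S=\"svn:\">"
  "<S:path>trunk/README</S:path>"
  "<S:peg-revision>1234</S:peg-revision>"
  "<S:location-revision>1200</S:location-revision>"
  "<S:location-revision>1100</S:location-revision>"
  "</S:get-locations>";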
dav_error *dav_repos_deliver_principal_property_search(request_rec *r,
                                                       const dav_resource *resource,
                                                       const apr_xml_doc *doc,
                                                       ap_filter_t *output)
{
    /* this buffers the output for a bit and is automatically flushed,
       at appropriate times, by the Apache filter system. */
    apr_bucket_brigade *bb;
    apr_pool_t *pool = resource->pool;
    dav_repos_db *db = resource->info->db;
    dav_repos_resource *db_r = (dav_repos_resource *) resource->info->db_r;
    apr_xml_elem *principal_property_search;
    apr_xml_elem *elem;
    apr_xml_elem *prop;
    apr_xml_elem *props;
    apr_xml_elem *cur_prop;
    apr_xml_elem *match;
    int flag;

    TRACE();

    principal_property_search =
        dav_find_child(doc->root, "principal-property-search");
    props = dav_find_child(doc->root, "prop");

    sabridge_get_collection_children(db, db_r, 1, "read", NULL, NULL, NULL);

    bb = apr_brigade_create(pool, output->c->bucket_alloc);

    r->status = HTTP_MULTI_STATUS;

    send_xml(bb, output, "<D:multistatus xmlns:D=\"DAV:\">" DEBUG_CR);

    while (db_r != NULL) {
        flag = 1;

        /* Every property-search element must match for this principal. */
        for (elem = principal_property_search->first_child; elem && flag;
             elem = elem->next) {
            if (!strcmp(elem->name, "property-search")) {
                prop = dav_find_child(elem, "prop");
                match = dav_find_child(elem, "match");
                dav_repos_build_lpr_hash(db_r);
                const char *val = apr_hash_get(db_r->lpr_hash,
                                               prop->first_child->name,
                                               APR_HASH_KEY_STRING);
                if (!val || !strstr(val, match->first_cdata.first->text))
                    flag = 0;
            }
        }

        if (flag) {
            send_xml(bb, output, "<D:response>");
            send_xml(bb, output, dav_repos_mk_href(pool, db_r->uri));

            send_xml(bb, output, "<D:propstat>");
            send_xml(bb, output, "<D:prop>");

            /* Report the requested properties, iterating with a separate
               cursor so the <D:prop> list is still available for the next
               matching principal. */
            for (cur_prop = props->first_child; cur_prop;
                 cur_prop = cur_prop->next) {
                const char *val;
                val = apr_hash_get(db_r->lpr_hash, cur_prop->name,
                                   APR_HASH_KEY_STRING);
                const char *str = apr_psprintf(pool, "<D:%s>%s</D:%s>" DEBUG_CR,
                                               cur_prop->name,
                                               apr_xml_quote_string(pool, val, 0),
                                               cur_prop->name);
                send_xml(bb, output, str);
            }

            send_xml(bb, output, "</D:prop>");
            send_xml(bb, output, "<D:status>HTTP/1.1 200 OK</D:status>" DEBUG_CR);
            send_xml(bb, output, "</D:propstat>");

            send_xml(bb, output, "</D:response>");
        }
        db_r = db_r->next;
    }

    send_xml(bb, output, "</D:multistatus>");

    /* flush the contents of the brigade */
    ap_fflush(output, bb);

    return NULL;
}
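/* Illustrative DAV:principal-property-search request body for the handler
 * above (the shape follows RFC 3744; the displayname/match values are
 * examples only): each property-search pairs a prop to inspect with a match
 * substring, and the trailing prop names the properties to return for
 * matching principals. */
static const char example_principal_property_search_request[] =
  "<D:principal-property-search xmlns:D=\"DAV:\">"
  "<D:property-search>"
  "<D:prop><D:displayname/></D:prop>"
  "<D:match>Julia</D:match>"
  "</D:property-search>"
  "<D:prop><D:displayname/></D:prop>"
  "</D:principal-property-search>";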
dav_error *dav_repos_deliver_principal_match(request_rec *r,
                                             const dav_resource *resource,
                                             const apr_xml_doc *doc,
                                             ap_filter_t *output)
{
    /* this buffers the output for a bit and is automatically flushed,
       at appropriate times, by the Apache filter system. */
    apr_bucket_brigade *bb;
    apr_pool_t *pool = resource->pool;
    dav_repos_db *db = resource->info->db;
    dav_repos_resource *db_r = (dav_repos_resource *) resource->info->db_r;
    apr_xml_elem *principal_properties;
    char *req_username = r->user;
    long principal_id;
    dav_error *err = NULL;

    TRACE();

    if (!req_username)
        req_username = "******";

    if ((err = dbms_get_principal_id_from_name(pool, db, req_username,
                                               &principal_id))) {
        return err;
    }

    principal_properties = dav_find_child(doc->root, "principal-property");

    sabridge_get_collection_children(db, db_r, DAV_INFINITY, "read",
                                     NULL, NULL, NULL);

    bb = apr_brigade_create(pool, output->c->bucket_alloc);

    r->status = HTTP_MULTI_STATUS;

    send_xml(bb, output, "<D:multistatus xmlns:D=\"DAV:\">" DEBUG_CR);

    while ((db_r = db_r->next)) {
        /* Currently supporting DAV:owner only */
        if ((principal_properties
             && !strcmp(principal_properties->name, "owner")
             && db_r->owner_id == principal_id)
            || (!principal_properties   /* Found no principal_properties */
                && !strcmp(db_r->displayname, req_username))) {
            send_xml(bb, output, "<D:response>");

            const char *str = apr_psprintf(pool, "<D:href>%s</D:href>" DEBUG_CR,
                                           apr_xml_quote_string(pool,
                                                                db_r->uri, 0));
            send_xml(bb, output, str);

            send_xml(bb, output, "<D:status>HTTP/1.1 200 OK</D:status>" DEBUG_CR);
            send_xml(bb, output, "</D:response>");
        }
    }

    send_xml(bb, output, "</D:multistatus>");

    /* flush the contents of the brigade */
    ap_fflush(output, bb);

    return NULL;
}
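/* Illustrative DAV:principal-match request body for the handler above,
 * following RFC 3744.  With a principal-property of DAV:owner, the handler
 * reports resources owned by the authenticated user; with no
 * principal-property element it falls back to matching the principal's
 * displayname against the request user. */
static const char example_principal_match_request[] =
  "<D:principal-match xmlns:D=\"DAV:\">"
  "<D:principal-property><D:owner/></D:principal-property>"
  "</D:principal-match>";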
dav_error *dav_repos_deliver_acl_principal_prop_set(request_rec *r,
                                                    const dav_resource *resource,
                                                    const apr_xml_doc *doc,
                                                    ap_filter_t *output)
{
    /* this buffers the output for a bit and is automatically flushed,
       at appropriate times, by the Apache filter system. */
    apr_bucket_brigade *bb;
    apr_pool_t *pool = resource->pool;
    dav_repos_db *db = resource->info->db;
    dav_repos_resource *db_r = (dav_repos_resource *) resource->info->db_r;
    apr_xml_elem *props;
    apr_xml_elem *cur_prop;
    dav_repos_resource *principals = NULL;

    TRACE();

    props = dav_find_child(doc->root, "prop");

    dbms_get_principals(db, pool, db_r, principals);

    bb = apr_brigade_create(pool, output->c->bucket_alloc);

    r->status = HTTP_MULTI_STATUS;

    send_xml(bb, output, "<D:multistatus xmlns:D=\"DAV:\">" DEBUG_CR);

    while (principals != NULL) {
        sabridge_get_property(db, principals);
        dav_repos_build_lpr_hash(principals);

        send_xml(bb, output, "<D:response>");
        send_xml(bb, output, dav_repos_mk_href(pool, principals->uri));

        send_xml(bb, output, "<D:propstat>");
        send_xml(bb, output, "<D:prop>");

        /* Report the requested properties, iterating with a separate cursor
           so the <D:prop> list is still available for the next principal. */
        for (cur_prop = props->first_child; cur_prop; cur_prop = cur_prop->next) {
            const char *val;
            val = apr_hash_get(principals->lpr_hash, cur_prop->name,
                               APR_HASH_KEY_STRING);
            const char *str = apr_psprintf(pool, "<D:%s>%s</D:%s>" DEBUG_CR,
                                           cur_prop->name,
                                           apr_xml_quote_string(pool, val, 0),
                                           cur_prop->name);
            send_xml(bb, output, str);
        }

        send_xml(bb, output, "</D:prop>");
        send_xml(bb, output, "<D:status>HTTP/1.1 200 OK</D:status>" DEBUG_CR);
        send_xml(bb, output, "</D:propstat>");

        send_xml(bb, output, "</D:response>");

        principals = principals->next;
    }

    send_xml(bb, output, "</D:multistatus>");

    /* flush the contents of the brigade */
    ap_fflush(output, bb);

    return NULL;
}
dav_error *
dav_svn__replay_report(const dav_resource *resource,
                       const apr_xml_doc *doc,
                       ap_filter_t *output)
{
  svn_revnum_t low_water_mark = SVN_INVALID_REVNUM;
  svn_revnum_t rev = SVN_INVALID_REVNUM;
  const svn_delta_editor_t *editor;
  svn_boolean_t send_deltas = TRUE;
  dav_svn__authz_read_baton arb;
  const char *base_dir = resource->info->repos_path;
  apr_bucket_brigade *bb;
  apr_xml_elem *child;
  svn_fs_root_t *root;
  svn_error_t *err;
  void *edit_baton;
  int ns;

  /* The request won't have a repos_path if it's for the root. */
  if (! base_dir)
    base_dir = "";

  arb.r = resource->info->r;
  arb.repos = resource->info->repos;

  ns = dav_svn__find_ns(doc->namespaces, SVN_XML_NAMESPACE);

  if (ns == -1)
    return dav_svn__new_error_tag(resource->pool, HTTP_BAD_REQUEST, 0,
                                  "The request does not contain the 'svn:' "
                                  "namespace, so it is not going to have an "
                                  "svn:revision element. That element is "
                                  "required.",
                                  SVN_DAV_ERROR_NAMESPACE,
                                  SVN_DAV_ERROR_TAG);

  for (child = doc->root->first_child; child != NULL; child = child->next)
    {
      if (child->ns == ns)
        {
          const char *cdata;

          if (strcmp(child->name, "revision") == 0)
            {
              cdata = dav_xml_get_cdata(child, resource->pool, 1);
              if (! cdata)
                return malformed_element_error("revision", resource->pool);
              rev = SVN_STR_TO_REV(cdata);
            }
          else if (strcmp(child->name, "low-water-mark") == 0)
            {
              cdata = dav_xml_get_cdata(child, resource->pool, 1);
              if (! cdata)
                return malformed_element_error("low-water-mark",
                                               resource->pool);
              low_water_mark = SVN_STR_TO_REV(cdata);
            }
          else if (strcmp(child->name, "send-deltas") == 0)
            {
              cdata = dav_xml_get_cdata(child, resource->pool, 1);
              if (! cdata)
                return malformed_element_error("send-deltas", resource->pool);
              send_deltas = atoi(cdata);
            }
        }
    }

  if (! SVN_IS_VALID_REVNUM(rev))
    return dav_svn__new_error_tag
      (resource->pool, HTTP_BAD_REQUEST, 0,
       "Request was missing the revision argument.",
       SVN_DAV_ERROR_NAMESPACE,
       SVN_DAV_ERROR_TAG);

  if (! SVN_IS_VALID_REVNUM(low_water_mark))
    return dav_svn__new_error_tag
      (resource->pool, HTTP_BAD_REQUEST, 0,
       "Request was missing the low-water-mark argument.",
       SVN_DAV_ERROR_NAMESPACE,
       SVN_DAV_ERROR_TAG);

  bb = apr_brigade_create(resource->pool, output->c->bucket_alloc);

  if ((err = svn_fs_revision_root(&root, resource->info->repos->fs,
                                  rev, resource->pool)))
    return dav_svn__convert_err(err, HTTP_INTERNAL_SERVER_ERROR,
                                "Couldn't retrieve revision root",
                                resource->pool);

  make_editor(&editor, &edit_baton, bb, output, resource->pool);

  if ((err = svn_repos_replay2(root, base_dir, low_water_mark,
                               send_deltas, editor, edit_baton,
                               dav_svn__authz_read_func(&arb), &arb,
                               resource->pool)))
    return dav_svn__convert_err(err, HTTP_INTERNAL_SERVER_ERROR,
                                "Problem replaying revision",
                                resource->pool);

  if ((err = end_report(edit_baton)))
    return dav_svn__convert_err(err, HTTP_INTERNAL_SERVER_ERROR,
                                "Problem closing editor drive",
                                resource->pool);

  {
    const char *action, *log_base_dir;
    if (base_dir && base_dir[0] != '\0')
      log_base_dir = svn_path_uri_encode(base_dir, resource->info->r->pool);
    else
      log_base_dir = "/";
    action = apr_psprintf(resource->info->r->pool, "replay %s r%ld",
                          log_base_dir, rev);
    dav_svn__operational_log(resource->info, action);
  }

  ap_fflush(output, bb);
  return NULL;
}
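/* Illustrative request body for the replay REPORT above.  The element names
 * are the ones the parser looks for; the root element name and the revision
 * numbers are examples only (the handler only checks the svn: namespace and
 * the child elements, not the root). */
static const char example_replay_report_request[] =
  "<S:replay-report xmlns:S=\"svn:\">"
  "<S:revision>1234</S:revision>"
  "<S:low-water-mark>0</S:low-water-mark>"
  "<S:send-deltas>1</S:send-deltas>"
  "</S:replay-report>";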
/* This implements `svn_log_entry_receiver_t'.
   BATON is a `struct log_receiver_baton *'.  */
static svn_error_t *
log_receiver(void *baton,
             svn_log_entry_t *log_entry,
             apr_pool_t *pool)
{
  struct log_receiver_baton *lrb = baton;
  apr_pool_t *iterpool = svn_pool_create(pool);

  SVN_ERR(maybe_send_header(lrb));

  if (log_entry->revision == SVN_INVALID_REVNUM)
    {
      /* If the stack depth is zero, we've seen the last revision, so don't
         send it, just return.  The footer will be sent later. */
      if (lrb->stack_depth == 0)
        return SVN_NO_ERROR;
      else
        lrb->stack_depth--;
    }

  SVN_ERR(dav_svn__brigade_printf(lrb->bb, lrb->output,
                                  "<S:log-item>" DEBUG_CR
                                  "<D:version-name>%ld"
                                  "</D:version-name>" DEBUG_CR,
                                  log_entry->revision));

  if (log_entry->revprops)
    {
      apr_hash_index_t *hi;
      for (hi = apr_hash_first(pool, log_entry->revprops);
           hi != NULL;
           hi = apr_hash_next(hi))
        {
          char *name;
          void *val;
          const svn_string_t *value;
          const char *encoding_str = "";

          svn_pool_clear(iterpool);
          apr_hash_this(hi, (void *)&name, NULL, &val);
          value = val;

          /* If the client is okay with us encoding binary (or really, any
             non-XML-safe) property values, do so as necessary. */
          if (lrb->encode_binary_props)
            {
              if (! svn_xml_is_xml_safe(value->data, value->len))
                {
                  value = svn_base64_encode_string2(value, TRUE, iterpool);
                  encoding_str = " encoding=\"base64\"";
                }
            }

          if (strcmp(name, SVN_PROP_REVISION_AUTHOR) == 0)
            SVN_ERR(dav_svn__brigade_printf
                    (lrb->bb, lrb->output,
                     "<D:creator-displayname%s>%s</D:creator-displayname>"
                     DEBUG_CR, encoding_str,
                     apr_xml_quote_string(iterpool, value->data, 0)));
          else if (strcmp(name, SVN_PROP_REVISION_DATE) == 0)
            /* ### this should be DAV:creation-date, but we need to format
               ### that date a bit differently */
            SVN_ERR(dav_svn__brigade_printf
                    (lrb->bb, lrb->output,
                     "<S:date%s>%s</S:date>" DEBUG_CR, encoding_str,
                     apr_xml_quote_string(iterpool, value->data, 0)));
          else if (strcmp(name, SVN_PROP_REVISION_LOG) == 0)
            SVN_ERR(dav_svn__brigade_printf
                    (lrb->bb, lrb->output,
                     "<D:comment%s>%s</D:comment>" DEBUG_CR, encoding_str,
                     apr_xml_quote_string(pool,
                                          svn_xml_fuzzy_escape(value->data,
                                                               iterpool),
                                          0)));
          else
            SVN_ERR(dav_svn__brigade_printf
                    (lrb->bb, lrb->output,
                     "<S:revprop name=\"%s\"%s>%s</S:revprop>" DEBUG_CR,
                     apr_xml_quote_string(iterpool, name, 0), encoding_str,
                     apr_xml_quote_string(iterpool, value->data, 0)));
        }
    }

  if (log_entry->has_children)
    {
      SVN_ERR(dav_svn__brigade_puts(lrb->bb, lrb->output,
                                    "<S:has-children/>"));
      lrb->stack_depth++;
    }

  if (log_entry->subtractive_merge)
    SVN_ERR(dav_svn__brigade_puts(lrb->bb, lrb->output,
                                  "<S:subtractive-merge/>"));

  if (log_entry->changed_paths2)
    {
      apr_hash_index_t *hi;
      char *path;

      for (hi = apr_hash_first(pool, log_entry->changed_paths2);
           hi != NULL;
           hi = apr_hash_next(hi))
        {
          void *val;
          svn_log_changed_path2_t *log_item;
          const char *close_element = NULL;

          svn_pool_clear(iterpool);
          apr_hash_this(hi, (void *) &path, NULL, &val);
          log_item = val;

          /* ### todo: is there a D: namespace equivalent for
             `changed-path'?  Should use it if so. */
          switch (log_item->action)
            {
            case 'A':
            case 'R':
              SVN_ERR(start_path_with_copy_from(&close_element, lrb,
                                                log_item, iterpool));
              break;

            case 'D':
              SVN_ERR(dav_svn__brigade_puts(lrb->bb, lrb->output,
                                            "<S:deleted-path"));
              close_element = "S:deleted-path";
              break;

            case 'M':
              SVN_ERR(dav_svn__brigade_puts(lrb->bb, lrb->output,
                                            "<S:modified-path"));
              close_element = "S:modified-path";
              break;

            default:
              break;
            }

          /* If we need to close the element, then send the attributes
             that apply to all changed items and then close the element. */
          if (close_element)
            SVN_ERR(dav_svn__brigade_printf
                    (lrb->bb, lrb->output,
                     " node-kind=\"%s\""
                     " text-mods=\"%s\""
                     " prop-mods=\"%s\">%s</%s>" DEBUG_CR,
                     svn_node_kind_to_word(log_item->node_kind),
                     svn_tristate__to_word(log_item->text_modified),
                     svn_tristate__to_word(log_item->props_modified),
                     apr_xml_quote_string(iterpool, path, 0),
                     close_element));
        }
    }

  svn_pool_destroy(iterpool);

  SVN_ERR(dav_svn__brigade_puts(lrb->bb, lrb->output,
                                "</S:log-item>" DEBUG_CR));

  /* In general APR will flush the brigade every 8000 bytes through the
     filter stack, but log items may not be generated that fast, especially
     in combination with authz and busy servers.  We now explicitly flush
     after log-item 4, 16, 64 and 256 to produce a few results fast.

     This introduces 4 full flushes of our brigade and the installed output
     filters at growing intervals and then falls back to the standard
     buffering of 8000 bytes + whatever buffers are added in output
     filters. */
  lrb->result_count++;
  if (lrb->result_count == lrb->next_forced_flush)
    {
      apr_status_t apr_err;

      /* This flush is similar to that in dav_svn__final_flush_or_error().

         Compared to using ap_filter_flush(), which we use in other places,
         this adds a flush frame before flushing the brigade, to make output
         filters perform a flush as well. */

      /* No brigade empty check.  We want output filters to flush anyway. */
      apr_err = ap_fflush(lrb->output, lrb->bb);
      if (apr_err)
        return svn_error_create(apr_err, NULL, NULL);

      /* Check for an aborted connection, just like our brigade write
         helper functions, since the brigade functions don't appear to
         return useful errors when the connection is dropped. */
      if (lrb->output->c->aborted)
        return svn_error_create(SVN_ERR_APMOD_CONNECTION_ABORTED,
                                NULL, NULL);

      if (lrb->result_count < 256)
        lrb->next_forced_flush = lrb->next_forced_flush * 4;
    }

  return SVN_NO_ERROR;
}
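/* log_receiver() above calls maybe_send_header(), which is not part of this
 * excerpt.  The sketch below shows the assumed behaviour: emit the opening
 * <S:log-report> element exactly once, before the first log item.  The
 * needs_header field name is an assumption made for illustration; the real
 * baton may track this differently. */
static svn_error_t *
maybe_send_header_sketch(struct log_receiver_baton *lrb)
{
  if (! lrb->needs_header)
    return SVN_NO_ERROR;

  SVN_ERR(dav_svn__brigade_puts(lrb->bb, lrb->output,
                                DAV_XML_HEADER DEBUG_CR
                                "<S:log-report xmlns:S=\"" SVN_XML_NAMESPACE "\" "
                                "xmlns:D=\"DAV:\">" DEBUG_CR));
  lrb->needs_header = FALSE;

  return SVN_NO_ERROR;
}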