static int refresh_lock(struct remote_lock *lock) { struct active_request_slot *slot; struct slot_results results; struct curl_slist *dav_headers; int rc = 0; lock->refreshing = 1; dav_headers = get_dav_token_headers(lock, DAV_HEADER_IF | DAV_HEADER_TIMEOUT); slot = get_active_slot(); slot->results = &results; curl_setup_http_get(slot->curl, lock->url, DAV_LOCK); curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); if (start_active_slot(slot)) { run_active_slot(slot); if (results.curl_result != CURLE_OK) { fprintf(stderr, "LOCK HTTP error %ld\n", results.http_code); } else { lock->start_time = time(NULL); rc = 1; } } lock->refreshing = 0; curl_slist_free_all(dav_headers); return rc; }
static void update_remote_info_refs(struct remote_lock *lock) { struct buffer buffer = { STRBUF_INIT, 0 }; struct active_request_slot *slot; struct slot_results results; struct curl_slist *dav_headers; remote_ls("refs/", (PROCESS_FILES | RECURSIVE), add_remote_info_ref, &buffer.buf); if (!aborted) { dav_headers = get_dav_token_headers(lock, DAV_HEADER_IF); slot = get_active_slot(); slot->results = &results; curl_setup_http(slot->curl, lock->url, DAV_PUT, &buffer, fwrite_null); curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); if (start_active_slot(slot)) { run_active_slot(slot); if (results.curl_result != CURLE_OK) { fprintf(stderr, "PUT error: curl result=%d, HTTP code=%ld\n", results.curl_result, results.http_code); } } } strbuf_release(&buffer.buf); }
static int update_remote(unsigned char *sha1, struct remote_lock *lock) { struct active_request_slot *slot; struct slot_results results; struct buffer out_buffer = { STRBUF_INIT, 0 }; struct curl_slist *dav_headers; dav_headers = get_dav_token_headers(lock, DAV_HEADER_IF); strbuf_addf(&out_buffer.buf, "%s\n", sha1_to_hex(sha1)); slot = get_active_slot(); slot->results = &results; curl_setup_http(slot->curl, lock->url, DAV_PUT, &out_buffer, fwrite_null); curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); if (start_active_slot(slot)) { run_active_slot(slot); strbuf_release(&out_buffer.buf); if (results.curl_result != CURLE_OK) { fprintf(stderr, "PUT error: curl result=%d, HTTP code=%ld\n", results.curl_result, results.http_code); /* We should attempt recovery? */ return 0; } } else { strbuf_release(&out_buffer.buf); fprintf(stderr, "Unable to start PUT request\n"); return 0; } return 1; }
static void fetch_alternates(struct walker *walker, const char *base) { struct strbuf buffer = STRBUF_INIT; char *url; struct active_request_slot *slot; struct alternates_request alt_req; struct walker_data *cdata = walker->data; /* * If another request has already started fetching alternates, * wait for them to arrive and return to processing this request's * curl message */ #ifdef USE_CURL_MULTI while (cdata->got_alternates == 0) { step_active_slots(); } #endif /* Nothing to do if they've already been fetched */ if (cdata->got_alternates == 1) return; /* Start the fetch */ cdata->got_alternates = 0; if (walker->get_verbosely) fprintf(stderr, "Getting alternates list for %s\n", base); url = xmalloc(strlen(base) + 31); sprintf(url, "%s/objects/info/http-alternates", base); /* * Use a callback to process the result, since another request * may fail and need to have alternates loaded before continuing */ slot = get_active_slot(); slot->callback_func = process_alternates_response; alt_req.walker = walker; slot->callback_data = &alt_req; curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer); curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer); curl_easy_setopt(slot->curl, CURLOPT_URL, url); alt_req.base = base; alt_req.url = url; alt_req.buffer = &buffer; alt_req.http_specific = 1; alt_req.slot = slot; if (start_active_slot(slot)) run_active_slot(slot); else cdata->got_alternates = -1; strbuf_release(&buffer); free(url); }
/*
 * Fetch a single loose object whose request must already be on the
 * object queue (see the queueing code elsewhere in this file).
 *
 * Returns 0 on success (or if the object already exists locally),
 * -1/error() otherwise.
 */
static int fetch_object(struct walker *walker, struct alt_base *repo, unsigned char *sha1)
{
	char *hex = sha1_to_hex(sha1);
	int ret = 0;
	struct object_request *obj_req = object_queue_head;

	/* Locate the queued request for this sha1. */
	while (obj_req != NULL && hashcmp(obj_req->sha1, sha1))
		obj_req = obj_req->next;
	if (obj_req == NULL)
		return error("Couldn't find request for %s in the queue", hex);

	/* Already present locally (e.g. arrived in a pack meanwhile). */
	if (has_sha1_file(obj_req->sha1)) {
		abort_object_request(obj_req);
		return 0;
	}

	/* Drive the request until it leaves the WAITING state... */
#ifdef USE_CURL_MULTI
	while (obj_req->state == WAITING) {
		step_active_slots();
	}
#else
	start_object_request(walker, obj_req);
#endif

	/* ...then until the transfer itself completes. */
	while (obj_req->state == ACTIVE) {
		run_active_slot(obj_req->slot);
	}
	/* Close the local temp file descriptor, if still open. */
	if (obj_req->local != -1) {
		close(obj_req->local);
		obj_req->local = -1;
	}

	if (obj_req->state == ABORTED) {
		ret = error("Request for %s aborted", hex);
	} else if (obj_req->curl_result != CURLE_OK &&
		   obj_req->http_code != 416) {
		/* 416 = requested range not satisfiable: resume past EOF, OK. */
		if (missing_target(obj_req))
			ret = -1; /* Be silent, it is probably in a pack. */
		else
			ret = error("%s (curl_result = %d, http_code = %ld, sha1 = %s)",
				    obj_req->errorstr, obj_req->curl_result,
				    obj_req->http_code, hex);
	} else if (obj_req->zret != Z_STREAM_END) {
		/* Inflate did not finish cleanly: corrupt download. */
		walker->corrupt_object_found++;
		ret = error("File %s (%s) corrupt", hex, obj_req->url);
	} else if (hashcmp(obj_req->sha1, obj_req->real_sha1)) {
		ret = error("File %s has bad hash", hex);
	} else if (obj_req->rename < 0) {
		ret = error("unable to write sha1 filename %s",
			    obj_req->filename);
	}

	release_object_request(obj_req);
	return ret;
}
static int http_fetch_pack(struct walker *walker, struct alt_base *repo, unsigned char *sha1) { struct packed_git *target; int ret; struct slot_results results; struct http_pack_request *preq; if (fetch_indices(walker, repo)) return -1; target = find_sha1_pack(sha1, repo->packs); if (!target) return -1; if (walker->get_verbosely) { fprintf(stderr, "Getting pack %s\n", sha1_to_hex(target->sha1)); fprintf(stderr, " which contains %s\n", sha1_to_hex(sha1)); } preq = new_http_pack_request(target, repo->base); if (preq == NULL) goto abort; preq->lst = &repo->packs; preq->slot->results = &results; if (start_active_slot(preq->slot)) { run_active_slot(preq->slot); if (results.curl_result != CURLE_OK) { error("Unable to get pack file %s\n%s", preq->url, curl_errorstr); goto abort; } } else { error("Unable to start request"); goto abort; } ret = finish_http_pack_request(preq); release_http_pack_request(preq); if (ret) return ret; return 0; abort: return -1; }
static int unlock_remote(struct remote_lock *lock) { struct active_request_slot *slot; struct slot_results results; struct remote_lock *prev = repo->locks; struct curl_slist *dav_headers; int rc = 0; dav_headers = get_dav_token_headers(lock, DAV_HEADER_LOCK); slot = get_active_slot(); slot->results = &results; curl_setup_http_get(slot->curl, lock->url, DAV_UNLOCK); curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers); if (start_active_slot(slot)) { run_active_slot(slot); if (results.curl_result == CURLE_OK) rc = 1; else fprintf(stderr, "UNLOCK HTTP error %ld\n", results.http_code); } else { fprintf(stderr, "Unable to start UNLOCK request\n"); } curl_slist_free_all(dav_headers); if (repo->locks == lock) { repo->locks = lock->next; } else { while (prev && prev->next != lock) prev = prev->next; if (prev) prev->next = prev->next->next; } free(lock->owner); free(lock->url); free(lock->token); free(lock); return rc; }
static int fetch_pack(struct walker *walker, struct alt_base *repo, unsigned char *sha1) { char *url; struct packed_git *target; struct packed_git **lst; FILE *packfile; char *filename; char tmpfile[PATH_MAX]; int ret; long prev_posn = 0; char range[RANGE_HEADER_SIZE]; struct curl_slist *range_header = NULL; struct walker_data *data = walker->data; struct active_request_slot *slot; struct slot_results results; if (fetch_indices(walker, repo)) return -1; target = find_sha1_pack(sha1, repo->packs); if (!target) return -1; if (walker->get_verbosely) { fprintf(stderr, "Getting pack %s\n", sha1_to_hex(target->sha1)); fprintf(stderr, " which contains %s\n", sha1_to_hex(sha1)); } url = xmalloc(strlen(repo->base) + 65); sprintf(url, "%s/objects/pack/pack-%s.pack", repo->base, sha1_to_hex(target->sha1)); filename = sha1_pack_name(target->sha1); snprintf(tmpfile, sizeof(tmpfile), "%s.temp", filename); packfile = fopen(tmpfile, "a"); if (!packfile) return error("Unable to open local file %s for pack", tmpfile); slot = get_active_slot(); slot->results = &results; curl_easy_setopt(slot->curl, CURLOPT_FILE, packfile); curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite); curl_easy_setopt(slot->curl, CURLOPT_URL, url); curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, data->no_pragma_header); slot->local = packfile; /* If there is data present from a previous transfer attempt, resume where it left off */ prev_posn = ftell(packfile); if (prev_posn>0) { if (walker->get_verbosely) fprintf(stderr, "Resuming fetch of pack %s at byte %ld\n", sha1_to_hex(target->sha1), prev_posn); sprintf(range, "Range: bytes=%ld-", prev_posn); range_header = curl_slist_append(range_header, range); curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, range_header); } if (start_active_slot(slot)) { run_active_slot(slot); if (results.curl_result != CURLE_OK) { fclose(packfile); return error("Unable to get pack file %s\n%s", url, curl_errorstr); } } else { fclose(packfile); return error("Unable to start 
request"); } target->pack_size = ftell(packfile); fclose(packfile); ret = move_temp_to_file(tmpfile, filename); if (ret) return ret; lst = &repo->packs; while (*lst != target) lst = &((*lst)->next); *lst = (*lst)->next; if (verify_pack(target)) return -1; install_packed_git(target); return 0; }
/*
 * Download and parse the remote's objects/info/packs listing, calling
 * setup_index() for every "P pack-<40 hex>.pack" line found.
 *
 * Idempotent: repo->got_indices is set once the listing has been
 * processed (a missing listing counts as success — the remote simply
 * has no packs).  Returns 0 on success, error() result otherwise.
 */
static int fetch_indices(struct walker *walker, struct alt_base *repo)
{
	unsigned char sha1[20];
	char *url;
	struct strbuf buffer = STRBUF_INIT;
	char *data;
	int i = 0;
	int ret = 0;
	struct active_request_slot *slot;
	struct slot_results results;

	if (repo->got_indices)
		return 0;

	if (walker->get_verbosely)
		fprintf(stderr, "Getting pack list for %s\n", repo->base);

	url = xmalloc(strlen(repo->base) + 21);
	sprintf(url, "%s/objects/info/packs", repo->base);

	slot = get_active_slot();
	slot->results = &results;
	curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer);
	curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer);
	curl_easy_setopt(slot->curl, CURLOPT_URL, url);
	curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, NULL);
	if (start_active_slot(slot)) {
		run_active_slot(slot);
		if (results.curl_result != CURLE_OK) {
			if (missing_target(&results)) {
				/* No pack list on the remote: that's fine. */
				repo->got_indices = 1;
				goto cleanup;
			} else {
				repo->got_indices = 0;
				ret = error("%s", curl_errorstr);
				goto cleanup;
			}
		}
	} else {
		repo->got_indices = 0;
		ret = error("Unable to start request");
		goto cleanup;
	}

	data = buffer.buf;
	while (i < buffer.len) {
		switch (data[i]) {
		case 'P':
			i++;
			/* " pack-" + 40 hex + ".pack\n" = 52 chars total. */
			if (i + 52 <= buffer.len &&
			    !prefixcmp(data + i, " pack-") &&
			    !prefixcmp(data + i + 46, ".pack\n")) {
				get_sha1_hex(data + i + 6, sha1);
				setup_index(walker, repo, sha1);
				i += 51;
				break;
			}
			/* fallthrough: malformed 'P' line, skip to newline */
		default:
			while (i < buffer.len && data[i] != '\n')
				i++;
		}
		i++;
	}

	repo->got_indices = 1;
cleanup:
	strbuf_release(&buffer);
	free(url);
	return ret;
}
static int fetch_index(struct walker *walker, struct alt_base *repo, unsigned char *sha1) { char *hex = sha1_to_hex(sha1); char *filename; char *url; char tmpfile[PATH_MAX]; long prev_posn = 0; char range[RANGE_HEADER_SIZE]; struct curl_slist *range_header = NULL; struct walker_data *data = walker->data; FILE *indexfile; struct active_request_slot *slot; struct slot_results results; if (has_pack_index(sha1)) return 0; if (walker->get_verbosely) fprintf(stderr, "Getting index for pack %s\n", hex); url = xmalloc(strlen(repo->base) + 64); sprintf(url, "%s/objects/pack/pack-%s.idx", repo->base, hex); filename = sha1_pack_index_name(sha1); snprintf(tmpfile, sizeof(tmpfile), "%s.temp", filename); indexfile = fopen(tmpfile, "a"); if (!indexfile) return error("Unable to open local file %s for pack index", tmpfile); slot = get_active_slot(); slot->results = &results; curl_easy_setopt(slot->curl, CURLOPT_FILE, indexfile); curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite); curl_easy_setopt(slot->curl, CURLOPT_URL, url); curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, data->no_pragma_header); slot->local = indexfile; /* If there is data present from a previous transfer attempt, resume where it left off */ prev_posn = ftell(indexfile); if (prev_posn>0) { if (walker->get_verbosely) fprintf(stderr, "Resuming fetch of index for pack %s at byte %ld\n", hex, prev_posn); sprintf(range, "Range: bytes=%ld-", prev_posn); range_header = curl_slist_append(range_header, range); curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, range_header); } if (start_active_slot(slot)) { run_active_slot(slot); if (results.curl_result != CURLE_OK) { fclose(indexfile); return error("Unable to get pack index %s\n%s", url, curl_errorstr); } } else { fclose(indexfile); return error("Unable to start request"); } fclose(indexfile); return move_temp_to_file(tmpfile, filename); }
/*
 * Fetch a single loose object whose request must already be on the
 * object queue; drives the http_object_request attached to it.
 *
 * Returns 0 on success (or if the object already exists locally),
 * -1/error() otherwise.
 */
static int fetch_object(struct walker *walker, struct alt_base *repo, unsigned char *sha1)
{
	char *hex = sha1_to_hex(sha1);
	int ret = 0;
	struct object_request *obj_req = object_queue_head;
	struct http_object_request *req;

	/* Locate the queued request for this sha1. */
	while (obj_req != NULL && hashcmp(obj_req->sha1, sha1))
		obj_req = obj_req->next;
	if (obj_req == NULL)
		return error("Couldn't find request for %s in the queue", hex);

	/* Already present locally; cancel both request layers. */
	if (has_sha1_file(obj_req->sha1)) {
		if (obj_req->req != NULL)
			abort_http_object_request(obj_req->req);
		abort_object_request(obj_req);
		return 0;
	}

#ifdef USE_CURL_MULTI
	while (obj_req->state == WAITING)
		step_active_slots();
#else
	start_object_request(walker, obj_req);
#endif

	/*
	 * obj_req->req might change when fetching alternates in the callback
	 * process_object_response; therefore, the "shortcut" variable, req,
	 * is used only after we're done with slots.
	 */
	while (obj_req->state == ACTIVE)
		run_active_slot(obj_req->req->slot);

	req = obj_req->req;

	/* Close the local temp file descriptor, if still open. */
	if (req->localfile != -1) {
		close(req->localfile);
		req->localfile = -1;
	}

	if (obj_req->state == ABORTED) {
		ret = error("Request for %s aborted", hex);
	} else if (req->curl_result != CURLE_OK &&
		   req->http_code != 416) {
		/* 416 = requested range not satisfiable: resume past EOF, OK. */
		if (missing_target(req))
			ret = -1; /* Be silent, it is probably in a pack. */
		else
			ret = error("%s (curl_result = %d, http_code = %ld, sha1 = %s)",
				    req->errorstr, req->curl_result,
				    req->http_code, hex);
	} else if (req->zret != Z_STREAM_END) {
		/* Inflate did not finish cleanly: corrupt download. */
		walker->corrupt_object_found++;
		ret = error("File %s (%s) corrupt", hex, req->url);
	} else if (hashcmp(obj_req->sha1, req->real_sha1)) {
		ret = error("File %s has bad hash", hex);
	} else if (req->rename < 0) {
		ret = error("unable to write sha1 filename %s",
			    sha1_file_name(req->sha1));
	}

	release_http_object_request(req);
	release_object_request(obj_req);
	return ret;
}
static struct ref *get_refs_via_curl(struct transport *transport) { struct strbuf buffer = STRBUF_INIT; char *data, *start, *mid; char *ref_name; char *refs_url; int i = 0; struct active_request_slot *slot; struct slot_results results; struct ref *refs = NULL; struct ref *ref = NULL; struct ref *last_ref = NULL; struct walker *walker; if (!transport->data) transport->data = get_http_walker(transport->url, transport->remote); walker = transport->data; refs_url = xmalloc(strlen(transport->url) + 11); sprintf(refs_url, "%s/info/refs", transport->url); slot = get_active_slot(); slot->results = &results; curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer); curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer); curl_easy_setopt(slot->curl, CURLOPT_URL, refs_url); curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, NULL); if (start_active_slot(slot)) { run_active_slot(slot); if (results.curl_result != CURLE_OK) { strbuf_release(&buffer); if (missing_target(&results)) { return NULL; } else { error("%s", curl_errorstr); return NULL; } } } else { strbuf_release(&buffer); error("Unable to start request"); return NULL; } data = buffer.buf; start = NULL; mid = data; while (i < buffer.len) { if (!start) start = &data[i]; if (data[i] == '\t') mid = &data[i]; if (data[i] == '\n') { data[i] = 0; ref_name = mid + 1; ref = xmalloc(sizeof(struct ref) + strlen(ref_name) + 1); memset(ref, 0, sizeof(struct ref)); strcpy(ref->name, ref_name); get_sha1_hex(start, ref->old_sha1); if (!refs) refs = ref; if (last_ref) last_ref->next = ref; last_ref = ref; start = NULL; } i++; } strbuf_release(&buffer); ref = alloc_ref_from_str("HEAD"); if (!walker->fetch_ref(walker, ref) && !resolve_remote_symref(ref, refs)) { ref->next = refs; refs = ref; } else { free(ref); } return refs; }
/*
 * Acquire a DAV LOCK on repo->url + path with the given timeout (in
 * seconds).  Any missing intermediate collections are created first
 * with MKCOL.  The server's XML response is parsed (via Expat) to fill
 * in the lock token/owner/timeout.
 *
 * On success the lock is linked into repo->locks and returned; on any
 * failure NULL is returned.
 */
static struct remote_lock *lock_remote(const char *path, long timeout)
{
	struct active_request_slot *slot;
	struct slot_results results;
	struct buffer out_buffer = { STRBUF_INIT, 0 };
	struct strbuf in_buffer = STRBUF_INIT;
	char *url;
	char *ep;
	char timeout_header[25];
	struct remote_lock *lock = NULL;
	struct curl_slist *dav_headers = http_copy_default_headers();
	struct xml_ctx ctx;
	char *escaped;

	url = xstrfmt("%s%s", repo->url, path);

	/* Make sure leading directories exist for the remote ref */
	ep = strchr(url + strlen(repo->url) + 1, '/');
	while (ep) {
		/* Temporarily truncate the URL after this '/' for MKCOL. */
		char saved_character = ep[1];
		ep[1] = '\0';
		slot = get_active_slot();
		slot->results = &results;
		curl_setup_http_get(slot->curl, url, DAV_MKCOL);
		if (start_active_slot(slot)) {
			run_active_slot(slot);
			/* 405 means the collection already exists. */
			if (results.curl_result != CURLE_OK &&
			    results.http_code != 405) {
				fprintf(stderr,
					"Unable to create branch path %s\n",
					url);
				/* NOTE(review): dav_headers leaks on this
				 * early return — confirm and fix. */
				free(url);
				return NULL;
			}
		} else {
			fprintf(stderr, "Unable to start MKCOL request\n");
			free(url);
			return NULL;
		}
		ep[1] = saved_character;
		ep = strchr(ep + 1, '/');
	}

	/* Build the LOCK request body with our identity as owner. */
	escaped = xml_entities(ident_default_email());
	strbuf_addf(&out_buffer.buf, LOCK_REQUEST, escaped);
	free(escaped);

	xsnprintf(timeout_header, sizeof(timeout_header),
		  "Timeout: Second-%ld", timeout);
	dav_headers = curl_slist_append(dav_headers, timeout_header);
	dav_headers = curl_slist_append(dav_headers, "Content-Type: text/xml");

	slot = get_active_slot();
	slot->results = &results;
	curl_setup_http(slot->curl, url, DAV_LOCK, &out_buffer,
			fwrite_buffer);
	curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers);
	curl_easy_setopt(slot->curl, CURLOPT_FILE, &in_buffer);

	/* timeout == -1 marks "not filled in by the XML parse" below. */
	lock = xcalloc(1, sizeof(*lock));
	lock->timeout = -1;

	if (start_active_slot(slot)) {
		run_active_slot(slot);
		if (results.curl_result == CURLE_OK) {
			/* Parse the server's lock response into 'lock'. */
			XML_Parser parser = XML_ParserCreate(NULL);
			enum XML_Status result;
			ctx.name = xcalloc(10, 1);
			ctx.len = 0;
			ctx.cdata = NULL;
			ctx.userFunc = handle_new_lock_ctx;
			ctx.userData = lock;
			XML_SetUserData(parser, &ctx);
			XML_SetElementHandler(parser, xml_start_tag,
					      xml_end_tag);
			XML_SetCharacterDataHandler(parser, xml_cdata);
			result = XML_Parse(parser, in_buffer.buf,
					   in_buffer.len, 1);
			free(ctx.name);
			if (result != XML_STATUS_OK) {
				fprintf(stderr, "XML error: %s\n",
					XML_ErrorString(
						XML_GetErrorCode(parser)));
				lock->timeout = -1;
			}
			XML_ParserFree(parser);
		}
	} else {
		fprintf(stderr, "Unable to start LOCK request\n");
	}

	curl_slist_free_all(dav_headers);
	strbuf_release(&out_buffer.buf);
	strbuf_release(&in_buffer);

	/* Without a token and a positive timeout the lock is unusable. */
	if (lock->token == NULL || lock->timeout <= 0) {
		free(lock->token);
		free(lock->owner);
		free(url);
		FREE_AND_NULL(lock);
	} else {
		lock->url = url;
		lock->start_time = time(NULL);
		lock->next = repo->locks;
		repo->locks = lock;
	}

	return lock;
}
/*
 * Delete the single remote branch whose name matches 'pattern' (by
 * trailing path component).  Unless 'force' is set, refuse when the
 * remote HEAD cannot be resolved, when either HEAD or the branch points
 * at an object we do not have locally, or when the branch is not an
 * ancestor of HEAD.
 *
 * Returns 0 on success (or dry run), error() result otherwise.
 */
static int delete_remote_branch(const char *pattern, int force)
{
	struct ref *refs = remote_refs;
	struct ref *remote_ref = NULL;
	struct object_id head_oid;
	char *symref = NULL;
	int match;
	int patlen = strlen(pattern);
	int i;
	struct active_request_slot *slot;
	struct slot_results results;
	char *url;

	/* Find the remote branch(es) matching the specified branch name */
	for (match = 0; refs; refs = refs->next) {
		char *name = refs->name;
		int namelen = strlen(name);
		if (namelen < patlen ||
		    memcmp(name + namelen - patlen, pattern, patlen))
			continue;
		/* Only match at a path-component boundary. */
		if (namelen != patlen && name[namelen - patlen - 1] != '/')
			continue;
		match++;
		remote_ref = refs;
	}
	if (match == 0)
		return error("No remote branch matches %s", pattern);
	if (match != 1)
		return error("More than one remote branch matches %s",
			     pattern);

	/*
	 * Remote HEAD must be a symref (not exactly foolproof; a remote
	 * symlink to a symref will look like a symref)
	 */
	fetch_symref("HEAD", &symref, &head_oid);
	if (!symref)
		return error("Remote HEAD is not a symref");

	/* Remote branch must not be the remote HEAD */
	for (i = 0; symref && i < MAXDEPTH; i++) {
		if (!strcmp(remote_ref->name, symref))
			return error("Remote branch %s is the current HEAD",
				     remote_ref->name);
		/* Follow the symref chain, up to MAXDEPTH hops. */
		fetch_symref(symref, &symref, &head_oid);
	}

	/* Run extra sanity checks if delete is not forced */
	if (!force) {
		/* Remote HEAD must resolve to a known object */
		if (symref)
			return error("Remote HEAD symrefs too deep");
		if (is_null_oid(&head_oid))
			return error("Unable to resolve remote HEAD");
		if (!has_object_file(&head_oid))
			return error("Remote HEAD resolves to object %s\nwhich does not exist locally, perhaps you need to fetch?", oid_to_hex(&head_oid));

		/* Remote branch must resolve to a known object */
		if (is_null_oid(&remote_ref->old_oid))
			return error("Unable to resolve remote branch %s",
				     remote_ref->name);
		if (!has_object_file(&remote_ref->old_oid))
			return error("Remote branch %s resolves to object %s\nwhich does not exist locally, perhaps you need to fetch?", remote_ref->name, oid_to_hex(&remote_ref->old_oid));

		/* Remote branch must be an ancestor of remote HEAD */
		if (!verify_merge_base(&head_oid, remote_ref)) {
			return error("The branch '%s' is not an ancestor "
				     "of your current HEAD.\n"
				     "If you are sure you want to delete it,"
				     " run:\n\t'git http-push -D %s %s'",
				     remote_ref->name, repo->url, pattern);
		}
	}

	/* Send delete request */
	fprintf(stderr, "Removing remote branch '%s'\n", remote_ref->name);
	if (dry_run)
		return 0;
	url = xstrfmt("%s%s", repo->url, remote_ref->name);
	slot = get_active_slot();
	slot->results = &results;
	curl_setup_http_get(slot->curl, url, DAV_DELETE);
	if (start_active_slot(slot)) {
		run_active_slot(slot);
		free(url);
		if (results.curl_result != CURLE_OK)
			return error("DELETE request failed (%d/%ld)",
				     results.curl_result,
				     results.http_code);
	} else {
		free(url);
		return error("Unable to start DELETE request");
	}

	return 0;
}
/*
 * Probe the remote with a PROPFIND (Depth: 0) for supportedlock
 * properties to determine whether it supports DAV locking.
 *
 * Returns the lock flags accumulated by handle_lockprop_ctx (nonzero
 * if locking is available), 0 on any failure.
 */
static int locking_available(void)
{
	struct active_request_slot *slot;
	struct slot_results results;
	struct strbuf in_buffer = STRBUF_INIT;
	struct buffer out_buffer = { STRBUF_INIT, 0 };
	struct curl_slist *dav_headers = http_copy_default_headers();
	struct xml_ctx ctx;
	int lock_flags = 0;
	char *escaped;

	/* Build the PROPFIND request body, XML-escaping the URL. */
	escaped = xml_entities(repo->url);
	strbuf_addf(&out_buffer.buf, PROPFIND_SUPPORTEDLOCK_REQUEST, escaped);
	free(escaped);

	dav_headers = curl_slist_append(dav_headers, "Depth: 0");
	dav_headers = curl_slist_append(dav_headers, "Content-Type: text/xml");

	slot = get_active_slot();
	slot->results = &results;
	curl_setup_http(slot->curl, repo->url, DAV_PROPFIND,
			&out_buffer, fwrite_buffer);
	curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers);
	curl_easy_setopt(slot->curl, CURLOPT_FILE, &in_buffer);

	if (start_active_slot(slot)) {
		run_active_slot(slot);
		if (results.curl_result == CURLE_OK) {
			/* Parse the multistatus response for lock props. */
			XML_Parser parser = XML_ParserCreate(NULL);
			enum XML_Status result;
			ctx.name = xcalloc(10, 1);
			ctx.len = 0;
			ctx.cdata = NULL;
			ctx.userFunc = handle_lockprop_ctx;
			ctx.userData = &lock_flags;
			XML_SetUserData(parser, &ctx);
			XML_SetElementHandler(parser, xml_start_tag,
					      xml_end_tag);
			result = XML_Parse(parser, in_buffer.buf,
					   in_buffer.len, 1);
			free(ctx.name);
			if (result != XML_STATUS_OK) {
				fprintf(stderr, "XML error: %s\n",
					XML_ErrorString(
						XML_GetErrorCode(parser)));
				lock_flags = 0;
			}
			XML_ParserFree(parser);
			if (!lock_flags)
				error("no DAV locking support on %s",
				      repo->url);
		} else {
			error("Cannot access URL %s, return code %d",
			      repo->url, results.curl_result);
			lock_flags = 0;
		}
	} else {
		error("Unable to start PROPFIND request on %s", repo->url);
	}

	strbuf_release(&out_buffer.buf);
	strbuf_release(&in_buffer);
	curl_slist_free_all(dav_headers);

	return lock_flags;
}
/*
 * NEEDSWORK: remote_ls() ignores info/refs on the remote side. But it
 * should _only_ heed the information from that file, instead of trying to
 * determine the refs from the remote file system (badly: it does not even
 * know about packed-refs).
 */
/*
 * List the remote directory repo->url + path with a PROPFIND
 * (Depth: 1), invoking userFunc(ls) for every entry found; flags and
 * userData are passed through in the remote_ls_ctx.  Errors are
 * reported to stderr; there is no return value.
 */
static void remote_ls(const char *path, int flags,
		      void (*userFunc)(struct remote_ls_ctx *ls),
		      void *userData)
{
	char *url = xstrfmt("%s%s", repo->url, path);
	struct active_request_slot *slot;
	struct slot_results results;
	struct strbuf in_buffer = STRBUF_INIT;
	struct buffer out_buffer = { STRBUF_INIT, 0 };
	struct curl_slist *dav_headers = http_copy_default_headers();
	struct xml_ctx ctx;
	struct remote_ls_ctx ls;

	/* Context handed to handle_remote_ls_ctx for each dentry. */
	ls.flags = flags;
	ls.path = xstrdup(path);
	ls.dentry_name = NULL;
	ls.dentry_flags = 0;
	ls.userData = userData;
	ls.userFunc = userFunc;

	strbuf_addstr(&out_buffer.buf, PROPFIND_ALL_REQUEST);

	dav_headers = curl_slist_append(dav_headers, "Depth: 1");
	dav_headers = curl_slist_append(dav_headers, "Content-Type: text/xml");

	slot = get_active_slot();
	slot->results = &results;
	curl_setup_http(slot->curl, url, DAV_PROPFIND,
			&out_buffer, fwrite_buffer);
	curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers);
	curl_easy_setopt(slot->curl, CURLOPT_FILE, &in_buffer);

	if (start_active_slot(slot)) {
		run_active_slot(slot);
		if (results.curl_result == CURLE_OK) {
			/* Parse the multistatus listing via Expat. */
			XML_Parser parser = XML_ParserCreate(NULL);
			enum XML_Status result;
			ctx.name = xcalloc(10, 1);
			ctx.len = 0;
			ctx.cdata = NULL;
			ctx.userFunc = handle_remote_ls_ctx;
			ctx.userData = &ls;
			XML_SetUserData(parser, &ctx);
			XML_SetElementHandler(parser, xml_start_tag,
					      xml_end_tag);
			XML_SetCharacterDataHandler(parser, xml_cdata);
			result = XML_Parse(parser, in_buffer.buf,
					   in_buffer.len, 1);
			free(ctx.name);

			if (result != XML_STATUS_OK) {
				fprintf(stderr, "XML error: %s\n",
					XML_ErrorString(
						XML_GetErrorCode(parser)));
			}
			XML_ParserFree(parser);
		}
	} else {
		fprintf(stderr, "Unable to start PROPFIND request\n");
	}

	free(ls.path);
	free(url);
	strbuf_release(&out_buffer.buf);
	strbuf_release(&in_buffer);
	curl_slist_free_all(dav_headers);
}