/*
 * Record one "have <hex>" line from the client.  Parses the hex object
 * name into *oid, marks the object (and, for commits, its direct
 * parents) with THEY_HAVE, and keeps the global oldest_have updated
 * with the oldest commit date claimed so far.
 *
 * Returns 1 if this is a newly-learned "have" (object appended to the
 * global have_obj array), 0 if we already knew they had it, and -1 if
 * we do not have the object locally.  Dies on a malformed hex string
 * or an object that cannot be parsed.
 */
static int got_oid(const char *hex, struct object_id *oid)
{
	struct object *o;
	int we_knew_they_have = 0;

	if (get_oid_hex(hex, oid))
		die("git upload-pack: expected SHA1 object, got '%s'", hex);
	if (!has_object_file(oid))
		return -1;

	o = parse_object(oid);
	if (!o)
		die("oops (%s)", oid_to_hex(oid));
	if (o->type == OBJ_COMMIT) {
		struct commit_list *parents;
		struct commit *commit = (struct commit *)o;

		if (o->flags & THEY_HAVE)
			we_knew_they_have = 1;
		else
			o->flags |= THEY_HAVE;
		/* oldest_have is used as a cutoff for the walk elsewhere */
		if (!oldest_have || (commit->date < oldest_have))
			oldest_have = commit->date;
		/* they have a commit, so they have its parents too */
		for (parents = commit->parents;
		     parents;
		     parents = parents->next)
			parents->item->object.flags |= THEY_HAVE;
	}
	if (!we_knew_they_have) {
		add_object_array(o, NULL, &have_obj);
		return 1;
	}
	return 0;
}
/*
 * Resolve a single remote ref over HTTP and prepend it to the global
 * remote_refs list.  If we may need to regenerate the server's
 * info/refs later, also queue a fetch of the referenced object when it
 * is not present locally.  On fetch failure the ref is reported and
 * discarded.
 */
static void one_remote_ref(const char *refname)
{
	struct ref *ref;
	struct object *obj;

	ref = alloc_ref(refname);

	if (http_fetch_ref(repo->url, ref) != 0) {
		fprintf(stderr,
			"Unable to fetch ref %s from %s\n",
			refname, repo->url);
		free(ref);
		return;
	}

	/*
	 * Fetch a copy of the object if it doesn't exist locally - it
	 * may be required for updating server info later.
	 */
	if (repo->can_update_info_refs && !has_object_file(&ref->old_oid)) {
		obj = lookup_unknown_object(ref->old_oid.hash);
		fprintf(stderr, " fetch %s for %s\n",
			oid_to_hex(&ref->old_oid), refname);
		add_fetch_request(obj);
	}

	ref->next = remote_refs;
	remote_refs = ref;
}
/*
 * Revision-walk callback: die if a blob reached by the walk is missing
 * from the object store; under --verify-objects, force a full parse of
 * any not-yet-parsed non-commit so its contents are validated.
 */
static void finish_object(struct object *obj, const char *name, void *cb_data)
{
	struct rev_list_info *walk_info = cb_data;
	int missing_blob;

	missing_blob = (obj->type == OBJ_BLOB) && !has_object_file(&obj->oid);
	if (missing_blob)
		die("missing blob object '%s'", oid_to_hex(&obj->oid));

	if (!walk_info->revs->verify_objects)
		return;
	if (obj->parsed || obj->type == OBJ_COMMIT)
		return;
	parse_object(&obj->oid);
}
/*
 * Write one object name to "fh", one per line.  A negative ("have")
 * entry is prefixed with '^'; negative entries naming objects we do
 * not actually have are silently skipped.
 */
static void feed_object(const struct object_id *oid, FILE *fh, int negative)
{
	if (negative) {
		if (!has_object_file(oid))
			return;
		putc('^', fh);
	}
	fputs(oid_to_hex(oid), fh);
	putc('\n', fh);
}
/*
 * Revision-walk callback.  Returns 1 after delegating to
 * finish_object__ma() when a blob reached by the walk is absent from
 * the object store; otherwise returns 0, optionally forcing a
 * verifying parse of unparsed non-commits under --verify-objects.
 */
static int finish_object(struct object *obj, const char *name, void *cb_data)
{
	struct rev_list_info *walk_info = cb_data;

	if (obj->type == OBJ_BLOB) {
		if (!has_object_file(&obj->oid)) {
			finish_object__ma(obj);
			return 1;
		}
	}

	if (walk_info->revs->verify_objects &&
	    !obj->parsed &&
	    obj->type != OBJ_COMMIT)
		parse_object(the_repository, &obj->oid);

	return 0;
}
/*
 * Look up and parse the object named by "oid", returning the in-core
 * object or NULL on failure (corrupt data, hash mismatch, or a missing
 * object).  Already-parsed objects are returned immediately.
 *
 * Known (or probed) blobs take a fast path that verifies the hash
 * without keeping the full contents in core (buffer passed as NULL to
 * check_object_signature — presumably a streaming check; confirm in
 * its implementation).  Replacement mapping is honored via
 * lookup_replace_object().
 */
struct object *parse_object(const struct object_id *oid)
{
	unsigned long size;
	enum object_type type;
	int eaten;
	const struct object_id *repl = lookup_replace_object(the_repository, oid);
	void *buffer;
	struct object *obj;

	obj = lookup_object(oid->hash);
	if (obj && obj->parsed)
		return obj;

	/*
	 * Blob fast path: either we already know it is a blob, or the
	 * object exists and the object database says it is one.
	 */
	if ((obj && obj->type == OBJ_BLOB && has_object_file(oid)) ||
	    (!obj && has_object_file(oid) &&
	     oid_object_info(the_repository, oid, NULL) == OBJ_BLOB)) {
		if (check_object_signature(repl, NULL, 0, NULL) < 0) {
			error("sha1 mismatch %s", oid_to_hex(oid));
			return NULL;
		}
		parse_blob_buffer(lookup_blob(oid), NULL, 0);
		return lookup_object(oid->hash);
	}

	buffer = read_object_file(oid, &type, &size);
	if (buffer) {
		/* verify contents against the (possibly replaced) name */
		if (check_object_signature(repl, buffer, size, type_name(type)) < 0) {
			free(buffer);
			error("sha1 mismatch %s", oid_to_hex(repl));
			return NULL;
		}

		obj = parse_object_buffer(oid, type, size, buffer, &eaten);
		/* parse_object_buffer may take ownership of buffer ("eaten") */
		if (!eaten)
			free(buffer);
		return obj;
	}
	return NULL;
}
static void write_followtags(const struct ref *refs, const char *msg) { const struct ref *ref; for (ref = refs; ref; ref = ref->next) { if (!starts_with(ref->name, "refs/tags/")) continue; if (ends_with(ref->name, "^{}")) continue; if (!has_object_file(&ref->old_oid)) continue; update_ref(msg, ref->name, ref->old_oid.hash, NULL, 0, UPDATE_REFS_DIE_ON_ERR); } }
/*
 * fsck reachability callback: mark "obj" REACHABLE and queue it on the
 * global "pending" array for further traversal.  "data" is the
 * referring (parent) object, or NULL only when invoked from
 * mark_object_reachable().
 *
 * Returns 1 for a broken link (NULL object, or object data missing
 * locally) after reporting it and setting ERROR_REACHABLE in
 * errors_found; returns 0 otherwise.  Promisor objects terminate the
 * recursion without being queued.
 */
static int mark_object(struct object *obj, int type, void *data, struct fsck_options *options)
{
	struct object *parent = data;
	/*
	 * The only case data is NULL or type is OBJ_ANY is when
	 * mark_object_reachable() calls us. All the callers of
	 * that function has non-NULL obj hence ...
	 */
	if (!obj) {
		/* ... these references to parent->fld are safe here */
		printf("broken link from %7s %s\n",
		       printable_type(parent), describe_object(parent));
		printf("broken link from %7s %s\n",
		       (type == OBJ_ANY ? "unknown" : type_name(type)),
		       "unknown");
		errors_found |= ERROR_REACHABLE;
		return 1;
	}

	if (type != OBJ_ANY && obj->type != type)
		/* ... and the reference to parent is safe here */
		objerror(parent, "wrong object type in link");

	/* already visited: nothing more to do */
	if (obj->flags & REACHABLE)
		return 0;
	obj->flags |= REACHABLE;

	if (is_promisor_object(&obj->oid))
		/*
		 * Further recursion does not need to be performed on this
		 * object since it is a promisor object (so it does not need to
		 * be added to "pending").
		 */
		return 0;

	if (!(obj->flags & HAS_OBJ)) {
		/* only report a broken link when we know who pointed at it */
		if (parent && !has_object_file(&obj->oid)) {
			printf("broken link from %7s %s\n",
			       printable_type(parent), describe_object(parent));
			printf(" to %7s %s\n",
			       printable_type(obj), describe_object(obj));
			errors_found |= ERROR_REACHABLE;
		}
		return 1;
	}

	add_object_array(obj, NULL, &pending);
	return 0;
}
/*
 * for_each_note() callback: when the annotated object no longer exists
 * in the object store, prepend its entry to the deletion list passed
 * via cb_data so the note can be pruned later.  Always returns 0 so
 * the iteration continues.
 */
static int prune_notes_helper(const struct object_id *object_oid,
			      const struct object_id *note_oid, char *note_path,
			      void *cb_data)
{
	struct note_delete_list **list_head = (struct note_delete_list **) cb_data;

	if (!has_object_file(object_oid)) {
		/* annotated object is gone => schedule this note for pruning */
		struct note_delete_list *entry;

		entry = (struct note_delete_list *) xmalloc(sizeof(*entry));
		entry->sha1 = object_oid->hash;
		entry->next = *list_head;
		*list_head = entry;
	}
	return 0;
}
/*
 * Recursively check whether the tree named by "oid" and every object it
 * references (blobs and subtrees) exist locally.  Results are cached in
 * object flags: SEEN marks a tree known complete, INCOMPLETE marks one
 * known (or found) to be missing something, so repeated calls are cheap.
 *
 * Returns 1 if the tree is complete, 0 otherwise (including when the
 * tree itself cannot be looked up or read).
 */
static int tree_is_complete(const struct object_id *oid)
{
	struct tree_desc desc;
	struct name_entry entry;
	int complete;
	struct tree *tree;

	tree = lookup_tree(the_repository, oid);
	if (!tree)
		return 0;
	/* cached answers from a previous call */
	if (tree->object.flags & SEEN)
		return 1;
	if (tree->object.flags & INCOMPLETE)
		return 0;

	if (!tree->buffer) {
		enum object_type type;
		unsigned long size;
		void *data = read_object_file(oid, &type, &size);
		if (!data) {
			tree->object.flags |= INCOMPLETE;
			return 0;
		}
		tree->buffer = data;
		tree->size = size;
	}
	init_tree_desc(&desc, tree->buffer, tree->size);
	complete = 1;
	while (tree_entry(&desc, &entry)) {
		/* an entry is bad if it is missing, or is an incomplete subtree */
		if (!has_object_file(&entry.oid) ||
		    (S_ISDIR(entry.mode) && !tree_is_complete(&entry.oid))) {
			tree->object.flags |= INCOMPLETE;
			complete = 0;
		}
	}
	free_tree_buffer(tree);

	if (complete)
		tree->object.flags |= SEEN;
	return complete;
}
/*
 * Process the client's "have" object ids.  Each oid we possess locally
 * is appended to "common", its object (and, for commits, its parents)
 * is marked THEY_HAVE, the global oldest_have cutoff is updated, and
 * newly-learned haves are appended to the global have_obj array.
 * Haves for objects we do not possess are silently skipped.
 * Always returns 0.
 */
static int process_haves(struct oid_array *haves, struct oid_array *common)
{
	int i;

	/* Process haves */
	for (i = 0; i < haves->nr; i++) {
		const struct object_id *oid = &haves->oid[i];
		struct object *o;
		int we_knew_they_have = 0;

		if (!has_object_file(oid))
			continue;

		oid_array_append(common, oid);

		o = parse_object(oid);
		if (!o)
			die("oops (%s)", oid_to_hex(oid));
		if (o->type == OBJ_COMMIT) {
			struct commit_list *parents;
			struct commit *commit = (struct commit *)o;

			if (o->flags & THEY_HAVE)
				we_knew_they_have = 1;
			else
				o->flags |= THEY_HAVE;
			/* keep track of the oldest claimed commit date */
			if (!oldest_have || (commit->date < oldest_have))
				oldest_have = commit->date;
			/* a commit they have implies its parents too */
			for (parents = commit->parents;
			     parents;
			     parents = parents->next)
				parents->item->object.flags |= THEY_HAVE;
		}

		if (!we_knew_they_have)
			add_object_array(o, NULL, &have_obj);
	}

	return 0;
}
/*
 * Reconcile the local .git/shallow file with the shallow information
 * received from the remote after a fetch.
 *
 * Cases handled, in order:
 *  - we deepened/unshallowed: commit or roll back the already-prepared
 *    shallow lockfile and return;
 *  - remote not shallow: nothing to do;
 *  - cloning: accept any shallow points whose objects arrived in the pack;
 *  - --update-shallow: accept shallow roots reachable from the new refs;
 *  - otherwise: do not touch .git/shallow, and mark refs that would
 *    require shallow updates as REF_STATUS_REJECT_SHALLOW.
 */
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow(the_repository));
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;

		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;
	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;

		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}
static int update_one(struct cache_tree *it, struct cache_entry **cache, int entries, const char *base, int baselen, int *skip_count, int flags) { struct strbuf buffer; int missing_ok = flags & WRITE_TREE_MISSING_OK; int dryrun = flags & WRITE_TREE_DRY_RUN; int repair = flags & WRITE_TREE_REPAIR; int to_invalidate = 0; int i; assert(!(dryrun && repair)); *skip_count = 0; if (0 <= it->entry_count && has_sha1_file(it->oid.hash)) return it->entry_count; /* * We first scan for subtrees and update them; we start by * marking existing subtrees -- the ones that are unmarked * should not be in the result. */ for (i = 0; i < it->subtree_nr; i++) it->down[i]->used = 0; /* * Find the subtrees and update them. */ i = 0; while (i < entries) { const struct cache_entry *ce = cache[i]; struct cache_tree_sub *sub; const char *path, *slash; int pathlen, sublen, subcnt, subskip; path = ce->name; pathlen = ce_namelen(ce); if (pathlen <= baselen || memcmp(base, path, baselen)) break; /* at the end of this level */ slash = strchr(path + baselen, '/'); if (!slash) { i++; continue; } /* * a/bbb/c (base = a/, slash = /c) * ==> * path+baselen = bbb/c, sublen = 3 */ sublen = slash - (path + baselen); sub = find_subtree(it, path + baselen, sublen, 1); if (!sub->cache_tree) sub->cache_tree = cache_tree(); subcnt = update_one(sub->cache_tree, cache + i, entries - i, path, baselen + sublen + 1, &subskip, flags); if (subcnt < 0) return subcnt; if (!subcnt) die("index cache-tree records empty sub-tree"); i += subcnt; sub->count = subcnt; /* to be used in the next loop */ *skip_count += subskip; sub->used = 1; } discard_unused_subtrees(it); /* * Then write out the tree object for this level. 
*/ strbuf_init(&buffer, 8192); i = 0; while (i < entries) { const struct cache_entry *ce = cache[i]; struct cache_tree_sub *sub = NULL; const char *path, *slash; int pathlen, entlen; const struct object_id *oid; unsigned mode; int expected_missing = 0; int contains_ita = 0; int ce_missing_ok; path = ce->name; pathlen = ce_namelen(ce); if (pathlen <= baselen || memcmp(base, path, baselen)) break; /* at the end of this level */ slash = strchr(path + baselen, '/'); if (slash) { entlen = slash - (path + baselen); sub = find_subtree(it, path + baselen, entlen, 0); if (!sub) die("cache-tree.c: '%.*s' in '%s' not found", entlen, path + baselen, path); i += sub->count; oid = &sub->cache_tree->oid; mode = S_IFDIR; contains_ita = sub->cache_tree->entry_count < 0; if (contains_ita) { to_invalidate = 1; expected_missing = 1; } } else { oid = &ce->oid; mode = ce->ce_mode; entlen = pathlen - baselen; i++; } ce_missing_ok = mode == S_IFGITLINK || missing_ok || (repository_format_partial_clone && ce_skip_worktree(ce)); if (is_null_oid(oid) || (!ce_missing_ok && !has_object_file(oid))) { strbuf_release(&buffer); if (expected_missing) return -1; return error("invalid object %06o %s for '%.*s'", mode, oid_to_hex(oid), entlen+baselen, path); } /* * CE_REMOVE entries are removed before the index is * written to disk. Skip them to remain consistent * with the future on-disk index. */ if (ce->ce_flags & CE_REMOVE) { *skip_count = *skip_count + 1; continue; } /* * CE_INTENT_TO_ADD entries exist on on-disk index but * they are not part of generated trees. Invalidate up * to root to force cache-tree users to read elsewhere. */ if (!sub && ce_intent_to_add(ce)) { to_invalidate = 1; continue; } /* * "sub" can be an empty tree if all subentries are i-t-a. 
*/ if (contains_ita && is_empty_tree_oid(oid)) continue; strbuf_grow(&buffer, entlen + 100); strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0'); strbuf_add(&buffer, oid->hash, the_hash_algo->rawsz); #if DEBUG fprintf(stderr, "cache-tree update-one %o %.*s\n", mode, entlen, path + baselen); #endif } if (repair) { struct object_id oid; hash_object_file(buffer.buf, buffer.len, tree_type, &oid); if (has_object_file(&oid)) oidcpy(&it->oid, &oid); else to_invalidate = 1; } else if (dryrun) { hash_object_file(buffer.buf, buffer.len, tree_type, &it->oid); } else if (write_object_file(buffer.buf, buffer.len, tree_type, &it->oid)) { strbuf_release(&buffer); return -1; } strbuf_release(&buffer); it->entry_count = to_invalidate ? -1 : i - *skip_count; #if DEBUG fprintf(stderr, "cache-tree update-one (%d ent, %d subtree) %s\n", it->entry_count, it->subtree_nr, oid_to_hex(&it->oid)); #endif return i; }
static int everything_local(struct fetch_pack_args *args, struct ref **refs, struct ref **sought, int nr_sought) { struct ref *ref; int retval; unsigned long cutoff = 0; save_commit_buffer = 0; for (ref = *refs; ref; ref = ref->next) { struct object *o; if (!has_object_file(&ref->old_oid)) continue; o = parse_object(ref->old_oid.hash); if (!o) continue; /* We already have it -- which may mean that we were * in sync with the other side at some time after * that (it is OK if we guess wrong here). */ if (o->type == OBJ_COMMIT) { struct commit *commit = (struct commit *)o; if (!cutoff || cutoff < commit->date) cutoff = commit->date; } } if (!args->depth) { for_each_ref(mark_complete_oid, NULL); for_each_alternate_ref(mark_alternate_complete, NULL); commit_list_sort_by_date(&complete); if (cutoff) mark_recent_complete_commits(args, cutoff); } /* * Mark all complete remote refs as common refs. * Don't mark them common yet; the server has to be told so first. */ for (ref = *refs; ref; ref = ref->next) { struct object *o = deref_tag(lookup_object(ref->old_oid.hash), NULL, 0); if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE)) continue; if (!(o->flags & SEEN)) { rev_list_push((struct commit *)o, COMMON_REF | SEEN); mark_common((struct commit *)o, 1, 1); } } filter_refs(args, refs, sought, nr_sought); for (retval = 1, ref = *refs; ref ; ref = ref->next) { const unsigned char *remote = ref->old_oid.hash; struct object *o; o = lookup_object(remote); if (!o || !(o->flags & COMPLETE)) { retval = 0; if (!args->verbose) continue; fprintf(stderr, "want %s (%s)\n", sha1_to_hex(remote), ref->name); continue; } if (!args->verbose) continue; fprintf(stderr, "already have %s (%s)\n", sha1_to_hex(remote), ref->name); } return retval; }
/*
 * Entry point for git-http-push over WebDAV.  Parses options and the
 * repository URL, verifies DAV locking support, fetches the remote ref
 * advertisement, matches refspecs, then for each ref to update: locks
 * the remote branch, walks the objects to push, uploads them, and
 * updates the remote ref.  Finally regenerates the remote server info
 * (info/refs) when possible.  Returns a non-zero/negative code on
 * failure (individual ref failures set rc but do not abort the loop).
 */
int cmd_main(int argc, const char **argv)
{
	struct transfer_request *request;
	struct transfer_request *next_request;
	int nr_refspec = 0;
	const char **refspec = NULL;
	struct remote_lock *ref_lock = NULL;
	struct remote_lock *info_ref_lock = NULL;
	struct rev_info revs;
	int delete_branch = 0;
	int force_delete = 0;
	int objects_to_send;
	int rc = 0;
	int i;
	int new_refs;
	struct ref *ref, *local_refs;

	repo = xcalloc(1, sizeof(*repo));

	argv++;	/* skip the program name */
	for (i = 1; i < argc; i++, argv++) {
		const char *arg = *argv;

		if (*arg == '-') {
			if (!strcmp(arg, "--all")) {
				push_all = MATCH_REFS_ALL;
				continue;
			}
			if (!strcmp(arg, "--force")) {
				force_all = 1;
				continue;
			}
			if (!strcmp(arg, "--dry-run")) {
				dry_run = 1;
				continue;
			}
			if (!strcmp(arg, "--helper-status")) {
				helper_status = 1;
				continue;
			}
			if (!strcmp(arg, "--verbose")) {
				push_verbosely = 1;
				http_is_verbose = 1;
				continue;
			}
			if (!strcmp(arg, "-d")) {
				delete_branch = 1;
				continue;
			}
			if (!strcmp(arg, "-D")) {
				delete_branch = 1;
				force_delete = 1;
				continue;
			}
			if (!strcmp(arg, "-h"))
				usage(http_push_usage);
		}
		if (!repo->url) {
			/* first non-option argument is the repository URL */
			char *path = strstr(arg, "//");
			str_end_url_with_slash(arg, &repo->url);
			repo->path_len = strlen(repo->url);
			if (path) {
				repo->path = strchr(path+2, '/');
				if (repo->path)
					repo->path_len = strlen(repo->path);
			}
			continue;
		}
		/* everything after the URL is refspecs */
		refspec = argv;
		nr_refspec = argc - i;
		break;
	}

#ifndef USE_CURL_MULTI
	die("git-push is not available for http/https repository when not compiled with USE_CURL_MULTI");
#endif

	if (!repo->url)
		usage(http_push_usage);

	if (delete_branch && nr_refspec != 1)
		die("You must specify only one branch name when deleting a remote branch");

	setup_git_directory();

	memset(remote_dir_exists, -1, 256);

	http_init(NULL, repo->url, 1);

#ifdef USE_CURL_MULTI
	is_running_queue = 0;
#endif

	/* Verify DAV compliance/lock support */
	if (!locking_available()) {
		rc = 1;
		goto cleanup;
	}

	sigchain_push_common(remove_locks_on_signal);

	/* Check whether the remote has server info files */
	repo->can_update_info_refs = 0;
	repo->has_info_refs = remote_exists("info/refs");
	repo->has_info_packs = remote_exists("objects/info/packs");
	if (repo->has_info_refs) {
		info_ref_lock = lock_remote("info/refs", LOCK_TIME);
		if (info_ref_lock)
			repo->can_update_info_refs = 1;
		else {
			error("cannot lock existing info/refs");
			rc = 1;
			goto cleanup;
		}
	}
	if (repo->has_info_packs)
		fetch_indices();

	/* Get a list of all local and remote heads to validate refspecs */
	local_refs = get_local_heads();
	fprintf(stderr, "Fetching remote heads...\n");
	get_dav_remote_heads();
	run_request_queue();

	/* Remove a remote branch if -d or -D was specified */
	if (delete_branch) {
		if (delete_remote_branch(refspec[0], force_delete) == -1) {
			fprintf(stderr, "Unable to delete remote branch %s\n",
				refspec[0]);
			if (helper_status)
				printf("error %s cannot remove\n", refspec[0]);
		}
		goto cleanup;
	}

	/* match them up */
	if (match_push_refs(local_refs, &remote_refs,
			    nr_refspec, (const char **) refspec, push_all)) {
		rc = -1;
		goto cleanup;
	}
	if (!remote_refs) {
		fprintf(stderr,
			"No refs in common and none specified; doing nothing.\n");
		if (helper_status)
			printf("error null no match\n");
		rc = 0;
		goto cleanup;
	}

	new_refs = 0;
	for (ref = remote_refs; ref; ref = ref->next) {
		struct argv_array commit_argv = ARGV_ARRAY_INIT;

		if (!ref->peer_ref)
			continue;

		/* a null peer new_oid means "delete this remote ref" */
		if (is_null_oid(&ref->peer_ref->new_oid)) {
			if (delete_remote_branch(ref->name, 1) == -1) {
				error("Could not remove %s", ref->name);
				if (helper_status)
					printf("error %s cannot remove\n", ref->name);
				rc = -4;
			}
			else if (helper_status)
				printf("ok %s\n", ref->name);
			new_refs++;
			continue;
		}

		if (!oidcmp(&ref->old_oid, &ref->peer_ref->new_oid)) {
			if (push_verbosely)
				fprintf(stderr, "'%s': up-to-date\n", ref->name);
			if (helper_status)
				printf("ok %s up to date\n", ref->name);
			continue;
		}

		/* non-forced pushes must be fast-forwards */
		if (!force_all &&
		    !is_null_oid(&ref->old_oid) &&
		    !ref->force) {
			if (!has_object_file(&ref->old_oid) ||
			    !ref_newer(&ref->peer_ref->new_oid,
				       &ref->old_oid)) {
				/*
				 * We do not have the remote ref, or
				 * we know that the remote ref is not
				 * an ancestor of what we are trying to
				 * push. Either way this can be losing
				 * commits at the remote end and likely
				 * we were not up to date to begin with.
				 */
				error("remote '%s' is not an ancestor of\n"
				      "local '%s'.\n"
				      "Maybe you are not up-to-date and "
				      "need to pull first?",
				      ref->name,
				      ref->peer_ref->name);
				if (helper_status)
					printf("error %s non-fast forward\n", ref->name);
				rc = -2;
				continue;
			}
		}
		oidcpy(&ref->new_oid, &ref->peer_ref->new_oid);
		new_refs++;

		fprintf(stderr, "updating '%s'", ref->name);
		if (strcmp(ref->name, ref->peer_ref->name))
			fprintf(stderr, " using '%s'", ref->peer_ref->name);
		fprintf(stderr, "\n from %s\n to %s\n",
			oid_to_hex(&ref->old_oid), oid_to_hex(&ref->new_oid));
		if (dry_run) {
			if (helper_status)
				printf("ok %s\n", ref->name);
			continue;
		}

		/* Lock remote branch ref */
		ref_lock = lock_remote(ref->name, LOCK_TIME);
		if (ref_lock == NULL) {
			fprintf(stderr, "Unable to lock remote branch %s\n",
				ref->name);
			if (helper_status)
				printf("error %s lock error\n", ref->name);
			rc = 1;
			continue;
		}

		/* Set up revision info for this refspec */
		argv_array_push(&commit_argv, ""); /* ignored */
		argv_array_push(&commit_argv, "--objects");
		argv_array_push(&commit_argv, oid_to_hex(&ref->new_oid));
		if (!push_all && !is_null_oid(&ref->old_oid))
			argv_array_pushf(&commit_argv, "^%s",
					 oid_to_hex(&ref->old_oid));
		init_revisions(&revs, setup_git_directory());
		setup_revisions(commit_argv.argc, commit_argv.argv, &revs, NULL);
		revs.edge_hint = 0; /* just in case */

		/* Generate a list of objects that need to be pushed */
		pushing = 0;
		if (prepare_revision_walk(&revs))
			die("revision walk setup failed");
		mark_edges_uninteresting(&revs, NULL);
		objects_to_send = get_delta(&revs, ref_lock);
		finish_all_active_slots();

		/* Push missing objects to remote, this would be a
		   convenient time to pack them first if appropriate. */
		pushing = 1;
		if (objects_to_send)
			fprintf(stderr, " sending %d objects\n",
				objects_to_send);

		run_request_queue();

		/* Update the remote branch if all went well */
		if (aborted || !update_remote(ref->new_oid.hash, ref_lock))
			rc = 1;

		if (!rc)
			fprintf(stderr, " done\n");
		if (helper_status)
			printf("%s %s\n", !rc ? "ok" : "error", ref->name);
		unlock_remote(ref_lock);
		check_locks();
		argv_array_clear(&commit_argv);
	}

	/* Update remote server info if appropriate */
	if (repo->has_info_refs && new_refs) {
		if (info_ref_lock && repo->can_update_info_refs) {
			fprintf(stderr, "Updating remote server info\n");
			if (!dry_run)
				update_remote_info_refs(info_ref_lock);
		} else {
			fprintf(stderr, "Unable to update server info\n");
		}
	}

 cleanup:
	if (info_ref_lock)
		unlock_remote(info_ref_lock);
	free(repo);

	http_cleanup();

	request = request_queue_head;
	while (request != NULL) {
		next_request = request->next;
		release_request(request);
		request = next_request;
	}

	return rc;
}
/*
 * Delete the single remote branch matching "pattern" via a WebDAV
 * DELETE request.  Unless "force" is set, first sanity-check that the
 * remote HEAD resolves to a known object, that the target branch is
 * not the current HEAD, resolves locally, and is an ancestor of HEAD.
 *
 * Returns 0 on success (or dry run), negative error() value otherwise.
 * NOTE(review): each fetch_symref() call appears to replace symref
 * without freeing the previous string — possible leak; confirm
 * fetch_symref's ownership contract.
 */
static int delete_remote_branch(const char *pattern, int force)
{
	struct ref *refs = remote_refs;
	struct ref *remote_ref = NULL;
	struct object_id head_oid;
	char *symref = NULL;
	int match;
	int patlen = strlen(pattern);
	int i;
	struct active_request_slot *slot;
	struct slot_results results;
	char *url;

	/* Find the remote branch(es) matching the specified branch name */
	for (match = 0; refs; refs = refs->next) {
		char *name = refs->name;
		int namelen = strlen(name);
		/* match by trailing path component */
		if (namelen < patlen ||
		    memcmp(name + namelen - patlen, pattern, patlen))
			continue;
		if (namelen != patlen && name[namelen - patlen - 1] != '/')
			continue;
		match++;
		remote_ref = refs;
	}
	if (match == 0)
		return error("No remote branch matches %s", pattern);
	if (match != 1)
		return error("More than one remote branch matches %s",
			     pattern);

	/*
	 * Remote HEAD must be a symref (not exactly foolproof; a remote
	 * symlink to a symref will look like a symref)
	 */
	fetch_symref("HEAD", &symref, &head_oid);
	if (!symref)
		return error("Remote HEAD is not a symref");

	/* Remote branch must not be the remote HEAD */
	for (i = 0; symref && i < MAXDEPTH; i++) {
		if (!strcmp(remote_ref->name, symref))
			return error("Remote branch %s is the current HEAD",
				     remote_ref->name);
		/* follow the symref chain, up to MAXDEPTH levels */
		fetch_symref(symref, &symref, &head_oid);
	}

	/* Run extra sanity checks if delete is not forced */
	if (!force) {
		/* Remote HEAD must resolve to a known object */
		if (symref)
			return error("Remote HEAD symrefs too deep");
		if (is_null_oid(&head_oid))
			return error("Unable to resolve remote HEAD");
		if (!has_object_file(&head_oid))
			return error("Remote HEAD resolves to object %s\nwhich does not exist locally, perhaps you need to fetch?", oid_to_hex(&head_oid));

		/* Remote branch must resolve to a known object */
		if (is_null_oid(&remote_ref->old_oid))
			return error("Unable to resolve remote branch %s",
				     remote_ref->name);
		if (!has_object_file(&remote_ref->old_oid))
			return error("Remote branch %s resolves to object %s\nwhich does not exist locally, perhaps you need to fetch?",
				     remote_ref->name,
				     oid_to_hex(&remote_ref->old_oid));

		/* Remote branch must be an ancestor of remote HEAD */
		if (!verify_merge_base(&head_oid, remote_ref)) {
			return error("The branch '%s' is not an ancestor "
				     "of your current HEAD.\n"
				     "If you are sure you want to delete it,"
				     " run:\n\t'git http-push -D %s %s'",
				     remote_ref->name, repo->url, pattern);
		}
	}

	/* Send delete request */
	fprintf(stderr, "Removing remote branch '%s'\n", remote_ref->name);
	if (dry_run)
		return 0;
	url = xstrfmt("%s%s", repo->url, remote_ref->name);
	slot = get_active_slot();
	slot->results = &results;
	curl_setup_http_get(slot->curl, url, DAV_DELETE);
	if (start_active_slot(slot)) {
		run_active_slot(slot);
		free(url);
		if (results.curl_result != CURLE_OK)
			return error("DELETE request failed (%d/%ld)",
				     results.curl_result, results.http_code);
	} else {
		free(url);
		return error("Unable to start DELETE request");
	}

	return 0;
}