/*
 * Look up the object id of the ".gitmodules" blob in the given tree-ish.
 *
 * A null tree-ish is treated as "no tree": gitmodules_oid is cleared and
 * the lookup is reported as successful.  Otherwise "<tree-ish>:.gitmodules"
 * is appended to "rev" and resolved.
 *
 * Returns 1 when gitmodules_oid was filled in (or cleared), 0 otherwise.
 */
static int gitmodule_oid_from_commit(const struct object_id *treeish_name,
				     struct object_id *gitmodules_oid,
				     struct strbuf *rev)
{
	if (is_null_oid(treeish_name)) {
		oidclr(gitmodules_oid);
		return 1;
	}

	strbuf_addf(rev, "%s:.gitmodules", oid_to_hex(treeish_name));
	return get_oid(rev->buf, gitmodules_oid) >= 0 ? 1 : 0;
}
/*
 * Check out the blob at "path" from tree-ish "ref" into the on-disk file
 * "outputpath", bypassing the index.
 *
 * Returns 0 on success; the non-zero get_oid()/read_tree_recursive()
 * result or -1 on failure.
 */
int git_checkout_file(const char* ref, const char* path, char* outputpath)
{
	struct cache_entry *ce;
	int ret;
	struct object_id oid;
	struct tree * root;
	struct checkout state;
	struct pathspec pathspec;
	const char *matchbuf[1];

	/* Resolve the tree-ish name to an object id. */
	ret = get_oid(ref, &oid);
	if (ret)
		return ret;

	/* Pick up packs that may have appeared since we last looked. */
	reprepare_packed_git(the_repository);

	root = parse_tree_indirect(&oid);
	if (!root) {
		free_all_pack();
		return -1;
	}

	/* Scratch cache entry for the tree-walk callback to fill in. */
	ce = xcalloc(1, cache_entry_size(strlen(path)));

	/*
	 * Build a single-path, no-wildcard pathspec.  NOTE(review): "path"
	 * is passed in the prefix slot with an empty argv, so the resulting
	 * pathspec is the prefix itself — confirm against parse_pathspec().
	 */
	matchbuf[0] = NULL;
	parse_pathspec(&pathspec, PATHSPEC_ALL_MAGIC, PATHSPEC_PREFER_CWD,
		       path, matchbuf);
	pathspec.items[0].nowildcard_len = pathspec.items[0].len;

	/* Walk the tree; update_some() records the matching entry into ce. */
	ret = read_tree_recursive(root, "", 0, 0, &pathspec, update_some, ce);
	clear_pathspec(&pathspec);
	if (ret) {
		free_all_pack();
		free(ce);
		return ret;
	}

	/* Force-write the found blob to outputpath (no index refresh). */
	memset(&state, 0, sizeof(state));
	state.force = 1;
	state.refresh_cache = 0;

	ret = write_entry(ce, outputpath, &state, 0);

	free_all_pack();
	free(ce);
	return ret;
}
/*
 * for_each_remote() callback: resolve cb->src_ref through each remote's
 * fetch refspecs.  Records the first tracking ref that resolves to an
 * object in cb->dst_ref/cb->dst_oid; a second hit clears cb->unique.
 * Always returns 0 so iteration continues over every remote.
 */
static int check_tracking_name(struct remote *remote, void *cb_data)
{
	struct tracking_name_data *data = cb_data;
	struct refspec query;

	memset(&query, 0, sizeof(struct refspec));
	query.src = data->src_ref;

	/* No tracking ref for this remote, or it points at nothing? Skip. */
	if (remote_find_tracking(remote, &query) ||
	    get_oid(query.dst, data->dst_oid)) {
		free(query.dst);
		return 0;
	}

	if (!data->dst_ref) {
		/* First match: remember it (ownership of query.dst moves). */
		data->dst_ref = query.dst;
		return 0;
	}

	/* Second match: the short name is ambiguous across remotes. */
	free(query.dst);
	data->unique = 0;
	return 0;
}
static int edit_and_replace(const char *object_ref, int force, int raw) { char *tmpfile; enum object_type type; struct object_id old_oid, new_oid, prev; struct strbuf ref = STRBUF_INIT; if (get_oid(object_ref, &old_oid) < 0) return error("Not a valid object name: '%s'", object_ref); type = oid_object_info(the_repository, &old_oid, NULL); if (type < 0) return error("unable to get object type for %s", oid_to_hex(&old_oid)); if (check_ref_valid(&old_oid, &prev, &ref, force)) { strbuf_release(&ref); return -1; } strbuf_release(&ref); tmpfile = git_pathdup("REPLACE_EDITOBJ"); if (export_object(&old_oid, type, raw, tmpfile)) { free(tmpfile); return -1; } if (launch_editor(tmpfile, NULL, NULL) < 0) { free(tmpfile); return error("editing object file failed"); } if (import_object(&new_oid, type, raw, tmpfile)) { free(tmpfile); return -1; } free(tmpfile); if (!oidcmp(&old_oid, &new_oid)) return error("new object is the same as the old one: '%s'", oid_to_hex(&old_oid)); return replace_object_oid(object_ref, &old_oid, "replacement", &new_oid, force); }
/*
 * Add one command-line rev argument to the global ref list.
 *
 * A name that resolves to an object is appended directly.  Otherwise, if
 * it contains glob characters, it is matched against all refs (setting the
 * match_ref_pattern/match_ref_slash globals consumed by
 * append_matching_ref) and the matches are sorted.  Anything else is fatal.
 */
static void append_one_rev(const char *av)
{
	struct object_id revkey;

	if (!get_oid(av, &revkey)) {
		append_ref(av, &revkey, 0);
		return;
	}
	if (strchr(av, '*') || strchr(av, '?') || strchr(av, '[')) {
		/* glob style match */
		int saved_matches = ref_name_cnt;

		match_ref_pattern = av;
		match_ref_slash = count_slashes(av);
		for_each_ref(append_matching_ref, NULL);
		/*
		 * Only complain about zero matches if the list was not
		 * already full (ref_name_cnt < MAX_REVS).
		 */
		if (saved_matches == ref_name_cnt &&
		    ref_name_cnt < MAX_REVS)
			error(_("no matching refs with %s"), av);
		/* Sort just the slice this pattern contributed. */
		sort_ref_range(saved_matches, ref_name_cnt);
		return;
	}
	die("bad sha1 reference %s", av);
}
static int rollback_is_safe(void) { struct strbuf sb = STRBUF_INIT; struct object_id expected_head, actual_head; if (strbuf_read_file(&sb, git_path_abort_safety_file(), 0) >= 0) { strbuf_trim(&sb); if (get_oid_hex(sb.buf, &expected_head)) { strbuf_release(&sb); die(_("could not parse %s"), git_path_abort_safety_file()); } strbuf_release(&sb); } else if (errno == ENOENT) oidclr(&expected_head); else die_errno(_("could not read '%s'"), git_path_abort_safety_file()); if (get_oid("HEAD", &actual_head)) oidclr(&actual_head); return !oidcmp(&actual_head, &expected_head); }
/*
 * "git notes show [<object>]": resolve the (at most one) object argument,
 * defaulting to HEAD, and exec "git show" on its note blob.
 *
 * Returns the error() / execv_git_cmd() result.
 */
static int show(int argc, const char **argv, const char *prefix)
{
	const char *object_ref;
	struct notes_tree *t;
	struct object_id object;
	const struct object_id *note;
	int retval;
	struct option options[] = {
		OPT_END()
	};

	argc = parse_options(argc, argv, prefix, options,
			     git_notes_show_usage, 0);

	if (1 < argc) {
		error(_("too many parameters"));
		usage_with_options(git_notes_show_usage, options);
	}

	object_ref = argc ? argv[0] : "HEAD";

	if (get_oid(object_ref, &object))
		die(_("failed to resolve '%s' as a valid ref."), object_ref);

	t = init_notes_check("show", 0);
	note = get_note(t, &object);

	if (!note)
		retval = error(_("no note found for object %s."),
			       oid_to_hex(&object));
	else {
		/* Hand off to "git show <note-oid>". */
		const char *show_args[3] = {"show", oid_to_hex(note), NULL};
		retval = execv_git_cmd(show_args);
	}
	free_notes(t);
	return retval;
}
/*
 * Replace the "parent" header lines of the commit object in "buf" with one
 * line per object named in argv (each must resolve to a commit).
 *
 * Returns 0 on success, -1 (via error()) on a bad argument.
 *
 * NOTE(review): the offsets below hardcode SHA-1 header sizes —
 * 46 = "tree " (5) + 40 hex + "\n", 48 = "parent " (7) + 40 hex + "\n".
 * This would need the_hash_algo for other hash sizes; confirm intent.
 */
static int replace_parents(struct strbuf *buf, int argc, const char **argv)
{
	struct strbuf new_parents = STRBUF_INIT;
	const char *parent_start, *parent_end;
	int i;

	/* find existing parents */
	parent_start = buf->buf;
	parent_start += GIT_SHA1_HEXSZ + 6; /* "tree " + "hex sha1" + "\n" */
	parent_end = parent_start;

	while (starts_with(parent_end, "parent "))
		parent_end += 48; /* "parent " + "hex sha1" + "\n" */

	/* prepare new parents */
	for (i = 0; i < argc; i++) {
		struct object_id oid;

		if (get_oid(argv[i], &oid) < 0) {
			strbuf_release(&new_parents);
			return error(_("Not a valid object name: '%s'"),
				     argv[i]);
		}
		if (!lookup_commit_reference(&oid)) {
			strbuf_release(&new_parents);
			return error(_("could not parse %s"), argv[i]);
		}
		strbuf_addf(&new_parents, "parent %s\n", oid_to_hex(&oid));
	}

	/* replace existing parents with new ones */
	strbuf_splice(buf, parent_start - buf->buf, parent_end - parent_start,
		      new_parents.buf, new_parents.len);

	strbuf_release(&new_parents);
	return 0;
}
/*
 * DER-encode this EC group in one of three forms:
 *  - EC_DOMPAR_ENC_EXPLICIT: full SEC1 "ECParameters" SEQUENCE (version,
 *    prime field, curve coefficients a/b, base point, order, cofactor),
 *  - EC_DOMPAR_ENC_OID: just the named-curve OID,
 *  - EC_DOMPAR_ENC_IMPLICITCA: an ASN.1 NULL.
 * Any other form value is an internal error.
 */
std::vector<uint8_t> EC_Group::DER_encode(EC_Group_Encoding form) const
   {
   if(form == EC_DOMPAR_ENC_EXPLICIT)
      {
      const size_t ecpVers1 = 1;
      // 1.2.840.10045.1.1 identifies a prime-field curve
      OID curve_type("1.2.840.10045.1.1");

      // a and b are encoded as fixed-width octet strings of p's byte length
      const size_t p_bytes = m_curve.get_p().bytes();

      return DER_Encoder()
         .start_cons(SEQUENCE)
            .encode(ecpVers1)
            .start_cons(SEQUENCE)
               .encode(curve_type)
               .encode(m_curve.get_p())
            .end_cons()
            .start_cons(SEQUENCE)
               .encode(BigInt::encode_1363(m_curve.get_a(), p_bytes),
                       OCTET_STRING)
               .encode(BigInt::encode_1363(m_curve.get_b(), p_bytes),
                       OCTET_STRING)
            .end_cons()
            .encode(EC2OSP(m_base_point, PointGFp::UNCOMPRESSED), OCTET_STRING)
            .encode(m_order)
            .encode(m_cofactor)
         .end_cons()
         .get_contents_unlocked();
      }
   else if(form == EC_DOMPAR_ENC_OID)
      return DER_Encoder().encode(OID(get_oid())).get_contents_unlocked();
   else if(form == EC_DOMPAR_ENC_IMPLICITCA)
      return DER_Encoder().encode_null().get_contents_unlocked();
   else
      throw Internal_Error("EC_Group::DER_encode: Unknown encoding");
   }
/*
 * "git notes list [<object>]": with an argument, print the note oid for
 * that object (error if none); with no argument, list every note.
 *
 * Returns 0 on success, negative on error.
 */
static int list(int argc, const char **argv, const char *prefix)
{
	struct notes_tree *t;
	struct object_id object;
	const struct object_id *note;
	int retval = -1;
	struct option options[] = {
		OPT_END()
	};

	if (argc)
		argc = parse_options(argc, argv, prefix, options,
				     git_notes_list_usage, 0);

	if (1 < argc) {
		error(_("too many parameters"));
		usage_with_options(git_notes_list_usage, options);
	}

	t = init_notes_check("list", 0);
	if (argc) {
		/* Single-object mode: print that object's note oid. */
		if (get_oid(argv[0], &object))
			die(_("failed to resolve '%s' as a valid ref."),
			    argv[0]);
		note = get_note(t, &object);
		if (note) {
			puts(oid_to_hex(note));
			retval = 0;
		} else
			retval = error(_("no note found for object %s."),
				       oid_to_hex(&object));
	} else
		/* No argument: enumerate all notes. */
		retval = for_each_note(t, 0, list_each_note, NULL);

	free_notes(t);
	return retval;
}
/*
 * make_variant_int: Converts our external (Variant) representation to a
 * VariantInt (the internal, exploded representation).
 *
 * Extracts type id, typmod and null flag from v, looks up the type cache,
 * then copies the payload out of v:
 *  - pass-by-value types are fetched directly into vi->data,
 *  - varlena (typlen == -1) gets a fresh varlena with header rebuilt,
 *  - cstring (typlen == -2) gets a freshly allocated NUL-terminated copy,
 *  - fixed-size by-reference types are copied verbatim.
 *
 * Returns a palloc'd VariantInt; caller owns it (palloc context rules apply).
 */
static VariantInt
make_variant_int(Variant v, FunctionCallInfo fcinfo, IOFuncSelector func)
{
	VariantCache	*cache;
	VariantInt		vi;
	long			data_length;	/* long instead of size_t because we're subtracting */
	Pointer			ptr;
	uint			flags;

	/* Ensure v is fully detoasted */
	Assert(!VARATT_IS_EXTENDED(v));

	/* May need to be careful about what context this stuff is palloc'd in */
	vi = palloc0(sizeof(VariantDataInt));

	vi->typid = get_oid(v, &flags);
#ifdef VARIANT_TEST_OID
	vi->typid -= OID_MASK;
#endif
	vi->typmod = v->typmod;
	vi->isnull = (flags & VAR_ISNULL ? true : false);

	cache = get_cache(fcinfo, vi, func);

	/*
	 * by-value type. We do special things with all pass-by-reference when we
	 * store, so we only use this for typbyval even though fetch_att supports
	 * pass-by-reference.
	 *
	 * Note that fetch_att sanity-checks typlen for us (because we're only
	 * passing typbyval).
	 */
	if (cache->typbyval)
	{
		if (!vi->isnull)
		{
			Pointer p = VDATAPTR_ALIGN(v, cache->typalign);

			vi->data = fetch_att(p, cache->typbyval, cache->typlen);
		}
		return vi;
	}

	/*
	 * We don't store a varlena header for varlena data; instead we compute
	 * its size based on ours:
	 *
	 *   our size - our header size - overflow byte (if present)
	 *
	 * For cstring, we don't store the trailing NUL.
	 */
	data_length = VARSIZE(v) - VHDRSZ - (flags & VAR_OVERFLOW ? 1 : 0);
	if (data_length < 0)
		elog(ERROR, "Negative data_length %li", data_length);

	if (cache->typlen == -1) /* varlena */
	{
		ptr = palloc0(data_length + VARHDRSZ);
		SET_VARSIZE(ptr, data_length + VARHDRSZ);
		memcpy(VARDATA(ptr), VDATAPTR(v), data_length);
	}
	else if (cache->typlen == -2) /* cstring */
	{
		ptr = palloc(data_length + 1); /* Need space for NUL terminator */
		memcpy(ptr, VDATAPTR(v), data_length);

		/*
		 * BUGFIX: the terminator belongs at offset data_length, not
		 * data_length + 1.  The old code wrote one byte past the
		 * palloc'd buffer and left the copied string unterminated.
		 */
		*(ptr + data_length) = '\0';
	}
	else /* Fixed size, pass by reference */
	{
		if (vi->isnull)
		{
			vi->data = (Datum) 0;
			return vi;
		}
		Assert(data_length == cache->typlen);
		ptr = palloc0(data_length);
		Assert(ptr == (char *) att_align_nominal(ptr, cache->typalign));
		memcpy(ptr, VDATAPTR(v), data_length);
	}

	vi->data = PointerGetDatum(ptr);
	return vi;
}
/*
 * Merge the notes tree at o->remote_ref into the one at o->local_ref
 * (which must be the ref local_tree was loaded from).
 *
 * Handles unborn refs on either side, fast-forward and already-merged
 * shortcuts, and otherwise performs a diff-based merge, committing the
 * (possibly conflicted) result.  result_oid receives the resulting commit.
 *
 * Returns the merge_from_diffs() result: 0 for trivial/shortcut results,
 * non-zero for a non-trivial merge (with or without conflicts).
 */
int notes_merge(struct notes_merge_options *o,
		struct notes_tree *local_tree,
		struct object_id *result_oid)
{
	struct object_id local_oid, remote_oid;
	struct commit *local, *remote;
	struct commit_list *bases = NULL;
	const struct object_id *base_oid, *base_tree_oid;
	int result = 0;

	assert(o->local_ref && o->remote_ref);
	assert(!strcmp(o->local_ref, local_tree->ref));
	oidclr(result_oid);

	trace_printf("notes_merge(o->local_ref = %s, o->remote_ref = %s)\n",
		     o->local_ref, o->remote_ref);

	/* Dereference o->local_ref into local_sha1 */
	if (read_ref_full(o->local_ref, 0, &local_oid, NULL))
		die("Failed to resolve local notes ref '%s'", o->local_ref);
	else if (!check_refname_format(o->local_ref, 0) &&
		 is_null_oid(&local_oid))
		local = NULL; /* local_oid == null_oid indicates unborn ref */
	else if (!(local = lookup_commit_reference(&local_oid)))
		die("Could not parse local commit %s (%s)",
		    oid_to_hex(&local_oid), o->local_ref);
	trace_printf("\tlocal commit: %.7s\n", oid_to_hex(&local_oid));

	/* Dereference o->remote_ref into remote_oid */
	if (get_oid(o->remote_ref, &remote_oid)) {
		/*
		 * Failed to get remote_oid. If o->remote_ref looks like an
		 * unborn ref, perform the merge using an empty notes tree.
		 */
		if (!check_refname_format(o->remote_ref, 0)) {
			oidclr(&remote_oid);
			remote = NULL;
		} else {
			die("Failed to resolve remote notes ref '%s'",
			    o->remote_ref);
		}
	} else if (!(remote = lookup_commit_reference(&remote_oid))) {
		die("Could not parse remote commit %s (%s)",
		    oid_to_hex(&remote_oid), o->remote_ref);
	}
	trace_printf("\tremote commit: %.7s\n", oid_to_hex(&remote_oid));

	if (!local && !remote)
		die("Cannot merge empty notes ref (%s) into empty notes ref "
		    "(%s)", o->remote_ref, o->local_ref);
	if (!local) {
		/* result == remote commit */
		oidcpy(result_oid, &remote_oid);
		goto found_result;
	}
	if (!remote) {
		/* result == local commit */
		oidcpy(result_oid, &local_oid);
		goto found_result;
	}
	assert(local && remote);

	/* Find merge bases */
	bases = get_merge_bases(local, remote);
	if (!bases) {
		/* No common history: merge against the empty tree. */
		base_oid = &null_oid;
		base_tree_oid = the_hash_algo->empty_tree;
		if (o->verbosity >= 4)
			printf("No merge base found; doing history-less merge\n");
	} else if (!bases->next) {
		base_oid = &bases->item->object.oid;
		base_tree_oid = &bases->item->tree->object.oid;
		if (o->verbosity >= 4)
			printf("One merge base found (%.7s)\n",
			       oid_to_hex(base_oid));
	} else {
		/* TODO: How to handle multiple merge-bases? */
		base_oid = &bases->item->object.oid;
		base_tree_oid = &bases->item->tree->object.oid;
		if (o->verbosity >= 3)
			printf("Multiple merge bases found. Using the first "
			       "(%.7s)\n", oid_to_hex(base_oid));
	}

	if (o->verbosity >= 4)
		printf("Merging remote commit %.7s into local commit %.7s with "
		       "merge-base %.7s\n", oid_to_hex(&remote->object.oid),
		       oid_to_hex(&local->object.oid), oid_to_hex(base_oid));

	if (!oidcmp(&remote->object.oid, base_oid)) {
		/* Already merged; result == local commit */
		if (o->verbosity >= 2)
			printf("Already up to date!\n");
		oidcpy(result_oid, &local->object.oid);
		goto found_result;
	}
	if (!oidcmp(&local->object.oid, base_oid)) {
		/* Fast-forward; result == remote commit */
		if (o->verbosity >= 2)
			printf("Fast-forward\n");
		oidcpy(result_oid, &remote->object.oid);
		goto found_result;
	}

	result = merge_from_diffs(o, base_tree_oid,
				  &local->tree->object.oid,
				  &remote->tree->object.oid, local_tree);

	if (result != 0) { /* non-trivial merge (with or without conflicts) */
		/* Commit (partial) result */
		struct commit_list *parents = NULL;
		commit_list_insert(remote, &parents); /* LIFO order */
		commit_list_insert(local, &parents);
		create_notes_commit(local_tree, parents,
				    o->commit_msg.buf, o->commit_msg.len,
				    result_oid->hash);
	}

found_result:
	free_commit_list(bases);
	strbuf_release(&(o->commit_msg));
	trace_printf("notes_merge(): result = %i, result_oid = %.7s\n",
		     result, oid_to_hex(result_oid));
	return result;
}
/*
 * Entry point for "git ls-tree": parse options, resolve the tree-ish
 * argument, build a literal (no-wildcard) pathspec from the remaining
 * arguments, and walk the tree with show_tree().
 *
 * Returns 0 on success, 1 if the recursive read fails.
 */
int cmd_ls_tree(int argc, const char **argv, const char *prefix)
{
	struct object_id oid;
	struct tree *tree;
	int i, full_tree = 0;
	const struct option ls_tree_options[] = {
		OPT_BIT('d', NULL, &ls_options, N_("only show trees"),
			LS_TREE_ONLY),
		OPT_BIT('r', NULL, &ls_options, N_("recurse into subtrees"),
			LS_RECURSIVE),
		OPT_BIT('t', NULL, &ls_options, N_("show trees when recursing"),
			LS_SHOW_TREES),
		OPT_SET_INT('z', NULL, &line_termination,
			    N_("terminate entries with NUL byte"), 0),
		OPT_BIT('l', "long", &ls_options, N_("include object size"),
			LS_SHOW_SIZE),
		OPT_BIT(0, "name-only", &ls_options, N_("list only filenames"),
			LS_NAME_ONLY),
		OPT_BIT(0, "name-status", &ls_options, N_("list only filenames"),
			LS_NAME_ONLY),
		OPT_SET_INT(0, "full-name", &chomp_prefix,
			    N_("use full path names"), 0),
		OPT_BOOL(0, "full-tree", &full_tree,
			 N_("list entire tree; not just current directory "
			    "(implies --full-name)")),
		OPT__ABBREV(&abbrev),
		OPT_END()
	};

	git_config(git_default_config, NULL);
	ls_tree_prefix = prefix;
	if (prefix && *prefix)
		chomp_prefix = strlen(prefix);

	argc = parse_options(argc, argv, prefix, ls_tree_options,
			     ls_tree_usage, 0);
	if (full_tree) {
		/* --full-tree: show from the root, full names. */
		ls_tree_prefix = prefix = NULL;
		chomp_prefix = 0;
	}
	/* -d -r should imply -t, but -d by itself should not have to. */
	if ( (LS_TREE_ONLY|LS_RECURSIVE) ==
	    ((LS_TREE_ONLY|LS_RECURSIVE) & ls_options))
		ls_options |= LS_SHOW_TREES;

	if (argc < 1)
		usage_with_options(ls_tree_usage, ls_tree_options);
	if (get_oid(argv[0], &oid))
		die("Not a valid object name %s", argv[0]);

	/*
	 * show_recursive() rolls its own matching code and is
	 * generally ignorant of 'struct pathspec'.  The magic mask
	 * cannot be lifted until it is converted to use
	 * match_pathspec() or tree_entry_interesting().
	 */
	parse_pathspec(&pathspec, PATHSPEC_ALL_MAGIC &
				  ~(PATHSPEC_FROMTOP | PATHSPEC_LITERAL),
		       PATHSPEC_PREFER_CWD,
		       prefix, argv + 1);
	/* Treat every pathspec item literally (no wildcard matching). */
	for (i = 0; i < pathspec.nr; i++)
		pathspec.items[i].nowildcard_len = pathspec.items[i].len;
	pathspec.has_wildcard = 0;
	tree = parse_tree_indirect(&oid);
	if (!tree)
		die("not a tree object");
	return !!read_tree_recursive(tree, "", 0, 0, &pathspec, show_tree, NULL);
}
/*
 * Entry point for "git fsck": verify the connectivity and (unless
 * --connectivity-only) the integrity of objects in the database —
 * loose objects, alternates, packs, explicit command-line objects,
 * default heads and, when requested, the index.
 *
 * Returns the accumulated errors_found bitmap (0 == clean).
 */
int cmd_fsck(int argc, const char **argv, const char *prefix)
{
	int i;
	struct alternate_object_database *alt;

	/* fsck knows how to handle missing promisor objects */
	fetch_if_missing = 0;

	errors_found = 0;
	check_replace_refs = 0;

	argc = parse_options(argc, argv, prefix, fsck_opts, fsck_usage, 0);

	fsck_walk_options.walk = mark_object;
	fsck_obj_options.walk = mark_used;
	fsck_obj_options.error_func = fsck_error_func;
	if (check_strict)
		fsck_obj_options.strict = 1;

	if (show_progress == -1)
		show_progress = isatty(2);
	if (verbose)
		show_progress = 0;

	if (write_lost_and_found) {
		check_full = 1;
		include_reflogs = 0;
	}

	if (name_objects)
		fsck_walk_options.object_names =
			xcalloc(1, sizeof(struct decoration));

	git_config(fsck_config, NULL);

	fsck_head_link();
	if (connectivity_only) {
		/* Only mark objects; skip full content verification. */
		for_each_loose_object(mark_loose_for_connectivity, NULL, 0);
		for_each_packed_object(mark_packed_for_connectivity, NULL, 0);
	} else {
		struct alternate_object_database *alt_odb_list;

		fsck_object_dir(get_object_directory());

		/* Also check every alternate object database. */
		prepare_alt_odb(the_repository);
		alt_odb_list = the_repository->objects->alt_odb_list;
		for (alt = alt_odb_list; alt; alt = alt->next)
			fsck_object_dir(alt->path);

		if (check_full) {
			struct packed_git *p;
			uint32_t total = 0, count = 0;
			struct progress *progress = NULL;

			if (show_progress) {
				/* First pass: count objects for the bar. */
				for (p = get_packed_git(the_repository); p;
				     p = p->next) {
					if (open_pack_index(p))
						continue;
					total += p->num_objects;
				}

				progress = start_progress(_("Checking objects"),
							  total);
			}
			for (p = get_packed_git(the_repository); p;
			     p = p->next) {
				/* verify gives error messages itself */
				if (verify_pack(p, fsck_obj_buffer,
						progress, count))
					errors_found |= ERROR_PACK;
				count += p->num_objects;
			}
			stop_progress(&progress);
		}
	}

	/* Explicit objects named on the command line become extra heads. */
	for (i = 0; i < argc; i++) {
		const char *arg = argv[i];
		struct object_id oid;
		if (!get_oid(arg, &oid)) {
			struct object *obj = lookup_object(oid.hash);

			if (!obj || !(obj->flags & HAS_OBJ)) {
				if (is_promisor_object(&oid))
					continue;
				error("%s: object missing", oid_to_hex(&oid));
				errors_found |= ERROR_OBJECT;
				continue;
			}
			obj->flags |= USED;
			if (name_objects)
				add_decoration(fsck_walk_options.object_names,
					obj, xstrdup(arg));
			mark_object_reachable(obj);
			continue;
		}
		error("invalid parameter: expected sha1, got '%s'", arg);
		errors_found |= ERROR_OBJECT;
	}

	/*
	 * If we've not been given any explicit head information, do the
	 * default ones from .git/refs. We also consider the index file
	 * in this case (ie this implies --cache).
	 */
	if (!argc) {
		get_default_heads();
		keep_cache_objects = 1;
	}

	if (keep_cache_objects) {
		verify_index_checksum = 1;
		verify_ce_order = 1;
		read_cache();
		for (i = 0; i < active_nr; i++) {
			unsigned int mode;
			struct blob *blob;
			struct object *obj;

			mode = active_cache[i]->ce_mode;
			/* Submodule entries point outside this object db. */
			if (S_ISGITLINK(mode))
				continue;
			blob = lookup_blob(&active_cache[i]->oid);
			if (!blob)
				continue;
			obj = &blob->object;
			obj->flags |= USED;
			if (name_objects)
				add_decoration(fsck_walk_options.object_names,
					obj,
					xstrfmt(":%s", active_cache[i]->name));
			mark_object_reachable(obj);
		}
		if (active_cache_tree)
			fsck_cache_tree(active_cache_tree);
	}

	check_connectivity();
	return errors_found;
}
/*
 * Build the X.509 AlgorithmIdentifier for this EC public key: the
 * algorithm OID paired with the DER-encoded domain parameters.
 */
AlgorithmIdentifier EC_PublicKey::algorithm_identifier() const
   {
   const auto alg_oid = get_oid();
   const auto domain_params = DER_domain();
   return AlgorithmIdentifier(alg_oid, domain_params);
   }
/*
 * Reads the patches generated by "git log -p" over the given range into a
 * string list, one entry per commit, with the `util` field being populated
 * as struct patch_util (will need to be free()d).
 *
 * Returns 0 on success, -1 on failure (child process or parse error).
 */
static int read_patches(const char *range, struct string_list *list)
{
	struct child_process cp = CHILD_PROCESS_INIT;
	FILE *in;
	struct strbuf buf = STRBUF_INIT, line = STRBUF_INIT;
	struct patch_util *util = NULL;
	int in_header = 1;

	argv_array_pushl(&cp.args, "log", "--no-color", "-p", "--no-merges",
			 "--reverse", "--date-order", "--decorate=no",
			 "--no-abbrev-commit", range,
			 NULL);
	cp.out = -1;
	cp.no_stdin = 1;
	cp.git_cmd = 1;

	if (start_command(&cp))
		return error_errno(_("could not start `log`"));
	in = fdopen(cp.out, "r");
	if (!in) {
		error_errno(_("could not read `log` output"));
		finish_command(&cp);
		return -1;
	}

	while (strbuf_getline(&line, in) != EOF) {
		const char *p;

		if (skip_prefix(line.buf, "commit ", &p)) {
			/* Flush the previous commit's accumulated text. */
			if (util) {
				string_list_append(list, buf.buf)->util = util;
				strbuf_reset(&buf);
			}
			/* NOTE(review): xcalloc args are (size, nmemb) here;
			 * conventional order is (nmemb, size) — same result. */
			util = xcalloc(sizeof(*util), 1);
			if (get_oid(p, &util->oid)) {
				error(_("could not parse commit '%s'"), p);
				free(util);
				string_list_clear(list, 1);
				strbuf_release(&buf);
				strbuf_release(&line);
				fclose(in);
				finish_command(&cp);
				return -1;
			}
			util->matching = -1;
			in_header = 1;
			continue;
		}

		if (starts_with(line.buf, "diff --git")) {
			/* Start of the diff section of this commit. */
			in_header = 0;
			strbuf_addch(&buf, '\n');
			if (!util->diff_offset)
				util->diff_offset = buf.len;
			strbuf_addbuf(&buf, &line);
		} else if (in_header) {
			/* Keep only the author and the commit message. */
			if (starts_with(line.buf, "Author: ")) {
				strbuf_addbuf(&buf, &line);
				strbuf_addstr(&buf, "\n\n");
			} else if (starts_with(line.buf, " ")) {
				strbuf_rtrim(&line);
				strbuf_addbuf(&buf, &line);
				strbuf_addch(&buf, '\n');
			}
			continue;
		} else if (starts_with(line.buf, "@@ "))
			/* Normalize hunk headers: drop line numbers. */
			strbuf_addstr(&buf, "@@");
		else if (!line.buf[0] || starts_with(line.buf, "index "))
			/*
			 * A completely blank (not ' \n', which is context)
			 * line is not valid in a diff.  We skip it
			 * silently, because this neatly handles the blank
			 * separator line between commits in git-log
			 * output.
			 *
			 * We also want to ignore the diff's `index` lines
			 * because they contain exact blob hashes in which
			 * we are not interested.
			 */
			continue;
		else
			strbuf_addbuf(&buf, &line);

		strbuf_addch(&buf, '\n');
		util->diffsize++;
	}
	fclose(in);
	strbuf_release(&line);

	/* Flush the final commit. */
	if (util)
		string_list_append(list, buf.buf)->util = util;
	strbuf_release(&buf);

	if (finish_command(&cp))
		return -1;

	return 0;
}
/*
 * Resolve the (at most one) command-line revision into stash metadata:
 * fills info->revision, info->w_commit, info->u_tree (if a third parent
 * exists) and info->is_stash_ref.  A bare number N is expanded to
 * "<ref_stash>@{N}"; no argument means "<ref_stash>@{0}".
 *
 * Returns 0 on success, non-zero on error (info freed on failure paths).
 */
static int get_stash_info(struct stash_info *info, int argc, const char **argv)
{
	int ret;
	char *end_of_rev;
	char *expanded_ref;
	const char *revision;
	const char *commit = NULL;
	struct object_id dummy;
	struct strbuf symbolic = STRBUF_INIT;

	if (argc > 1) {
		int i;
		struct strbuf refs_msg = STRBUF_INIT;

		for (i = 0; i < argc; i++)
			strbuf_addf(&refs_msg, " '%s'", argv[i]);

		fprintf_ln(stderr, _("Too many revisions specified:%s"),
			   refs_msg.buf);
		strbuf_release(&refs_msg);

		return -1;
	}

	if (argc == 1)
		commit = argv[0];

	strbuf_init(&info->revision, 0);
	if (!commit) {
		/* No argument: default to the most recent stash entry. */
		if (!ref_exists(ref_stash)) {
			free_stash_info(info);
			fprintf_ln(stderr, _("No stash entries found."));
			return -1;
		}

		strbuf_addf(&info->revision, "%s@{0}", ref_stash);
	} else if (strspn(commit, "0123456789") == strlen(commit)) {
		/* Pure number N: treat as stash@{N}. */
		strbuf_addf(&info->revision, "%s@{%s}", ref_stash, commit);
	} else {
		strbuf_addstr(&info->revision, commit);
	}

	revision = info->revision.buf;

	if (get_oid(revision, &info->w_commit)) {
		error(_("%s is not a valid reference"), revision);
		free_stash_info(info);
		return -1;
	}

	assert_stash_like(info, revision);

	/* A third parent ("^3") holds untracked files, when present. */
	info->has_u = !get_oidf(&info->u_tree, "%s^3:", revision);

	/* Strip any "@{...}" suffix before checking the symbolic name. */
	end_of_rev = strchrnul(revision, '@');
	strbuf_add(&symbolic, revision, end_of_rev - revision);

	ret = dwim_ref(symbolic.buf, symbolic.len, &dummy, &expanded_ref);
	strbuf_release(&symbolic);
	switch (ret) {
	case 0: /* Not found, but valid ref */
		info->is_stash_ref = 0;
		break;
	case 1:
		info->is_stash_ref = !strcmp(expanded_ref, ref_stash);
		break;
	default: /* Invalid or ambiguous */
		free_stash_info(info);
	}

	/*
	 * NOTE(review): expanded_ref looks like it may be uninitialized when
	 * dwim_ref() finds nothing — confirm dwim_ref() always assigns it
	 * before this free().
	 */
	free(expanded_ref);
	return !(ret == 0 || ret == 1);
}
/*
 * Entry point for "git rm": collect the index entries matching the given
 * pathspecs, verify safety (local modifications, submodule staging),
 * remove them from the index and — unless --cached — from the work tree,
 * then write the updated index.
 *
 * Returns 0 on success; dies or exits non-zero on failure.
 */
int cmd_rm(int argc, const char **argv, const char *prefix)
{
	int i;
	struct pathspec pathspec;
	char *seen;

	git_config(git_default_config, NULL);

	argc = parse_options(argc, argv, prefix, builtin_rm_options,
			     builtin_rm_usage, 0);
	if (!argc)
		usage_with_options(builtin_rm_usage, builtin_rm_options);

	if (!index_only)
		setup_work_tree();

	hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);

	if (read_cache() < 0)
		die(_("index file corrupt"));

	parse_pathspec(&pathspec, 0,
		       PATHSPEC_PREFER_CWD,
		       prefix, argv);
	refresh_index(&the_index, REFRESH_QUIET, &pathspec, NULL, NULL);

	/* seen[i] records how pathspec item i matched (if at all). */
	seen = xcalloc(pathspec.nr, 1);

	/* Collect every index entry matched by the pathspecs. */
	for (i = 0; i < active_nr; i++) {
		const struct cache_entry *ce = active_cache[i];
		if (!ce_path_match(ce, &pathspec, seen))
			continue;
		ALLOC_GROW(list.entry, list.nr + 1, list.alloc);
		list.entry[list.nr].name = xstrdup(ce->name);
		list.entry[list.nr].is_submodule = S_ISGITLINK(ce->ce_mode);
		if (list.entry[list.nr++].is_submodule &&
		    !is_staging_gitmodules_ok(&the_index))
			die (_("Please stage your changes to .gitmodules or stash them to proceed"));
	}

	if (pathspec.nr) {
		const char *original;
		int seen_any = 0;
		/* Report pathspecs that matched nothing or need -r. */
		for (i = 0; i < pathspec.nr; i++) {
			original = pathspec.items[i].original;
			if (!seen[i]) {
				if (!ignore_unmatch) {
					die(_("pathspec '%s' did not match any files"),
					    original);
				}
			} else {
				seen_any = 1;
			}
			if (!recursive && seen[i] == MATCHED_RECURSIVELY)
				die(_("not removing '%s' recursively without -r"),
				    *original ? original : ".");
		}

		if (!seen_any)
			exit(0);
	}

	if (!index_only)
		submodules_absorb_gitdir_if_needed(prefix);

	/*
	 * If not forced, the file, the index and the HEAD (if exists)
	 * must match; but the file can already been removed, since
	 * this sequence is a natural "novice" way:
	 *
	 *	rm F; git rm F
	 *
	 * Further, if HEAD commit exists, "diff-index --cached" must
	 * report no changes unless forced.
	 */
	if (!force) {
		struct object_id oid;
		if (get_oid("HEAD", &oid))
			oidclr(&oid);
		if (check_local_mod(&oid, index_only))
			exit(1);
	}

	/*
	 * First remove the names from the index: we won't commit
	 * the index unless all of them succeed.
	 */
	for (i = 0; i < list.nr; i++) {
		const char *path = list.entry[i].name;
		if (!quiet)
			printf("rm '%s'\n", path);

		if (remove_file_from_cache(path))
			die(_("git rm: unable to remove %s"), path);
	}

	if (show_only)
		return 0;

	/*
	 * Then, unless we used "--cached", remove the filenames from
	 * the workspace. If we fail to remove the first one, we
	 * abort the "git rm" (but once we've successfully removed
	 * any file at all, we'll go ahead and commit to it all:
	 * by then we've already committed ourselves and can't fail
	 * in the middle)
	 */
	if (!index_only) {
		int removed = 0, gitmodules_modified = 0;
		struct strbuf buf = STRBUF_INIT;
		for (i = 0; i < list.nr; i++) {
			const char *path = list.entry[i].name;
			if (list.entry[i].is_submodule) {
				strbuf_reset(&buf);
				strbuf_addstr(&buf, path);
				if (remove_dir_recursively(&buf, 0))
					die(_("could not remove '%s'"), path);

				removed = 1;
				if (!remove_path_from_gitmodules(path))
					gitmodules_modified = 1;
				continue;
			}
			if (!remove_path(path)) {
				removed = 1;
				continue;
			}
			if (!removed)
				die_errno("git rm: '%s'", path);
		}
		strbuf_release(&buf);
		if (gitmodules_modified)
			stage_updated_gitmodules(&the_index);
	}

	if (active_cache_changed) {
		if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
			die(_("Unable to write new index file"));
	}

	return 0;
}
/*
 * Entry point for "git prune": mark everything reachable (from the default
 * refs plus any objects named on the command line), then delete unreachable
 * loose objects older than the expiry, stale packed duplicates, temporary
 * files, and stale shallow entries.
 *
 * Returns 0; dies on fatal errors.
 */
int cmd_prune(int argc, const char **argv, const char *prefix)
{
	struct rev_info revs;
	struct progress *progress = NULL;
	int exclude_promisor_objects = 0;
	const struct option options[] = {
		OPT__DRY_RUN(&show_only, N_("do not remove, show only")),
		OPT__VERBOSE(&verbose, N_("report pruned objects")),
		OPT_BOOL(0, "progress", &show_progress, N_("show progress")),
		OPT_EXPIRY_DATE(0, "expire", &expire,
				N_("expire objects older than <time>")),
		OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
			 N_("limit traversal to objects outside promisor packfiles")),
		OPT_END()
	};
	char *s;

	git_config(git_default_config, NULL);
	/* Default: no age limit — everything unreachable is prunable. */
	expire = TIME_MAX;
	save_commit_buffer = 0;
	check_replace_refs = 0;
	/* Be extra careful about refs while destroying objects. */
	ref_paranoia = 1;
	init_revisions(&revs, prefix);

	argc = parse_options(argc, argv, prefix, options, prune_usage, 0);

	if (repository_format_precious_objects)
		die(_("cannot prune in a precious-objects repo"));

	/* Remaining arguments are extra objects to keep reachable. */
	while (argc--) {
		struct object_id oid;
		const char *name = *argv++;

		if (!get_oid(name, &oid)) {
			struct object *object = parse_object_or_die(&oid,
								    name);
			add_pending_object(&revs, object, "");
		} else
			die("unrecognized argument: %s", name);
	}

	if (show_progress == -1)
		show_progress = isatty(2);
	if (show_progress)
		progress = start_delayed_progress(_("Checking connectivity"), 0);
	if (exclude_promisor_objects) {
		fetch_if_missing = 0;
		revs.exclude_promisor_objects = 1;
	}

	mark_reachable_objects(&revs, 1, expire, progress);
	stop_progress(&progress);
	for_each_loose_file_in_objdir(get_object_directory(), prune_object,
				      prune_cruft, prune_subdir, NULL);

	prune_packed_objects(show_only ? PRUNE_PACKED_DRY_RUN : 0);
	remove_temporary_files(get_object_directory());
	s = mkpathdup("%s/pack", get_object_directory());
	remove_temporary_files(s);
	free(s);

	if (is_repository_shallow())
		prune_shallow(show_only);

	return 0;
}
/*
 * Return non-zero if "path" exists in the tree of commit "ref".
 * When file_only is set, only blob entries count as a match.
 */
int cgit_ref_path_exists(const char *path, const char *ref, int file_only)
{
	struct object_id oid;
	unsigned long size;
	struct pathspec_item path_items = {
		.match = xstrdup(path),
		.len = strlen(path)
	};
	struct pathspec paths = {
		.nr = 1,
		.items = &path_items
	};
	struct walk_tree_context walk_tree_ctx = {
		.match_path = path,
		.matched_oid = &oid,
		.found_path = 0,
		.file_only = file_only
	};

	if (get_oid(ref, &oid))
		goto done;
	if (oid_object_info(the_repository, &oid, &size) != OBJ_COMMIT)
		goto done;
	read_tree_recursive(lookup_commit_reference(the_repository, &oid)->maybe_tree,
			    "", 0, 0, &paths, walk_tree, &walk_tree_ctx);

done:
	free(path_items.match);
	return walk_tree_ctx.found_path;
}

/*
 * Write the raw contents of "path" within commit "head" (or of "head"
 * itself, if it names a non-commit object) to the HTML output.
 * Returns 0 on success, -1 if the ref, path or object cannot be found.
 */
int cgit_print_file(char *path, const char *head, int file_only)
{
	struct object_id oid;
	enum object_type type;
	char *buf;
	unsigned long size;
	struct commit *commit;
	struct pathspec_item path_items = {
		.match = path,
		.len = strlen(path)
	};
	struct pathspec paths = {
		.nr = 1,
		.items = &path_items
	};
	struct walk_tree_context walk_tree_ctx = {
		.match_path = path,
		.matched_oid = &oid,
		.found_path = 0,
		.file_only = file_only
	};

	if (get_oid(head, &oid))
		return -1;
	type = oid_object_info(the_repository, &oid, &size);
	if (type == OBJ_COMMIT) {
		/* Walk the commit's tree; walk_tree rewrites oid on match. */
		commit = lookup_commit_reference(the_repository, &oid);
		read_tree_recursive(commit->maybe_tree, "", 0, 0,
				    &paths, walk_tree, &walk_tree_ctx);
		if (!walk_tree_ctx.found_path)
			return -1;
		type = oid_object_info(the_repository, &oid, &size);
	}
	if (type == OBJ_BAD)
		return -1;
	buf = read_object_file(&oid, &type, &size);
	if (!buf)
		return -1;
	buf[size] = '\0';
	html_raw(buf, size);
	free(buf);
	return 0;
}

/*
 * Serve a blob over HTTP: either the object named by "hex" directly, or
 * the entry at "path" inside commit "head".  Emits error pages on failure
 * and sets a safe mimetype (octet-stream for binary content).
 */
void cgit_print_blob(const char *hex, char *path, const char *head,
		     int file_only)
{
	struct object_id oid;
	enum object_type type;
	char *buf;
	unsigned long size;
	struct commit *commit;
	struct pathspec_item path_items = {
		.match = path,
		.len = path ? strlen(path) : 0
	};
	struct pathspec paths = {
		.nr = 1,
		.items = &path_items
	};
	struct walk_tree_context walk_tree_ctx = {
		.match_path = path,
		.matched_oid = &oid,
		.found_path = 0,
		.file_only = file_only
	};

	if (hex) {
		if (get_oid_hex(hex, &oid)) {
			cgit_print_error_page(400, "Bad request",
					"Bad hex value: %s", hex);
			return;
		}
	} else {
		if (get_oid(head, &oid)) {
			cgit_print_error_page(404, "Not found",
					"Bad ref: %s", head);
			return;
		}
	}

	type = oid_object_info(the_repository, &oid, &size);

	if ((!hex) && type == OBJ_COMMIT && path) {
		/* Resolve path within the commit's tree. */
		commit = lookup_commit_reference(the_repository, &oid);
		read_tree_recursive(commit->maybe_tree, "", 0, 0,
				    &paths, walk_tree, &walk_tree_ctx);
		type = oid_object_info(the_repository, &oid, &size);
	}

	if (type == OBJ_BAD) {
		cgit_print_error_page(404, "Not found",
				"Bad object name: %s", hex);
		return;
	}

	buf = read_object_file(&oid, &type, &size);
	if (!buf) {
		cgit_print_error_page(500, "Internal server error",
				"Error reading object %s", hex);
		return;
	}

	buf[size] = '\0';
	if (buffer_is_binary(buf, size))
		ctx.page.mimetype = "application/octet-stream";
	else
		ctx.page.mimetype = "text/plain";
	ctx.page.filename = path;

	/* Prevent content sniffing / script execution of served blobs. */
	html("X-Content-Type-Options: nosniff\n");
	html("Content-Security-Policy: default-src 'none'\n");
	cgit_print_http_headers();
	html_raw(buf, size);
	free(buf);
}
/*
 * Entry point for "git read-tree": read zero or more tree-ish arguments
 * into the index, optionally performing a one-way/two-way/three-way merge
 * and updating the working tree.  Dies on usage errors or unpack failure;
 * returns 128 when unpack_trees() reports a conflict, 0 on success.
 */
int cmd_read_tree(int argc, const char **argv, const char *unused_prefix)
{
	int i, stage = 0;
	struct object_id oid;
	struct tree_desc t[MAX_UNPACK_TREES];
	struct unpack_trees_options opts;
	int prefix_set = 0;
	const struct option read_tree_options[] = {
		{ OPTION_CALLBACK, 0, "index-output", NULL, N_("file"),
		  N_("write resulting index to <file>"),
		  PARSE_OPT_NONEG, index_output_cb },
		OPT_BOOL(0, "empty", &read_empty,
			 N_("only empty the index")),
		OPT__VERBOSE(&opts.verbose_update, N_("be verbose")),
		OPT_GROUP(N_("Merging")),
		OPT_BOOL('m', NULL, &opts.merge,
			 N_("perform a merge in addition to a read")),
		OPT_BOOL(0, "trivial", &opts.trivial_merges_only,
			 N_("3-way merge if no file level merging required")),
		OPT_BOOL(0, "aggressive", &opts.aggressive,
			 N_("3-way merge in presence of adds and removes")),
		OPT_BOOL(0, "reset", &opts.reset,
			 N_("same as -m, but discard unmerged entries")),
		{ OPTION_STRING, 0, "prefix", &opts.prefix, N_("<subdirectory>/"),
		  N_("read the tree into the index under <subdirectory>/"),
		  PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP },
		OPT_BOOL('u', NULL, &opts.update,
			 N_("update working tree with merge result")),
		{ OPTION_CALLBACK, 0, "exclude-per-directory", &opts,
		  N_("gitignore"),
		  N_("allow explicitly ignored files to be overwritten"),
		  PARSE_OPT_NONEG, exclude_per_directory_cb },
		OPT_BOOL('i', NULL, &opts.index_only,
			 N_("don't check the working tree after merging")),
		OPT__DRY_RUN(&opts.dry_run, N_("don't update the index or the work tree")),
		OPT_BOOL(0, "no-sparse-checkout", &opts.skip_sparse_checkout,
			 N_("skip applying sparse checkout filter")),
		OPT_BOOL(0, "debug-unpack", &opts.debug_unpack,
			 N_("debug unpack-trees")),
		{ OPTION_CALLBACK, 0, "recurse-submodules", NULL,
		  "checkout", "control recursive updating of submodules",
		  PARSE_OPT_OPTARG, option_parse_recurse_submodules_worktree_updater },
		OPT_END()
	};

	/*
	 * The option table above only stores *pointers* into opts, so
	 * zeroing opts here (before parse_options runs) is safe.
	 */
	memset(&opts, 0, sizeof(opts));
	opts.head_idx = -1;
	opts.src_index = &the_index;
	opts.dst_index = &the_index;

	git_config(git_read_tree_config, NULL);

	argc = parse_options(argc, argv, unused_prefix, read_tree_options,
			     read_tree_usage, 0);

	load_submodule_cache();

	hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);

	/* -m, --reset and --prefix are mutually exclusive modes */
	prefix_set = opts.prefix ? 1 : 0;
	if (1 < opts.merge + opts.reset + prefix_set)
		die("Which one? -m, --reset, or --prefix?");

	/*
	 * NEEDSWORK
	 *
	 * The old index should be read anyway even if we're going to
	 * destroy all index entries because we still need to preserve
	 * certain information such as index version or split-index
	 * mode.
	 */

	if (opts.reset || opts.merge || opts.prefix) {
		if (read_cache_unmerged() && (opts.prefix || opts.merge))
			die("You need to resolve your current index first");
		/* stage starts at 1 so each tree argument below gets the next stage */
		stage = opts.merge = 1;
	}
	resolve_undo_clear();

	for (i = 0; i < argc; i++) {
		const char *arg = argv[i];

		if (get_oid(arg, &oid))
			die("Not a valid object name %s", arg);
		if (list_tree(&oid) < 0)
			die("failed to unpack tree object %s", arg);
		stage++;
	}
	if (!nr_trees && !read_empty && !opts.merge)
		warning("read-tree: emptying the index with no arguments is deprecated; use --empty");
	else if (nr_trees > 0 && read_empty)
		die("passing trees as arguments contradicts --empty");

	if (1 < opts.index_only + opts.update)
		die("-u and -i at the same time makes no sense");
	if ((opts.update || opts.index_only) && !opts.merge)
		die("%s is meaningless without -m, --reset, or --prefix",
		    opts.update ? "-u" : "-i");
	if ((opts.dir && !opts.update))
		die("--exclude-per-directory is meaningless unless -u");
	if (opts.merge && !opts.index_only)
		setup_work_tree();

	if (opts.merge) {
		/* stage - 1 is the number of trees given on the command line */
		switch (stage - 1) {
		case 0:
			die("you must specify at least one tree to merge");
			break;
		case 1:
			opts.fn = opts.prefix ? bind_merge : oneway_merge;
			break;
		case 2:
			opts.fn = twoway_merge;
			opts.initial_checkout = is_cache_unborn();
			break;
		case 3:
		default:
			opts.fn = threeway_merge;
			break;
		}

		if (stage - 1 >= 3)
			opts.head_idx = stage - 2;
		else
			opts.head_idx = 1;
	}

	if (opts.debug_unpack)
		opts.fn = debug_merge;

	/* the merge will rebuild the cache tree, so drop the stale one */
	cache_tree_free(&active_cache_tree);
	for (i = 0; i < nr_trees; i++) {
		struct tree *tree = trees[i];
		parse_tree(tree);
		init_tree_desc(t+i, tree->buffer, tree->size);
	}
	if (unpack_trees(nr_trees, t, &opts))
		return 128;

	if (opts.debug_unpack || opts.dry_run)
		return 0; /* do not write the index out */

	/*
	 * When reading only one tree (either the most basic form,
	 * "-m ent" or "--reset ent" form), we can obtain a fully
	 * valid cache-tree because the index must match exactly
	 * what came from the tree.
	 */
	if (nr_trees == 1 && !opts.prefix)
		prime_cache_tree(&the_index, trees[0]);

	if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
		die("unable to write new index file");
	return 0;
}
cc_size ccder_decode_rsa_pub_x509_n(const uint8_t *der, const uint8_t *der_end) { if((der = ccder_decode_constructed_tl(CCDER_CONSTRUCTED_SEQUENCE, &der_end, der, der_end)) == NULL) return 0; if((der = get_oid(der, der_end)) == NULL) return 0; return get_pub_n(der, der_end); }
/*
 * Create an external variant from our internal representation.
 *
 * Serializes *vi into a freshly palloc0'd Variant: a varlena header,
 * the (possibly flag-tagged) type OID, the typmod, and the payload laid
 * out per the type's typlen/typbyval/typalign from the fn-call cache.
 * When the OID is too large to share the header word with the flag bits,
 * its high byte is stored in one extra trailing byte (VAR_OVERFLOW).
 */
static Variant make_variant(VariantInt vi, FunctionCallInfo fcinfo, IOFuncSelector func)
{
	VariantCache	*cache;
	Variant			v;
	bool			oid_overflow = OID_TOO_LARGE(vi->typid);
	long			variant_length, data_length; /* long because we subtract */
	Pointer			data_ptr = 0;
	uint			flags = 0;

	cache = get_cache(fcinfo, vi, func);
	/*
	 * FIX: this previously read "cache->typid = vi->typid" — an
	 * assignment, which silently overwrote the cached typid and made
	 * the assertion vacuously true.  Use a side-effect-free comparison.
	 */
	Assert(cache->typid == vi->typid);

#ifdef VARIANT_TEST_OID
	vi->typid += OID_MASK;
	oid_overflow = OID_TOO_LARGE(vi->typid);
#endif

	if (vi->isnull)
	{
		flags |= VAR_ISNULL;
		data_length = 0;
	}
	else if (cache->typlen == -1) /* varlena */
	{
		/*
		 * Short varlena is OK, but we need to make sure it's not external. It's OK
		 * to leave compressed varlena's alone too, but detoast_packed will
		 * uncompress them. We'll just follow rangetype.c's lead here.
		 */
		vi->data = PointerGetDatum(PG_DETOAST_DATUM_PACKED(vi->data));
		data_ptr = DatumGetPointer(vi->data);

		/*
		 * Because we don't store varlena aligned or with its header, our
		 * data_length is simply the varlena length.
		 */
		data_length = VARSIZE_ANY_EXHDR(vi->data);
		data_ptr = VARDATA_ANY(data_ptr);
	}
	else if (cache->typlen == -2) /* cstring */
	{
		data_length = strlen(DatumGetCString(vi->data)); /* We don't store NUL terminator */
		data_ptr = DatumGetPointer(vi->data);
	}
	else
	{
		Assert(cache->typlen >= 0);
		if (cache->typbyval)
		{
			/*
			 * Compute how much space the aligned by-value datum adds
			 * beyond the header itself.
			 */
			data_length = VHDRSZ; /* Start with header size to make sure alignment is correct */
			data_length = (long) VDATAPTR_ALIGN(data_length, cache->typalign);
			data_length += cache->typlen;
			data_length -= VHDRSZ;
		}
		else /* fixed length, pass by reference */
		{
			data_length = cache->typlen;
			data_ptr = DatumGetPointer(vi->data);
		}
	}

	/* If typid is too large then we need an extra byte */
	variant_length = VHDRSZ + data_length + (oid_overflow ? sizeof(char) : 0);
	if (variant_length < 0)
		elog(ERROR, "Negative variant_length %li", variant_length);

	v = palloc0(variant_length);
	SET_VARSIZE(v, variant_length);
	v->pOid = vi->typid;
	v->typmod = vi->typmod;

	if (oid_overflow)
	{
		flags |= VAR_OVERFLOW;

		/* Reset high pOid byte to zero */
		v->pOid &= 0x00FFFFFF;

		/* Store high byte of OID at the end of our structure */
		*((char *) v + VARSIZE(v) - 1) = vi->typid >> 24;
	}

	/*
	 * Be careful not to overwrite the valid OID data
	 */
	v->pOid |= flags;
	/* Round-trip check: the stored header must decode back to the typid */
	Assert(get_oid(v, &flags) == vi->typid);

	if (!vi->isnull)
	{
		if (cache->typbyval)
		{
			Pointer p = VDATAPTR_ALIGN(v, cache->typalign);
			store_att_byval(p, vi->data, cache->typlen);
		}
		else
			memcpy(VDATAPTR(v), data_ptr, data_length);
	}

	return v;
}
AlgorithmIdentifier DL_Scheme_PublicKey::algorithm_identifier() const { return AlgorithmIdentifier(get_oid(), m_group.DER_encode(group_format())); }
/*
 * Build the commits that make up a stash entry without modifying refs.
 *
 * Fills in *info: b_commit (HEAD), i_tree/i_commit (index state),
 * w_tree/w_commit (worktree state), and u_commit when untracked files
 * are included.  The final stash message is accumulated in stash_msg_buf.
 *
 * Returns 0 on success, 1 if there are no local changes worth stashing,
 * and -1 on error (messages suppressed when quiet is set).  In patch
 * mode, stash_patch()'s positive return (nothing selected) is passed
 * through as-is.
 */
static int do_create_stash(struct pathspec ps, struct strbuf *stash_msg_buf,
			   int include_untracked, int patch_mode,
			   struct stash_info *info, struct strbuf *patch,
			   int quiet)
{
	int ret = 0;
	int flags = 0;
	int untracked_commit_option = 0;
	const char *head_short_sha1 = NULL;
	const char *branch_ref = NULL;
	const char *branch_name = "(no branch)";
	struct commit *head_commit = NULL;
	struct commit_list *parents = NULL;
	struct strbuf msg = STRBUF_INIT;
	struct strbuf commit_tree_label = STRBUF_INIT;
	struct strbuf untracked_files = STRBUF_INIT;

	prepare_fallback_ident("git stash", "git@stash");

	read_cache_preload(NULL);
	refresh_cache(REFRESH_QUIET);

	/* A repository with no commits yet has nothing to stash against */
	if (get_oid("HEAD", &info->b_commit)) {
		if (!quiet)
			fprintf_ln(stderr, _("You do not have "
					     "the initial commit yet"));
		ret = -1;
		goto done;
	} else {
		head_commit = lookup_commit(the_repository, &info->b_commit);
	}

	if (!check_changes(ps, include_untracked, &untracked_files)) {
		ret = 1;
		goto done;
	}

	branch_ref = resolve_ref_unsafe("HEAD", 0, NULL, &flags);
	/* NOTE(review): assumes a symref target always contains '/' and that
	 * branch_ref is non-NULL when REF_ISSYMREF is set — confirm */
	if (flags & REF_ISSYMREF)
		branch_name = strrchr(branch_ref, '/') + 1;
	head_short_sha1 = find_unique_abbrev(&head_commit->object.oid,
					     DEFAULT_ABBREV);
	strbuf_addf(&msg, "%s: %s ", branch_name, head_short_sha1);
	pp_commit_easy(CMIT_FMT_ONELINE, head_commit, &msg);

	/* First commit: snapshot of the index, parented on HEAD */
	strbuf_addf(&commit_tree_label, "index on %s\n", msg.buf);
	commit_list_insert(head_commit, &parents);
	if (write_cache_as_tree(&info->i_tree, 0, NULL) ||
	    commit_tree(commit_tree_label.buf, commit_tree_label.len,
			&info->i_tree, parents, &info->i_commit, NULL, NULL)) {
		if (!quiet)
			fprintf_ln(stderr, _("Cannot save the current "
					     "index state"));
		ret = -1;
		goto done;
	}

	/* Optional commit holding the untracked files */
	if (include_untracked) {
		if (save_untracked_files(info, &msg, untracked_files)) {
			if (!quiet)
				fprintf_ln(stderr, _("Cannot save "
						     "the untracked files"));
			ret = -1;
			goto done;
		}
		untracked_commit_option = 1;
	}

	/* Worktree state: either an interactive patch or the full tree */
	if (patch_mode) {
		ret = stash_patch(info, ps, patch, quiet);
		if (ret < 0) {
			if (!quiet)
				fprintf_ln(stderr, _("Cannot save the current "
						     "worktree state"));
			goto done;
		} else if (ret > 0) {
			goto done;
		}
	} else {
		if (stash_working_tree(info, ps)) {
			if (!quiet)
				fprintf_ln(stderr, _("Cannot save the current "
						     "worktree state"));
			ret = -1;
			goto done;
		}
	}

	if (!stash_msg_buf->len)
		strbuf_addf(stash_msg_buf, "WIP on %s", msg.buf);
	else
		strbuf_insertf(stash_msg_buf, 0, "On %s: ", branch_name);

	/*
	 * `parents` will be empty after calling `commit_tree()`, so there is
	 * no need to call `free_commit_list()`
	 */
	parents = NULL;
	if (untracked_commit_option)
		commit_list_insert(lookup_commit(the_repository,
						 &info->u_commit),
				   &parents);
	commit_list_insert(lookup_commit(the_repository, &info->i_commit),
			   &parents);
	commit_list_insert(head_commit, &parents);

	/* Final stash commit: worktree tree with HEAD/index(/untracked) parents */
	if (commit_tree(stash_msg_buf->buf, stash_msg_buf->len, &info->w_tree,
			parents, &info->w_commit, NULL, NULL)) {
		if (!quiet)
			fprintf_ln(stderr, _("Cannot record "
					     "working tree state"));
		ret = -1;
		goto done;
	}

done:
	strbuf_release(&commit_tree_label);
	strbuf_release(&msg);
	strbuf_release(&untracked_files);
	return ret;
}
oid_t node_smart_ptr::stylesheet_id() { return get_oid(BASE_COMPONENT_INDEX+6); }
int main(int argc, char *argv[]) { extern char *optarg; extern int optind; int opt; char hostname[MAXHOSTNAMELEN]; IPAddress ip_address; IPAddress my_ip_addr; Oid *enterprise; int generic, specific, level; SNMP_variable *variables; struct hostent *hp; int trap_port = -1; u_long time_stamp = (u_long)-1; int enterprise_flag= 0, a_flag = 0, i_flag = 0; optind = 1; /* the default host name is local host */ gethostname(hostname, sizeof(hostname)); /* default Oid for enterprise is sun */ enterprise = &sun_oid; /* generic, specific */ generic = 6; specific = 1; { char domain_path[MAXPATHLEN]; setlocale(LC_ALL, ""); sprintf(domain_path, SEA_LOCALE_PATH); bindtextdomain(DOMAIN_MGET, domain_path); bindtextdomain(DOMAIN_SGET, domain_path); bindtextdomain(DOMAIN_LIBGET, domain_path); bindtextdomain(DOMAIN_LGET, domain_path); bindtextdomain(DOMAIN_FGET, domain_path); /* formatting string */ } /* get command-line options */ while ((opt = getopt(argc, argv, "h:c:e:E:g:s:i:t:a:T:p:")) != EOF) { switch (opt) { case 'T': level = atoi(optarg); if(trace_set(level, error_label)){ fprintf(stderr, " %d is not a valid trace level!\n", level); usage(); } break; case 'h': /* host to send trap to */ if (strlcpy(hostname, optarg, sizeof (hostname)) > MAXHOSTNAMELEN) { fprintf(stderr, "%s: hostname too long!\ \n", optarg); exit(1); } case 'c': trap_community = optarg; break; case 'e': if (enterprise_flag) { usage(); } enterprise = SSAOidStrToOid(optarg,error_label); if (!enterprise){ /* error */ fprintf(stderr, "%s: not a valid enterprise oid string!\n", optarg); usage(); } enterprise_flag = 1; break; case 'E': if (enterprise_flag) { usage(); } enterprise = get_oid(optarg); if (!enterprise) { usage(); } enterprise_flag = 1; break; case 'g': /* generic trap type */ if (is_number(optarg)) usage(); generic = atoi(optarg); if ((generic > 6 ) || (generic < 0)) usage(); break; case 's': /* specific trap type */ if (is_number(optarg)) usage(); specific = atoi(optarg); break; case 'i': if 
(name_to_ip_address(optarg, &my_ip_addr, error_label)) { usage(); } i_flag = 1; break; case 't': /* timestamp */ time_stamp = atol(optarg); break; case 'p': if (is_number(optarg)) usage(); trap_port = atoi(optarg); break; case 'a': /* attribute information */ if ((variables = get_variable(optarg))== NULL){ fprintf(stderr, "%s: not a valid variable!\n", optarg); usage(); } a_flag = 1; break; case '?': /* usage help */ usage(); break; default: usage(); break; } /* switch */ }/* while */
oid_t locator_smart_ptr::node_id() { return get_oid(BASE_COMPONENT_INDEX+1); }
int cmd_pull(int argc, const char **argv, const char *prefix) { const char *repo, **refspecs; struct oid_array merge_heads = OID_ARRAY_INIT; struct object_id orig_head, curr_head; struct object_id rebase_fork_point; int autostash; if (!getenv("GIT_REFLOG_ACTION")) set_reflog_message(argc, argv); git_config(git_pull_config, NULL); argc = parse_options(argc, argv, prefix, pull_options, pull_usage, 0); parse_repo_refspecs(argc, argv, &repo, &refspecs); if (!opt_ff) opt_ff = xstrdup_or_null(config_get_ff()); if (opt_rebase < 0) opt_rebase = config_get_rebase(); if (read_cache_unmerged()) die_resolve_conflict("pull"); if (file_exists(git_path_merge_head(the_repository))) die_conclude_merge(); if (get_oid("HEAD", &orig_head)) oidclr(&orig_head); if (!opt_rebase && opt_autostash != -1) die(_("--[no-]autostash option is only valid with --rebase.")); autostash = config_autostash; if (opt_rebase) { if (opt_autostash != -1) autostash = opt_autostash; if (is_null_oid(&orig_head) && !is_cache_unborn()) die(_("Updating an unborn branch with changes added to the index.")); if (!autostash) require_clean_work_tree(N_("pull with rebase"), _("please commit or stash them."), 1, 0); if (get_rebase_fork_point(&rebase_fork_point, repo, *refspecs)) oidclr(&rebase_fork_point); } if (run_fetch(repo, refspecs)) return 1; if (opt_dry_run) return 0; if (get_oid("HEAD", &curr_head)) oidclr(&curr_head); if (!is_null_oid(&orig_head) && !is_null_oid(&curr_head) && !oideq(&orig_head, &curr_head)) { /* * The fetch involved updating the current branch. * * The working tree and the index file are still based on * orig_head commit, but we are merging into curr_head. * Update the working tree to match curr_head. 
*/ warning(_("fetch updated the current branch head.\n" "fast-forwarding your working tree from\n" "commit %s."), oid_to_hex(&orig_head)); if (checkout_fast_forward(&orig_head, &curr_head, 0)) die(_("Cannot fast-forward your working tree.\n" "After making sure that you saved anything precious from\n" "$ git diff %s\n" "output, run\n" "$ git reset --hard\n" "to recover."), oid_to_hex(&orig_head)); } get_merge_heads(&merge_heads); if (!merge_heads.nr) die_no_merge_candidates(repo, refspecs); if (is_null_oid(&orig_head)) { if (merge_heads.nr > 1) die(_("Cannot merge multiple branches into empty head.")); return pull_into_void(merge_heads.oid, &curr_head); } if (opt_rebase && merge_heads.nr > 1) die(_("Cannot rebase onto multiple branches.")); if (opt_rebase) { int ret = 0; if ((recurse_submodules == RECURSE_SUBMODULES_ON || recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND) && submodule_touches_in_range(&rebase_fork_point, &curr_head)) die(_("cannot rebase with locally recorded submodule modifications")); if (!autostash) { struct commit_list *list = NULL; struct commit *merge_head, *head; head = lookup_commit_reference(the_repository, &orig_head); commit_list_insert(head, &list); merge_head = lookup_commit_reference(the_repository, &merge_heads.oid[0]); if (is_descendant_of(merge_head, list)) { /* we can fast-forward this without invoking rebase */ opt_ff = "--ff-only"; ret = run_merge(); } } ret = run_rebase(&curr_head, merge_heads.oid, &rebase_fork_point); if (!ret && (recurse_submodules == RECURSE_SUBMODULES_ON || recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND)) ret = rebase_submodules(); return ret; } else { int ret = run_merge(); if (!ret && (recurse_submodules == RECURSE_SUBMODULES_ON || recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND)) ret = update_submodules(); return ret; } }
oid_t node_smart_ptr::doc_id() { return get_oid(BASE_COMPONENT_INDEX+5); }