char *write_tree_from_memory(struct merge_options *o)
{
	struct cache_tree *it;
	char root_id[41];

	if (unmerged_index(o->index)) {
		int i;
		fprintf(stderr, "BUG: There are unmerged index entries:\n");
		for (i = 0; i < o->index->cache_nr; i++) {
			struct cache_entry *ce = o->index->cache[i];
			if (ce_stage(ce))
				fprintf(stderr, "BUG: %d %.*s\n", ce_stage(ce),
					(int)ce_namelen(ce), ce->name);
		}
		g_assert(0);
	}

	/* if (!active_cache_tree) */
	it = cache_tree();
	if (cache_tree_update(it, o->index->cache, o->index->cache_nr,
			      0, 0, commit_trees_cb) < 0) {
		g_warning("error building trees");
		cache_tree_free(&it);
		return NULL;
	}

	rawdata_to_hex(it->sha1, root_id, 20);
	cache_tree_free(&it);
	return g_strdup(root_id);
}
static int read_tree_trivial(unsigned char *common, unsigned char *head,
			     unsigned char *one)
{
	int i, nr_trees = 0;
	struct tree *trees[MAX_UNPACK_TREES];
	struct tree_desc t[MAX_UNPACK_TREES];
	struct unpack_trees_options opts;

	memset(&opts, 0, sizeof(opts));
	opts.head_idx = 2;
	opts.src_index = &the_index;
	opts.dst_index = &the_index;
	opts.update = 1;
	opts.verbose_update = 1;
	opts.trivial_merges_only = 1;
	opts.merge = 1;
	trees[nr_trees] = parse_tree_indirect(common);
	if (!trees[nr_trees++])
		return -1;
	trees[nr_trees] = parse_tree_indirect(head);
	if (!trees[nr_trees++])
		return -1;
	trees[nr_trees] = parse_tree_indirect(one);
	if (!trees[nr_trees++])
		return -1;
	opts.fn = threeway_merge;
	cache_tree_free(&active_cache_tree);
	for (i = 0; i < nr_trees; i++) {
		parse_tree(trees[i]);
		init_tree_desc(t+i, trees[i]->buffer, trees[i]->size);
	}
	if (unpack_trees(nr_trees, t, &opts))
		return -1;
	return 0;
}
static int git_merge_trees(int index_only,
			   struct tree *common,
			   struct tree *head,
			   struct tree *merge)
{
	int rc;
	struct tree_desc t[3];
	struct unpack_trees_options opts;

	memset(&opts, 0, sizeof(opts));
	if (index_only)
		opts.index_only = 1;
	else
		opts.update = 1;
	opts.merge = 1;
	opts.head_idx = 2;
	opts.fn = threeway_merge;
	opts.src_index = &the_index;
	opts.dst_index = &the_index;

	init_tree_desc_from_tree(t+0, common);
	init_tree_desc_from_tree(t+1, head);
	init_tree_desc_from_tree(t+2, merge);

	rc = unpack_trees(3, t, &opts);
	cache_tree_free(&active_cache_tree);
	return rc;
}
void prime_cache_tree(struct index_state *istate, struct tree *tree)
{
	cache_tree_free(&istate->cache_tree);
	istate->cache_tree = cache_tree();
	prime_cache_tree_rec(istate->cache_tree, tree);
	istate->cache_changed |= CACHE_TREE_CHANGED;
}
int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
{
	int entries, was_valid, newfd;
	struct lock_file *lock_file;

	/*
	 * We can't free this memory, it becomes part of a linked list
	 * parsed atexit()
	 */
	lock_file = xcalloc(1, sizeof(struct lock_file));

	newfd = hold_locked_index(lock_file, 1);

	entries = read_cache();
	if (entries < 0)
		return WRITE_TREE_UNREADABLE_INDEX;
	if (flags & WRITE_TREE_IGNORE_CACHE_TREE)
		cache_tree_free(&(active_cache_tree));

	if (!active_cache_tree)
		active_cache_tree = cache_tree();

	was_valid = cache_tree_fully_valid(active_cache_tree);
	if (!was_valid) {
		int missing_ok = flags & WRITE_TREE_MISSING_OK;

		if (cache_tree_update(active_cache_tree,
				      active_cache, active_nr,
				      missing_ok, 0) < 0)
			return WRITE_TREE_UNMERGED_INDEX;
		if (0 <= newfd) {
			if (!write_cache(newfd, active_cache, active_nr) &&
			    !commit_lock_file(lock_file))
				newfd = -1;
		}
		/* Not being able to write is fine -- we are only interested
		 * in updating the cache-tree part, and if the next caller
		 * ends up using the old index with unupdated cache-tree part
		 * it misses the work we did here, but that is just a
		 * performance penalty and not a big deal.
		 */
	}

	if (prefix) {
		struct cache_tree *subtree =
			cache_tree_find(active_cache_tree, prefix);
		if (!subtree)
			return WRITE_TREE_PREFIX_ERROR;
		hashcpy(sha1, subtree->sha1);
	}
	else
		hashcpy(sha1, active_cache_tree->sha1);

	if (0 <= newfd)
		rollback_lock_file(lock_file);

	return 0;
}
int write_index_as_tree(unsigned char *sha1, struct index_state *index_state,
			const char *index_path, int flags, const char *prefix)
{
	int entries, was_valid, newfd;
	struct lock_file *lock_file;

	/*
	 * We can't free this memory, it becomes part of a linked list
	 * parsed atexit()
	 */
	lock_file = xcalloc(1, sizeof(struct lock_file));

	newfd = hold_lock_file_for_update(lock_file, index_path, LOCK_DIE_ON_ERROR);

	entries = read_index_from(index_state, index_path);
	if (entries < 0)
		return WRITE_TREE_UNREADABLE_INDEX;
	if (flags & WRITE_TREE_IGNORE_CACHE_TREE)
		cache_tree_free(&index_state->cache_tree);

	if (!index_state->cache_tree)
		index_state->cache_tree = cache_tree();

	was_valid = cache_tree_fully_valid(index_state->cache_tree);
	if (!was_valid) {
		if (cache_tree_update(index_state, flags) < 0)
			return WRITE_TREE_UNMERGED_INDEX;
		if (0 <= newfd) {
			if (!write_locked_index(index_state, lock_file, COMMIT_LOCK))
				newfd = -1;
		}
		/* Not being able to write is fine -- we are only interested
		 * in updating the cache-tree part, and if the next caller
		 * ends up using the old index with unupdated cache-tree part
		 * it misses the work we did here, but that is just a
		 * performance penalty and not a big deal.
		 */
	}

	if (prefix) {
		struct cache_tree *subtree;
		subtree = cache_tree_find(index_state->cache_tree, prefix);
		if (!subtree)
			return WRITE_TREE_PREFIX_ERROR;
		hashcpy(sha1, subtree->sha1);
	}
	else
		hashcpy(sha1, index_state->cache_tree->sha1);

	if (0 <= newfd)
		rollback_lock_file(lock_file);

	return 0;
}
void cache_tree_free(struct cache_tree **it_p)
{
	int i;
	struct cache_tree *it = *it_p;

	if (!it)
		return;
	for (i = 0; i < it->subtree_nr; i++)
		if (it->down[i]) {
			cache_tree_free(&it->down[i]->cache_tree);
			free(it->down[i]);	/* also release the subtree link itself */
		}
	free(it->down);
	free(it);
	*it_p = NULL;
}
int write_index_as_tree(struct object_id *oid, struct index_state *index_state,
			const char *index_path, int flags, const char *prefix)
{
	int entries, was_valid;
	struct lock_file lock_file = LOCK_INIT;
	int ret = 0;

	hold_lock_file_for_update(&lock_file, index_path, LOCK_DIE_ON_ERROR);

	entries = read_index_from(index_state, index_path, get_git_dir());
	if (entries < 0) {
		ret = WRITE_TREE_UNREADABLE_INDEX;
		goto out;
	}
	if (flags & WRITE_TREE_IGNORE_CACHE_TREE)
		cache_tree_free(&index_state->cache_tree);

	if (!index_state->cache_tree)
		index_state->cache_tree = cache_tree();

	was_valid = cache_tree_fully_valid(index_state->cache_tree);
	if (!was_valid) {
		if (cache_tree_update(index_state, flags) < 0) {
			ret = WRITE_TREE_UNMERGED_INDEX;
			goto out;
		}
		write_locked_index(index_state, &lock_file, COMMIT_LOCK);
		/* Not being able to write is fine -- we are only interested
		 * in updating the cache-tree part, and if the next caller
		 * ends up using the old index with unupdated cache-tree part
		 * it misses the work we did here, but that is just a
		 * performance penalty and not a big deal.
		 */
	}

	if (prefix) {
		struct cache_tree *subtree;
		subtree = cache_tree_find(index_state->cache_tree, prefix);
		if (!subtree) {
			ret = WRITE_TREE_PREFIX_ERROR;
			goto out;
		}
		oidcpy(oid, &subtree->oid);
	} else
		oidcpy(oid, &index_state->cache_tree->oid);

out:
	rollback_lock_file(&lock_file);
	return ret;
}
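For comparison with the sha1-based write_cache_as_tree() earlier in this listing, the object_id-era cache-wide variant plausibly reduces to a thin wrapper over write_index_as_tree(). The sketch below is an assumption based on the two versions shown here, not code taken from this listing; the_index and get_index_file() are assumed to be available as in the rest of the tree.

/*
 * Hedged sketch: point the object_id-based write_index_as_tree() above
 * at the default in-core index and index file.
 */
int write_cache_as_tree(struct object_id *oid, int flags, const char *prefix)
{
	return write_index_as_tree(oid, &the_index, get_index_file(),
				   flags, prefix);
}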
static void discard_unused_subtrees(struct cache_tree *it)
{
	struct cache_tree_sub **down = it->down;
	int nr = it->subtree_nr;
	int dst, src;
	for (dst = src = 0; src < nr; src++) {
		struct cache_tree_sub *s = down[src];
		if (s->used)
			down[dst++] = s;
		else {
			cache_tree_free(&s->cache_tree);
			free(s);
			it->subtree_nr--;
		}
	}
}
void cache_tree_invalidate_path(struct cache_tree *it, const char *path)
{
	/* a/b/c
	 * ==> invalidate self
	 * ==> find "a", have it invalidate "b/c"
	 * a
	 * ==> invalidate self
	 * ==> if "a" exists as a subtree, remove it.
	 */
	const char *slash;
	int namelen;
	struct cache_tree_sub *down;

#if DEBUG
	fprintf(stderr, "cache-tree invalidate <%s>\n", path);
#endif

	if (!it)
		return;
	slash = strchr(path, '/');
	it->entry_count = -1;
	if (!slash) {
		int pos;
		namelen = strlen(path);
		pos = subtree_pos(it, path, namelen);
		if (0 <= pos) {
			cache_tree_free(&it->down[pos]->cache_tree);
			free(it->down[pos]);
			/* 0 1 2 3 4 5
			 *       ^     ^subtree_nr = 6
			 *       pos
			 * move 4 and 5 up one place (2 entries)
			 * 2 = 6 - 3 - 1 = subtree_nr - pos - 1
			 */
			memmove(it->down+pos, it->down+pos+1,
				sizeof(struct cache_tree_sub *) *
				(it->subtree_nr - pos - 1));
			it->subtree_nr--;
		}
		return;
	}
	namelen = slash - path;
	down = find_subtree(it, path, namelen, 0);
	if (down)
		cache_tree_invalidate_path(down->cache_tree, slash + 1);
}
static int do_invalidate_path(struct cache_tree *it, const char *path)
{
	/* a/b/c
	 * ==> invalidate self
	 * ==> find "a", have it invalidate "b/c"
	 * a
	 * ==> invalidate self
	 * ==> if "a" exists as a subtree, remove it.
	 */
	const char *slash;
	int namelen;
	struct cache_tree_sub *down;

#if DEBUG_CACHE_TREE
	fprintf(stderr, "cache-tree invalidate <%s>\n", path);
#endif

	if (!it)
		return 0;
	slash = strchrnul(path, '/');
	namelen = slash - path;
	it->entry_count = -1;
	if (!*slash) {
		int pos;
		pos = subtree_pos(it, path, namelen);
		if (0 <= pos) {
			cache_tree_free(&it->down[pos]->cache_tree);
			free(it->down[pos]);
			/* 0 1 2 3 4 5
			 *       ^     ^subtree_nr = 6
			 *       pos
			 * move 4 and 5 up one place (2 entries)
			 * 2 = 6 - 3 - 1 = subtree_nr - pos - 1
			 */
			MOVE_ARRAY(it->down + pos, it->down + pos + 1,
				   it->subtree_nr - pos - 1);
			it->subtree_nr--;
		}
		return 1;
	}
	down = find_subtree(it, path, namelen, 0);
	if (down)
		do_invalidate_path(down->cache_tree, slash + 1);
	return 1;
}
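do_invalidate_path() returns an int, which suggests an index-level wrapper that marks the index dirty when anything was actually invalidated. A sketch of what that wrapper might look like, assuming the index_state/CACHE_TREE_CHANGED convention seen in prime_cache_tree() earlier in this listing (this is an assumption, not code from the listing):

/*
 * Sketch only: the index_state-based cache_tree_invalidate_path()
 * presumably forwards to do_invalidate_path() and flags the index
 * as changed, mirroring prime_cache_tree() above.
 */
void cache_tree_invalidate_path(struct index_state *istate, const char *path)
{
	if (do_invalidate_path(istate->cache_tree, path))
		istate->cache_changed |= CACHE_TREE_CHANGED;
}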
int read_tree(struct tree *tree, int stage, const char **match)
{
	read_tree_fn_t fn = NULL;
	int i, err;

	/*
	 * Currently the only existing callers of this function all
	 * call it with stage=1 and after making sure there is nothing
	 * at that stage; we could always use read_one_entry_quick().
	 *
	 * But when we decide to straighten out git-read-tree not to
	 * use unpack_trees() in some cases, this will probably start
	 * to matter.
	 */

	/*
	 * See if we have cache entry at the stage.  If so,
	 * do it the original slow way, otherwise, append and then
	 * sort at the end.
	 */
	for (i = 0; !fn && i < active_nr; i++) {
		struct cache_entry *ce = active_cache[i];
		if (ce_stage(ce) == stage)
			fn = read_one_entry;
	}

	if (!fn)
		fn = read_one_entry_quick;
	err = read_tree_recursive(tree, "", 0, stage, match, fn);
	if (fn == read_one_entry || err)
		return err;

	/*
	 * Sort the cache entry -- we need to nuke the cache tree, though.
	 */
	cache_tree_free(&active_cache_tree);
	qsort(active_cache, active_nr, sizeof(active_cache[0]),
	      cmp_cache_name_compare);
	return 0;
}
void prime_cache_tree(struct cache_tree **it, struct tree *tree)
{
	cache_tree_free(it);
	*it = cache_tree();
	prime_cache_tree_rec(*it, tree);
}
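prime_cache_tree_rec() is not included in this listing. Below is a sketch of the recursion it plausibly performs, under the sha1-era tree API used by the other excerpts here (init_tree_desc, hashcpy, cache_tree_sub); treat it as an illustration of the idea, not the exact implementation: copy the tree's sha1 into the node, walk its entries, recurse into subdirectories, and record the number of blobs covered in entry_count so the node counts as valid.

/*
 * Sketch only: recursively populate a cache_tree from an already-known tree
 * object, so every node starts out valid.
 */
static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
{
	struct tree_desc desc;
	struct name_entry entry;
	int cnt;

	hashcpy(it->sha1, tree->object.sha1);
	init_tree_desc(&desc, tree->buffer, tree->size);
	cnt = 0;
	while (tree_entry(&desc, &entry)) {
		if (!S_ISDIR(entry.mode))
			cnt++;	/* a blob under this directory */
		else {
			struct cache_tree_sub *sub;
			struct tree *subtree = lookup_tree(entry.sha1);

			if (!subtree->object.parsed)
				parse_tree(subtree);
			sub = cache_tree_sub(it, entry.path);
			sub->cache_tree = cache_tree();
			prime_cache_tree_rec(sub->cache_tree, subtree);
			cnt += sub->cache_tree->entry_count;
		}
	}
	it->entry_count = cnt;
}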
static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
{
	const char *buf = *buffer;
	unsigned long size = *size_p;
	const char *cp;
	char *ep;
	struct cache_tree *it;
	int i, subtree_nr;

	it = NULL;
	/* skip name, but make sure name exists */
	while (size && *buf) {
		size--;
		buf++;
	}
	if (!size)
		goto free_return;
	buf++; size--;
	it = cache_tree();

	cp = buf;
	it->entry_count = strtol(cp, &ep, 10);
	if (cp == ep)
		goto free_return;
	cp = ep;
	subtree_nr = strtol(cp, &ep, 10);
	if (cp == ep)
		goto free_return;
	while (size && *buf && *buf != '\n') {
		size--;
		buf++;
	}
	if (!size)
		goto free_return;
	buf++; size--;
	if (0 <= it->entry_count) {
		if (size < 20)
			goto free_return;
		hashcpy(it->sha1, (const unsigned char*)buf);
		buf += 20;
		size -= 20;
	}

#if DEBUG
	if (0 <= it->entry_count)
		fprintf(stderr, "cache-tree <%s> (%d ent, %d subtree) %s\n",
			*buffer, it->entry_count, subtree_nr,
			sha1_to_hex(it->sha1));
	else
		fprintf(stderr, "cache-tree <%s> (%d subtrees) invalid\n",
			*buffer, subtree_nr);
#endif

	/*
	 * Just a heuristic -- we do not add directories that often but
	 * we do not want to have to extend it immediately when we do,
	 * hence +2.
	 */
	it->subtree_alloc = subtree_nr + 2;
	it->down = xcalloc(it->subtree_alloc, sizeof(struct cache_tree_sub *));
	for (i = 0; i < subtree_nr; i++) {
		/* read each subtree */
		struct cache_tree *sub;
		struct cache_tree_sub *subtree;
		const char *name = buf;

		sub = read_one(&buf, &size);
		if (!sub)
			goto free_return;
		subtree = cache_tree_sub(it, name);
		subtree->cache_tree = sub;
	}
	if (subtree_nr != it->subtree_nr)
		die("cache-tree: internal error");
	*buffer = buf;
	*size_p = size;
	return it;

 free_return:
	cache_tree_free(&it);
	return NULL;
}
int cmd_read_tree(int argc, const char **argv, const char *unused_prefix)
{
	int i, newfd, stage = 0;
	unsigned char sha1[20];
	struct tree_desc t[MAX_UNPACK_TREES];
	struct unpack_trees_options opts;
	int prefix_set = 0;
	const struct option read_tree_options[] = {
		{ OPTION_CALLBACK, 0, "index-output", NULL, N_("file"),
		  N_("write resulting index to <file>"),
		  PARSE_OPT_NONEG, index_output_cb },
		OPT_SET_INT(0, "empty", &read_empty,
			    N_("only empty the index"), 1),
		OPT__VERBOSE(&opts.verbose_update, N_("be verbose")),
		OPT_GROUP(N_("Merging")),
		OPT_SET_INT('m', NULL, &opts.merge,
			    N_("perform a merge in addition to a read"), 1),
		OPT_SET_INT(0, "trivial", &opts.trivial_merges_only,
			    N_("3-way merge if no file level merging required"), 1),
		OPT_SET_INT(0, "aggressive", &opts.aggressive,
			    N_("3-way merge in presence of adds and removes"), 1),
		OPT_SET_INT(0, "reset", &opts.reset,
			    N_("same as -m, but discard unmerged entries"), 1),
		{ OPTION_STRING, 0, "prefix", &opts.prefix, N_("<subdirectory>/"),
		  N_("read the tree into the index under <subdirectory>/"),
		  PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP },
		OPT_SET_INT('u', NULL, &opts.update,
			    N_("update working tree with merge result"), 1),
		{ OPTION_CALLBACK, 0, "exclude-per-directory", &opts,
		  N_("gitignore"),
		  N_("allow explicitly ignored files to be overwritten"),
		  PARSE_OPT_NONEG, exclude_per_directory_cb },
		OPT_SET_INT('i', NULL, &opts.index_only,
			    N_("don't check the working tree after merging"), 1),
		OPT__DRY_RUN(&opts.dry_run, N_("don't update the index or the work tree")),
		OPT_SET_INT(0, "no-sparse-checkout", &opts.skip_sparse_checkout,
			    N_("skip applying sparse checkout filter"), 1),
		OPT_SET_INT(0, "debug-unpack", &opts.debug_unpack,
			    N_("debug unpack-trees"), 1),
		OPT_END()
	};

	memset(&opts, 0, sizeof(opts));
	opts.head_idx = -1;
	opts.src_index = &the_index;
	opts.dst_index = &the_index;

	git_config(git_default_config, NULL);

	argc = parse_options(argc, argv, unused_prefix, read_tree_options,
			     read_tree_usage, 0);

	newfd = hold_locked_index(&lock_file, 1);

	prefix_set = opts.prefix ? 1 : 0;
	if (1 < opts.merge + opts.reset + prefix_set)
		die("Which one? -m, --reset, or --prefix?");

	if (opts.reset || opts.merge || opts.prefix) {
		if (read_cache_unmerged() && (opts.prefix || opts.merge))
			die("You need to resolve your current index first");
		stage = opts.merge = 1;
	}
	resolve_undo_clear();

	for (i = 0; i < argc; i++) {
		const char *arg = argv[i];

		if (get_sha1(arg, sha1))
			die("Not a valid object name %s", arg);
		if (list_tree(sha1) < 0)
			die("failed to unpack tree object %s", arg);
		stage++;
	}
	if (nr_trees == 0 && !read_empty)
		warning("read-tree: emptying the index with no arguments is deprecated; use --empty");
	else if (nr_trees > 0 && read_empty)
		die("passing trees as arguments contradicts --empty");

	if (1 < opts.index_only + opts.update)
		die("-u and -i at the same time makes no sense");
	if ((opts.update||opts.index_only) && !opts.merge)
		die("%s is meaningless without -m, --reset, or --prefix",
		    opts.update ? "-u" : "-i");
	if ((opts.dir && !opts.update))
		die("--exclude-per-directory is meaningless unless -u");
	if (opts.merge && !opts.index_only)
		setup_work_tree();

	if (opts.merge) {
		if (stage < 2)
			die("just how do you expect me to merge %d trees?", stage-1);
		switch (stage - 1) {
		case 1:
			opts.fn = opts.prefix ? bind_merge : oneway_merge;
			break;
		case 2:
			opts.fn = twoway_merge;
			opts.initial_checkout = is_cache_unborn();
			break;
		case 3:
		default:
			opts.fn = threeway_merge;
			break;
		}

		if (stage - 1 >= 3)
			opts.head_idx = stage - 2;
		else
			opts.head_idx = 1;
	}

	if (opts.debug_unpack)
		opts.fn = debug_merge;

	cache_tree_free(&active_cache_tree);
	for (i = 0; i < nr_trees; i++) {
		struct tree *tree = trees[i];
		parse_tree(tree);
		init_tree_desc(t+i, tree->buffer, tree->size);
	}
	if (unpack_trees(nr_trees, t, &opts))
		return 128;

	if (opts.debug_unpack || opts.dry_run)
		return 0; /* do not write the index out */

	/*
	 * When reading only one tree (either the most basic form,
	 * "-m ent" or "--reset ent" form), we can obtain a fully
	 * valid cache-tree because the index must match exactly
	 * what came from the tree.
	 */
	if (nr_trees == 1 && !opts.prefix)
		prime_cache_tree(&active_cache_tree, trees[0]);

	if (write_cache(newfd, active_cache, active_nr) ||
	    commit_locked_index(&lock_file))
		die("unable to write new index file");
	return 0;
}