Example #1
static int is_index_unchanged(void)
{
	unsigned char head_sha1[20];
	struct commit *head_commit;

	if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, head_sha1, NULL))
		return error(_("Could not resolve HEAD commit\n"));

	head_commit = lookup_commit(head_sha1);

	/*
	 * If head_commit is NULL, check_commit, called from
	 * lookup_commit, would have indicated that head_commit is not
	 * a commit object already.  parse_commit() will return failure
	 * without further complaints in such a case.  Otherwise, if
	 * the commit is invalid, parse_commit() will complain.  So
	 * there is nothing for us to say here.  Just return failure.
	 */
	if (parse_commit(head_commit))
		return -1;

	if (!active_cache_tree)
		active_cache_tree = cache_tree();

	if (!cache_tree_fully_valid(active_cache_tree))
		if (cache_tree_update(&the_index, 0))
			return error(_("Unable to update cache tree\n"));

	return !hashcmp(active_cache_tree->sha1, head_commit->tree->object.oid.hash);
}
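
Every example in this collection is built around the same idiom: allocate a cache_tree for the index if one does not exist yet, test it with cache_tree_fully_valid(), and call cache_tree_update() only when that test fails, so still-valid subtrees are reused rather than recomputed. A minimal sketch of the idiom, assuming the newer cache_tree_update(struct index_state *, int) signature used in Example #1 (the helper name ensure_valid_cache_tree is hypothetical, not part of Git's API):

#include "cache.h"
#include "cache-tree.h"

static int ensure_valid_cache_tree(void)
{
	/* Allocate a cache tree for the index if none exists yet. */
	if (!active_cache_tree)
		active_cache_tree = cache_tree();

	/* Rebuild only the invalidated subtrees; valid ones are reused. */
	if (!cache_tree_fully_valid(active_cache_tree) &&
	    cache_tree_update(&the_index, 0) < 0)
		return error(_("unable to update cache tree"));

	return 0;
}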
Example #2
struct tree *write_tree_from_memory(struct merge_options *o)
{
	struct tree *result = NULL;

	if (unmerged_cache()) {
		int i;
		output(o, 0, "There are unmerged index entries:");
		for (i = 0; i < active_nr; i++) {
			struct cache_entry *ce = active_cache[i];
			if (ce_stage(ce))
				output(o, 0, "%d %.*s", ce_stage(ce), ce_namelen(ce), ce->name);
		}
		return NULL;
	}

	if (!active_cache_tree)
		active_cache_tree = cache_tree();

	if (!cache_tree_fully_valid(active_cache_tree) &&
	    cache_tree_update(active_cache_tree,
			      active_cache, active_nr, 0, 0) < 0)
		die("error building trees");

	result = lookup_tree(active_cache_tree->sha1);

	return result;
}
Example #3
struct tree *write_tree_from_memory(struct merge_options *o)
{
	struct tree *result = NULL;

	if (unmerged_cache()) {
		int i;
		fprintf(stderr, "BUG: There are unmerged index entries:\n");
		for (i = 0; i < active_nr; i++) {
			struct cache_entry *ce = active_cache[i];
			if (ce_stage(ce))
				fprintf(stderr, "BUG: %d %.*s", ce_stage(ce),
					(int)ce_namelen(ce), ce->name);
		}
		die("Bug in merge-recursive.c");
	}

	if (!active_cache_tree)
		active_cache_tree = cache_tree();

	if (!cache_tree_fully_valid(active_cache_tree) &&
	    cache_tree_update(active_cache_tree,
			      active_cache, active_nr, 0, 0) < 0)
		die("error building trees");

	result = lookup_tree(active_cache_tree->sha1);

	return result;
}
Example #4
int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
{
	int entries, was_valid, newfd;
	struct lock_file *lock_file;

	/*
	 * We can't free this memory, it becomes part of a linked list
	 * parsed atexit()
	 */
	lock_file = xcalloc(1, sizeof(struct lock_file));

	newfd = hold_locked_index(lock_file, 1);

	entries = read_cache();
	if (entries < 0)
		return WRITE_TREE_UNREADABLE_INDEX;
	if (flags & WRITE_TREE_IGNORE_CACHE_TREE)
		cache_tree_free(&(active_cache_tree));

	if (!active_cache_tree)
		active_cache_tree = cache_tree();

	was_valid = cache_tree_fully_valid(active_cache_tree);
	if (!was_valid) {
		int missing_ok = flags & WRITE_TREE_MISSING_OK;

		if (cache_tree_update(active_cache_tree,
				      active_cache, active_nr,
				      missing_ok, 0) < 0)
			return WRITE_TREE_UNMERGED_INDEX;
		if (0 <= newfd) {
			if (!write_cache(newfd, active_cache, active_nr) &&
			    !commit_lock_file(lock_file))
				newfd = -1;
		}
		/* Not being able to write is fine -- we are only interested
		 * in updating the cache-tree part, and if the next caller
		 * ends up using the old index with unupdated cache-tree part
		 * it misses the work we did here, but that is just a
		 * performance penalty and not a big deal.
		 */
	}

	if (prefix) {
		struct cache_tree *subtree =
			cache_tree_find(active_cache_tree, prefix);
		if (!subtree)
			return WRITE_TREE_PREFIX_ERROR;
		hashcpy(sha1, subtree->sha1);
	}
	else
		hashcpy(sha1, active_cache_tree->sha1);

	if (0 <= newfd)
		rollback_lock_file(lock_file);

	return 0;
}
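
Example #5 below is the same routine generalized to take an arbitrary struct index_state and index path. After that refactoring, write_cache_as_tree() survives in Git as a thin wrapper; a sketch of it, matching the unsigned-char sha1 era of Examples #4 and #5:

int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
{
	/* Delegate to the index_state-based variant, bound to the
	 * default index file and the global the_index. */
	return write_index_as_tree(sha1, &the_index, get_index_file(),
				   flags, prefix);
}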
Example #5
File: cache-tree.c Project: 9b/git
int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
{
	int entries, was_valid, newfd;
	struct lock_file *lock_file;

	/*
	 * We can't free this memory, it becomes part of a linked list
	 * parsed atexit()
	 */
	lock_file = xcalloc(1, sizeof(struct lock_file));

	newfd = hold_lock_file_for_update(lock_file, index_path, LOCK_DIE_ON_ERROR);

	entries = read_index_from(index_state, index_path);
	if (entries < 0)
		return WRITE_TREE_UNREADABLE_INDEX;
	if (flags & WRITE_TREE_IGNORE_CACHE_TREE)
		cache_tree_free(&index_state->cache_tree);

	if (!index_state->cache_tree)
		index_state->cache_tree = cache_tree();

	was_valid = cache_tree_fully_valid(index_state->cache_tree);
	if (!was_valid) {
		if (cache_tree_update(index_state, flags) < 0)
			return WRITE_TREE_UNMERGED_INDEX;
		if (0 <= newfd) {
			if (!write_locked_index(index_state, lock_file, COMMIT_LOCK))
				newfd = -1;
		}
		/* Not being able to write is fine -- we are only interested
		 * in updating the cache-tree part, and if the next caller
		 * ends up using the old index with unupdated cache-tree part
		 * it misses the work we did here, but that is just a
		 * performance penalty and not a big deal.
		 */
	}

	if (prefix) {
		struct cache_tree *subtree;
		subtree = cache_tree_find(index_state->cache_tree, prefix);
		if (!subtree)
			return WRITE_TREE_PREFIX_ERROR;
		hashcpy(sha1, subtree->sha1);
	}
	else
		hashcpy(sha1, index_state->cache_tree->sha1);

	if (0 <= newfd)
		rollback_lock_file(lock_file);

	return 0;
}
Example #6
0
int cache_tree_fully_valid(struct cache_tree *it)
{
	int i;
	if (!it)
		return 0;
	/* A negative entry_count marks a tree invalidated by an index
	 * change; the recorded tree object must also actually exist. */
	if (it->entry_count < 0 || !has_sha1_file(it->sha1))
		return 0;
	/* The tree is fully valid only if every subtree is, too. */
	for (i = 0; i < it->subtree_nr; i++) {
		if (!cache_tree_fully_valid(it->down[i]->cache_tree))
			return 0;
	}
	return 1;
}
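
The entry_count < 0 test is what makes this cheap: cache_tree_invalidate_path() sets entry_count to -1 on every tree along a changed path, so cache_tree_fully_valid() fails for exactly the subtrees that need rewriting. A hedged sketch of how invalidation and revalidation pair up, assuming the index_state-based signatures from the same era as Example #1 (the helper name is hypothetical):

#include "cache.h"
#include "cache-tree.h"

static void stage_path_and_refresh(const char *path)
{
	if (!active_cache_tree)
		active_cache_tree = cache_tree();

	/* Marks entry_count = -1 on each cache tree along 'path',
	 * e.g. "" and "dir" for "dir/file.c". */
	cache_tree_invalidate_path(&the_index, path);

	/* cache_tree_fully_valid() now returns 0, and the update
	 * recomputes only the invalidated subtrees. */
	if (!cache_tree_fully_valid(active_cache_tree))
		cache_tree_update(&the_index, WRITE_TREE_SILENT);
}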
Example #7
int write_index_as_tree(struct object_id *oid, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
{
	int entries, was_valid;
	struct lock_file lock_file = LOCK_INIT;
	int ret = 0;

	hold_lock_file_for_update(&lock_file, index_path, LOCK_DIE_ON_ERROR);

	entries = read_index_from(index_state, index_path, get_git_dir());
	if (entries < 0) {
		ret = WRITE_TREE_UNREADABLE_INDEX;
		goto out;
	}
	if (flags & WRITE_TREE_IGNORE_CACHE_TREE)
		cache_tree_free(&index_state->cache_tree);

	if (!index_state->cache_tree)
		index_state->cache_tree = cache_tree();

	was_valid = cache_tree_fully_valid(index_state->cache_tree);
	if (!was_valid) {
		if (cache_tree_update(index_state, flags) < 0) {
			ret = WRITE_TREE_UNMERGED_INDEX;
			goto out;
		}
		write_locked_index(index_state, &lock_file, COMMIT_LOCK);
		/* Not being able to write is fine -- we are only interested
		 * in updating the cache-tree part, and if the next caller
		 * ends up using the old index with unupdated cache-tree part
		 * it misses the work we did here, but that is just a
		 * performance penalty and not a big deal.
		 */
	}

	if (prefix) {
		struct cache_tree *subtree;
		subtree = cache_tree_find(index_state->cache_tree, prefix);
		if (!subtree) {
			ret = WRITE_TREE_PREFIX_ERROR;
			goto out;
		}
		oidcpy(oid, &subtree->oid);
	}
	else
		oidcpy(oid, &index_state->cache_tree->oid);

out:
	rollback_lock_file(&lock_file);
	return ret;
}
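
Example #7 is Example #5 after two API migrations: raw unsigned char sha1 buffers became struct object_id, and the heap-allocated lock_file (which could never be freed, per the comment in Example #5) became an on-stack struct lock_file initialized with LOCK_INIT, with a single goto-out exit path so rollback_lock_file() always runs. A hedged sketch of a typical call against the default index (error handling assumed from the return codes above; the helper name is hypothetical):

#include "cache.h"
#include "cache-tree.h"

/* Sketch: resolve the current index to a tree object id. */
static int tree_from_index(struct object_id *oid)
{
	switch (write_index_as_tree(oid, &the_index, get_index_file(),
				    0, NULL)) {
	case 0:
		return 0;
	case WRITE_TREE_UNREADABLE_INDEX:
		return error(_("the index file could not be read"));
	case WRITE_TREE_UNMERGED_INDEX:
		return error(_("the index has unmerged entries"));
	default:
		return error(_("failed to write tree from index"));
	}
}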
Example #8
/*
 * N-way merge "len" trees.  Returns 0 on success, -1 on failure to manipulate the
 * resulting index, -2 on failure to reflect the changes to the work tree.
 *
 * CE_ADDED, CE_UNPACKED and CE_NEW_SKIP_WORKTREE are used internally
 */
int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)
{
	int i, ret;
	static struct cache_entry *dfc;
	struct exclude_list el;

	if (len > MAX_UNPACK_TREES)
		die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);
	memset(&state, 0, sizeof(state));
	state.base_dir = "";
	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;
	state.istate = &o->result;

	memset(&el, 0, sizeof(el));
	if (!core_apply_sparse_checkout || !o->update)
		o->skip_sparse_checkout = 1;
	if (!o->skip_sparse_checkout) {
		char *sparse = git_pathdup("info/sparse-checkout");
		if (add_excludes_from_file_to_list(sparse, "", 0, &el, 0) < 0)
			o->skip_sparse_checkout = 1;
		else
			o->el = &el;
		free(sparse);
	}

	memset(&o->result, 0, sizeof(o->result));
	o->result.initialized = 1;
	o->result.timestamp.sec = o->src_index->timestamp.sec;
	o->result.timestamp.nsec = o->src_index->timestamp.nsec;
	o->result.version = o->src_index->version;
	o->result.split_index = o->src_index->split_index;
	if (o->result.split_index)
		o->result.split_index->refcount++;
	hashcpy(o->result.sha1, o->src_index->sha1);
	o->merge_size = len;
	mark_all_ce_unused(o->src_index);

	/*
	 * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries
	 */
	if (!o->skip_sparse_checkout)
		mark_new_skip_worktree(o->el, o->src_index, 0, CE_NEW_SKIP_WORKTREE);

	if (!dfc)
		dfc = xcalloc(1, cache_entry_size(0));
	o->df_conflict_entry = dfc;

	if (len) {
		const char *prefix = o->prefix ? o->prefix : "";
		struct traverse_info info;

		setup_traverse_info(&info, prefix);
		info.fn = unpack_callback;
		info.data = o;
		info.show_all_errors = o->show_all_errors;
		info.pathspec = o->pathspec;

		if (o->prefix) {
			/*
			 * Unpack existing index entries that sort before the
			 * prefix the tree is spliced into.  Note that o->merge
			 * is always true in this case.
			 */
			while (1) {
				struct cache_entry *ce = next_cache_entry(o);
				if (!ce)
					break;
				if (ce_in_traverse_path(ce, &info))
					break;
				if (unpack_index_entry(ce, o) < 0)
					goto return_failed;
			}
		}

		if (traverse_trees(len, t, &info) < 0)
			goto return_failed;
	}

	/* Any left-over entries in the index? */
	if (o->merge) {
		while (1) {
			struct cache_entry *ce = next_cache_entry(o);
			if (!ce)
				break;
			if (unpack_index_entry(ce, o) < 0)
				goto return_failed;
		}
	}
	mark_all_ce_unused(o->src_index);

	if (o->trivial_merges_only && o->nontrivial_merge) {
		ret = unpack_failed(o, "Merge requires file-level merging");
		goto done;
	}

	if (!o->skip_sparse_checkout) {
		int empty_worktree = 1;

		/*
		 * Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #1
		 * If they will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE
		 * so apply_sparse_checkout() won't attempt to remove them from the worktree.
		 */
		mark_new_skip_worktree(o->el, &o->result, CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);

		ret = 0;
		for (i = 0; i < o->result.cache_nr; i++) {
			struct cache_entry *ce = o->result.cache[i];

			/*
			 * Entries marked with CE_ADDED in merged_entry() do not have
			 * verify_absent() check (the check is effectively disabled
			 * because CE_NEW_SKIP_WORKTREE is set unconditionally).
			 *
		 * Do the real check now that CE_NEW_SKIP_WORKTREE
		 * has been set correctly.
			 */
			if (ce->ce_flags & CE_ADDED &&
			    verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}

			if (apply_sparse_checkout(&o->result, ce, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}
			if (!ce_skip_worktree(ce))
				empty_worktree = 0;

		}
		if (ret < 0)
			goto return_failed;
		/*
		 * Sparse checkout is meant to narrow down checkout area
		 * but it does not make sense to narrow down to empty working
		 * tree. This is usually a mistake in sparse checkout rules.
		 * Do not allow users to do that.
		 */
		if (o->result.cache_nr && empty_worktree) {
			ret = unpack_failed(o, "Sparse checkout leaves no entry on working directory");
			goto done;
		}
	}

	o->src_index = NULL;
	ret = check_updates(o) ? (-2) : 0;
	if (o->dst_index) {
		if (!ret) {
			if (!o->result.cache_tree)
				o->result.cache_tree = cache_tree();
			if (!cache_tree_fully_valid(o->result.cache_tree))
				cache_tree_update(&o->result,
						  WRITE_TREE_SILENT |
						  WRITE_TREE_REPAIR);
		}
		discard_index(o->dst_index);
		*o->dst_index = o->result;
	} else {
		discard_index(&o->result);
	}

done:
	clear_exclude_list(&el);
	return ret;

return_failed:
	if (o->show_all_errors)
		display_error_msgs(o);
	mark_all_ce_unused(o->src_index);
	ret = unpack_failed(o, NULL);
	if (o->exiting_early)
		ret = 0;
	goto done;
}
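
Examples #8 and #9 both finish with the same opportunistic repair before the result index is installed or written out: with WRITE_TREE_REPAIR, cache_tree_update() only records tree objects that already exist in the object database (it never writes new trees), and WRITE_TREE_SILENT suppresses error output, so the repair can be attempted unconditionally. A sketch of that shared tail as a helper (the function name is hypothetical):

#include "cache.h"
#include "cache-tree.h"

static void repair_cache_tree(struct index_state *istate)
{
	if (!istate->cache_tree)
		istate->cache_tree = cache_tree();
	/* Best effort: refresh whatever parts of the cache tree can be
	 * recorded from existing tree objects; never write new ones. */
	if (!cache_tree_fully_valid(istate->cache_tree))
		cache_tree_update(istate,
				  WRITE_TREE_SILENT | WRITE_TREE_REPAIR);
}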
Example #9
static int merge_working_tree(const struct checkout_opts *opts,
			      struct branch_info *old_branch_info,
			      struct branch_info *new_branch_info,
			      int *writeout_error)
{
	int ret;
	struct lock_file lock_file = LOCK_INIT;

	hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
	if (read_cache_preload(NULL) < 0)
		return error(_("index file corrupt"));

	resolve_undo_clear();
	if (opts->force) {
		ret = reset_tree(get_commit_tree(new_branch_info->commit),
				 opts, 1, writeout_error);
		if (ret)
			return ret;
	} else {
		struct tree_desc trees[2];
		struct tree *tree;
		struct unpack_trees_options topts;

		memset(&topts, 0, sizeof(topts));
		topts.head_idx = -1;
		topts.src_index = &the_index;
		topts.dst_index = &the_index;

		setup_unpack_trees_porcelain(&topts, "checkout");

		refresh_cache(REFRESH_QUIET);

		if (unmerged_cache()) {
			error(_("you need to resolve your current index first"));
			return 1;
		}

		/* 2-way merge to the new branch */
		topts.initial_checkout = is_cache_unborn();
		topts.update = 1;
		topts.merge = 1;
		topts.gently = opts->merge && old_branch_info->commit;
		topts.verbose_update = opts->show_progress;
		topts.fn = twoway_merge;
		if (opts->overwrite_ignore) {
			topts.dir = xcalloc(1, sizeof(*topts.dir));
			topts.dir->flags |= DIR_SHOW_IGNORED;
			setup_standard_excludes(topts.dir);
		}
		tree = parse_tree_indirect(old_branch_info->commit ?
					   &old_branch_info->commit->object.oid :
					   the_hash_algo->empty_tree);
		init_tree_desc(&trees[0], tree->buffer, tree->size);
		tree = parse_tree_indirect(&new_branch_info->commit->object.oid);
		init_tree_desc(&trees[1], tree->buffer, tree->size);

		ret = unpack_trees(2, trees, &topts);
		clear_unpack_trees_porcelain(&topts);
		if (ret == -1) {
			/*
			 * Unpack couldn't do a trivial merge; either
			 * give up or do a real merge, depending on
			 * whether the merge flag was used.
			 */
			struct tree *result;
			struct tree *work;
			struct merge_options o;
			if (!opts->merge)
				return 1;

			/*
			 * Without old_branch_info->commit, the below is the same as
			 * the two-tree unpack we already tried and failed.
			 */
			if (!old_branch_info->commit)
				return 1;

			/* Do more real merge */

			/*
			 * We update the index fully, then write the
			 * tree from the index, then merge the new
			 * branch with the current tree, with the old
			 * branch as the base. Then we reset the index
			 * (but not the working tree) to the new
			 * branch, leaving the working tree as the
			 * merged version, but skipping unmerged
			 * entries in the index.
			 */

			add_files_to_cache(NULL, NULL, 0);
			/*
			 * NEEDSWORK: carrying over local changes
			 * when branches have different end-of-line
			 * normalization (or clean+smudge rules) is
			 * a pain; plumb in an option to set
			 * o.renormalize?
			 */
			init_merge_options(&o, the_repository);
			o.verbosity = 0;
			work = write_tree_from_memory(&o);

			ret = reset_tree(get_commit_tree(new_branch_info->commit),
					 opts, 1,
					 writeout_error);
			if (ret)
				return ret;
			o.ancestor = old_branch_info->name;
			o.branch1 = new_branch_info->name;
			o.branch2 = "local";
			ret = merge_trees(&o,
					  get_commit_tree(new_branch_info->commit),
					  work,
					  get_commit_tree(old_branch_info->commit),
					  &result);
			if (ret < 0)
				exit(128);
			ret = reset_tree(get_commit_tree(new_branch_info->commit),
					 opts, 0,
					 writeout_error);
			strbuf_release(&o.obuf);
			if (ret)
				return ret;
		}
	}

	if (!active_cache_tree)
		active_cache_tree = cache_tree();

	if (!cache_tree_fully_valid(active_cache_tree))
		cache_tree_update(&the_index, WRITE_TREE_SILENT | WRITE_TREE_REPAIR);

	if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
		die(_("unable to write new index file"));

	if (!opts->force && !opts->quiet)
		show_local_changes(&new_branch_info->commit->object, &opts->diff_options);

	return 0;
}