/*
 * Get the new blocks that need to be checked out if we ff to @remote.
 *
 * On success, *bl is a newly allocated BlockList owned by the caller.
 * Returns 0 on success, -1 on failure.
 */
static int get_new_blocks_ff (SeafRepo *repo,
                              SeafCommit *head,
                              SeafCommit *remote,
                              BlockList **bl)
{
    char path[SEAF_PATH_MAX];
    struct index_state index;
    struct tree_desc desc[2];
    struct unpack_trees_options opts;
    int status = 0;

    memset (&index, 0, sizeof(index));
    snprintf (path, SEAF_PATH_MAX, "%s/%s",
              repo->manager->index_dir, repo->id);
    if (read_index_from (&index, path) < 0) {
        g_warning ("Failed to load index.\n");
        return -1;
    }

    /* Two-way merge: desc[0] is our head, desc[1] is the remote head. */
    fill_tree_descriptor (&desc[0], head->root_id);
    fill_tree_descriptor (&desc[1], remote->root_id);

    memset (&opts, 0, sizeof(opts));
    opts.base = repo->worktree;
    opts.head_idx = -1;
    opts.src_index = &index;
    opts.update = 1;
    opts.merge = 1;
    opts.fn = twoway_merge;

    /* unpack_trees() doesn't update index or worktree. */
    if (unpack_trees (2, desc, &opts) < 0) {
        g_warning ("Failed to ff to commit %s.\n", remote->commit_id);
        status = -1;
        goto out;
    }

    /* The merged result index tells us which blocks are new to us. */
    *bl = block_list_new ();
    collect_new_blocks_from_index (&opts.result, *bl);

out:
    tree_desc_free (&desc[0]);
    tree_desc_free (&desc[1]);
    discard_index (&index);
    discard_index (&opts.result);
    return status;
}
/*
 * Get the new blocks that need to be checked out if we do a real merge.
 *
 * Runs merge_recursive() in collect-blocks-only mode so nothing is
 * written to the worktree; the blocks are accumulated into *bl, which
 * is a newly allocated BlockList owned by the caller.
 * Returns the merge_recursive() result.
 */
static int get_new_blocks_merge (SeafRepo *repo,
                                 SeafCommit *head,
                                 SeafCommit *remote,
                                 SeafCommit *common,
                                 BlockList **bl)
{
    char path[SEAF_PATH_MAX];
    struct index_state index;
    struct merge_options mopts;
    int clean, res;

    memset (&index, 0, sizeof(index));
    snprintf (path, SEAF_PATH_MAX, "%s/%s",
              repo->manager->index_dir, repo->id);
    if (read_index_from (&index, path) < 0) {
        g_warning ("Failed to load index.\n");
        return -1;
    }

    init_merge_options (&mopts);
    mopts.index = &index;
    mopts.worktree = repo->worktree;
    mopts.ancestor = "common ancestor";
    mopts.branch1 = seaf->session->base.user_name;
    mopts.branch2 = remote->creator_name;
    /* Don't touch the worktree, just record the block ids we'd need. */
    mopts.collect_blocks_only = TRUE;

    *bl = block_list_new ();
    mopts.bl = *bl;

    res = merge_recursive (&mopts,
                           head->root_id, remote->root_id, common->root_id,
                           &clean, NULL);

    clear_merge_options (&mopts);
    discard_index (&index);

    return res;
}
static void* compute_repo_size (void *vjob) { RepoSizeJob *job = vjob; Scheduler *sched = job->sched; SeafRepo *repo = NULL; SeafCommit *head = NULL; char *cached_head_id = NULL; BlockList *bl; char *block_id; BlockMetadata *bmd; guint64 size = 0; repo = seaf_repo_manager_get_repo (sched->seaf->repo_mgr, job->repo_id); if (!repo) { g_warning ("[scheduler] failed to get repo %s.\n", job->repo_id); return vjob; } cached_head_id = get_cached_head_id (sched->seaf->db, job->repo_id); if (g_strcmp0 (cached_head_id, repo->head->commit_id) == 0) goto out; head = seaf_commit_manager_get_commit (sched->seaf->commit_mgr, repo->head->commit_id); if (!head) { g_warning ("[scheduler] failed to get head commit %s.\n", repo->head->commit_id); goto out; } /* Load block list first so that we don't need to count duplicate blocks. * We only calculate the size of the head commit. */ bl = block_list_new (); if (seaf_fs_manager_populate_blocklist (seaf->fs_mgr, head->root_id, bl) < 0) { block_list_free (bl); goto out; } int i; for (i = 0; i < bl->n_blocks; ++i) { block_id = g_ptr_array_index (bl->block_ids, i); bmd = seaf_block_manager_stat_block (sched->seaf->block_mgr, block_id); if (bmd) { size += bmd->size; g_free (bmd); } } block_list_free (bl); if (set_repo_size (sched->seaf->db, job->repo_id, repo->head->commit_id, size) < 0) g_warning ("[scheduler] failed to store repo size %s.\n", job->repo_id); out: seaf_repo_unref (repo); seaf_commit_unref (head); g_free (cached_head_id); return vjob; }