/**
 * Release the VFILE lock and invoke the external merge tool for this file.
 *
 * Flow: read the merge plan from the ISSUE, free the pendingtree (releasing
 * the VFILE lock) while the user runs the interactive merge tool, then
 * reload the pendingtree, re-check that nobody resolved the issue in the
 * meantime, and record the step result.
 *
 * TODO 2010/07/12 The MERGE-PLAN is an array and allows for
 * TODO multiple steps (for an n-way sub-merge cascade).
 * TODO But we don't have that part turned on yet in
 * TODO sg_mrg__private_biuld_wd_issues.h:_make_file_merge_plan(),
 * TODO so for now, we only expect 1 step.
 * TODO
 * TODO Also, when we do have multiple steps, we might want to
 * TODO be able to use the 'status' field to see which steps
 * TODO were already performed in an earlier RESOLVE.
 * TODO
 * TODO Also, when we want to support more than 1 step we need
 * TODO to copy pvaPlan because when we release the pendingtree
 * TODO the pvhIssue becomes invalidated too.
 */
static void _resolve__fix__run_external_file_merge(SG_context * pCtx,
												   struct _resolve_data * pData,
												   const char * pszGid,
												   const SG_vhash * pvhIssue,
												   SG_string * pStrRepoPath,
												   enum _fix_status * pFixStatus)
{
	_resolve__step_pathnames * pStepPathnames = NULL;
	_resolve__external_tool * pET = NULL;
	const SG_varray * pvaPlan;			// borrowed from pvhIssue; invalid once pendingtree is freed
	const SG_vhash * pvhStep_0;			// borrowed from pvaPlan; same lifetime caveat
	SG_int64 r64;
	SG_uint32 nrSteps;
	SG_mrg_automerge_result result;
	SG_bool bMerged = SG_FALSE;
	SG_bool bIsResolved = SG_FALSE;

	SG_ERR_CHECK(  SG_vhash__get__varray(pCtx, pvhIssue, "conflict_file_merge_plan", (SG_varray **)&pvaPlan)  );
	SG_ERR_CHECK(  SG_varray__count(pCtx, pvaPlan, &nrSteps)  );
	if (nrSteps > 1)
		SG_ERR_THROW2(  SG_ERR_ASSERT,
						(pCtx, "TODO RESOLVE more than 1 step in auto-merge plan for '%s'.", SG_string__sz(pStrRepoPath))  );

	//////////////////////////////////////////////////////////////////
	// Get Step[0]

	SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pvaPlan, 0, (SG_vhash **)&pvhStep_0)  );

	// see if the user has already performed the merge and maybe got interrupted.
	// the "status" field is a persisted SG_mrg_automerge_result for this step.
	SG_ERR_CHECK(  SG_vhash__get__int64(pCtx, pvhStep_0, "status", &r64)  );
	result = (SG_mrg_automerge_result)r64;
	if (result == SG_MRG_AUTOMERGE_RESULT__SUCCESSFUL)
	{
		SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR, "TODO Print message about previous successful manual merge of the file content and ask if they want to redo it for '%s'.\n", SG_string__sz(pStrRepoPath))  );
		*pFixStatus = FIX_USER_MERGED;
		goto done;
	}

	// compute the ancestor/mine/theirs/result pathnames for this step while
	// the issue data is still valid.
	SG_ERR_CHECK(  _resolve__step_pathnames__compute(pCtx, pData, pvhIssue, pvhStep_0, pStrRepoPath, &pStepPathnames)  );

	// While we still have a handle to the pendingtree, lookup the
	// specifics on the external tool that we should invoke.  these
	// details come from localsettings.
	SG_ERR_CHECK(  _resolve__external_tool__lookup(pCtx, pData, pszGid, pvhIssue, pStrRepoPath, &pET)  );

	// Free the PENDINGTREE so that we release the VFILE lock.
	// NOTE: pvhIssue/pvaPlan/pvhStep_0 are owned by the pendingtree, so
	// they all become invalid here; null them to prevent accidental use.
	pvhIssue = NULL;
	pvaPlan = NULL;
	pvhStep_0 = NULL;
	SG_PENDINGTREE_NULLFREE(pCtx, pData->pPendingTree);

	//////////////////////////////////////////////////////////////////
	// Invoke the external tool.  (We do not hold the lock during this
	// potentially long interactive step.)

	SG_ERR_CHECK(  _resolve__fix__run_external_file_merge_1(pCtx, pData, pET, pStepPathnames, pStrRepoPath, &bMerged)  );
	if (!bMerged)
	{
		SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "RESOLVE: Aborting the merge of this file.\n")  );
		*pFixStatus = FIX_USER_ABORTED;
		goto done;
	}

	//////////////////////////////////////////////////////////////////
	// Reload the PENDINGTREE and re-fetch the ISSUE and updated the STATUS on
	// this step in the PLAN.
	//
	// We duplicate some of the "see if someone else resolved this issue while
	// we were without the lock" stuff.

	SG_ERR_CHECK(  _resolve__lookup_issue(pCtx, pData, pszGid, &pvhIssue)  );
	SG_ERR_CHECK(  _resolve__is_resolved(pCtx, pvhIssue, &bIsResolved)  );
	if (bIsResolved)
	{
		// Someone else marked it resolved while were waiting for
		// the user to edit the file and while we didn't have the
		// file lock.  We should stop here.
		SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "RESOLVE: Aborting the merge of this file (due to race condition).\n")  );
		*pFixStatus = FIX_LOST_RACE;
		goto done;
	}

	// re-fetch the current step and update the "result" status for it
	// and flush the pendingtree back disk.
	//
	// we only update the step status -- we DO NOT alter the __DIVERGENT_FILE_EDIT__
	// conflict_flags.

	SG_ERR_CHECK(  SG_vhash__get__varray(pCtx, pvhIssue, "conflict_file_merge_plan", (SG_varray **)&pvaPlan)  );
	SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pvaPlan, 0, (SG_vhash **)&pvhStep_0)  );
	SG_ERR_CHECK(  SG_pendingtree__set_wd_issue_plan_step_status__dont_save_pendingtree(pCtx, pData->pPendingTree, pvhStep_0, SG_MRG_AUTOMERGE_RESULT__SUCCESSFUL)  );
	SG_ERR_CHECK(  SG_pendingtree__save(pCtx, pData->pPendingTree)  );
	SG_PENDINGTREE_NULLFREE(pCtx, pData->pPendingTree);

	SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "RESOLVE: The file content portion of the merge was successful.\n")  );
	*pFixStatus = FIX_USER_MERGED;

	// we defer the delete of the temp input files until we completely
	// resolve the issue.  (This gives us more options if we allow the
	// resolve to be restarted after interruptions.)

done:
	;
fail:
	// shared cleanup for both success and error paths.
	_RESOLVE__EXTERNAL_TOOL__NULLFREE(pCtx, pET);
	_RESOLVE__STEP_PATHNAMES__NULLFREE(pCtx, pStepPathnames);
}
/**
 * Create and populate a working directory for the named repo.
 *
 * pCtx                           error/context object.
 * pszDescriptorName              name of the repo descriptor to open.
 * pPathDirPutTopLevelDirInHere   directory to populate with the WD contents.
 * bCreateDrawer                  when true, also create the drawer metadata
 *                                (repo.json, wd.json, parent + timestamp
 *                                cache); when false, behave like an EXPORT
 *                                and only fetch the files.
 * psz_spec_hid_cs_baseline       optional changeset hid to use as the
 *                                baseline; when NULL, the repo must have
 *                                exactly one VERSION_CONTROL leaf and that
 *                                leaf is used (otherwise
 *                                SG_ERR_MULTIPLE_HEADS_FROM_DAGNODE is thrown).
 */
void SG_workingdir__create_and_get(
	SG_context* pCtx,
	const char* pszDescriptorName,
	const SG_pathname* pPathDirPutTopLevelDirInHere,
	SG_bool bCreateDrawer,
	const char* psz_spec_hid_cs_baseline
	)
{
	SG_repo* pRepo = NULL;
	SG_rbtree* pIdsetLeaves = NULL;
	SG_uint32 count_leaves = 0;
	SG_changeset* pcs = NULL;
	const char* pszidUserSuperRoot = NULL;
	SG_bool b = SG_FALSE;
	char* psz_hid_cs_baseline = NULL;
	SG_pendingtree * pPendingTree = NULL;
	SG_vhash * pvhTimestamps = NULL;

	/*
	 * Fetch the descriptor by its given name and use it to connect to
	 * the repo.
	 */
	SG_ERR_CHECK(  SG_repo__open_repo_instance(pCtx, pszDescriptorName, &pRepo)  );

	if (psz_spec_hid_cs_baseline)
	{
		// Use SG_STRDUP (as the leaf-lookup branch below does) rather than
		// calling SG_strdup directly, for consistency.
		SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_spec_hid_cs_baseline, &psz_hid_cs_baseline)  );
	}
	else
	{
		const char* psz_hid = NULL;

		/*
		 * If you do not specify a hid to be the baseline, then this routine
		 * currently only works if there is exactly one leaf in the repo.
		 */
		SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, &pIdsetLeaves)  );
		SG_ERR_CHECK(  SG_rbtree__count(pCtx, pIdsetLeaves, &count_leaves)  );
		if (count_leaves != 1)
			SG_ERR_THROW(  SG_ERR_MULTIPLE_HEADS_FROM_DAGNODE  );

		SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, pIdsetLeaves, &b, &psz_hid, NULL)  );
		SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_hid, &psz_hid_cs_baseline)  );
	}

	/*
	 * Load the desired changeset from the repo so we can look up the
	 * id of its user root directory
	 */
	SG_ERR_CHECK(  SG_changeset__load_from_repo(pCtx, pRepo, psz_hid_cs_baseline, &pcs)  );
	SG_ERR_CHECK(  SG_changeset__get_root(pCtx, pcs, &pszidUserSuperRoot)  );

	if (bCreateDrawer)
	{
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhTimestamps)  );

		// Retrieve everything into the WD and capture the timestamps on the files that we create.
		SG_ERR_CHECK(  sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere, pszidUserSuperRoot, pvhTimestamps)  );

		// this creates "repo.json" with the repo-descriptor.
		SG_ERR_CHECK(  SG_workingdir__set_mapping(pCtx, pPathDirPutTopLevelDirInHere, pszDescriptorName, NULL)  );

		// this creates an empty "wd.json" file (which doesn't know anything).
		SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, pPathDirPutTopLevelDirInHere, SG_TRUE, &pPendingTree)  );

		// force set the initial parents to the current changeset.
		SG_ERR_CHECK(  SG_pendingtree__set_single_wd_parent(pCtx, pPendingTree, psz_hid_cs_baseline)  );

		// force initialize the timestamp cache to the list that we just built; this should
		// be the only timestamps in the cache since we just populated the WD.
		SG_ERR_CHECK(  SG_pendingtree__set_wd_timestamp_cache(pCtx, pPendingTree, &pvhTimestamps)  );	// this steals our vhash

		SG_ERR_CHECK(  SG_pendingtree__save(pCtx, pPendingTree)  );
	}
	else
	{
		// Retrieve everything into the WD but do not create .sgdrawer or record timestamps.
		// This is more like an EXPORT operation.
		SG_ERR_CHECK(  sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere, pszidUserSuperRoot, NULL)  );
	}

fail:
	// shared cleanup for both success and error paths; all macros tolerate NULL.
	SG_VHASH_NULLFREE(pCtx, pvhTimestamps);
	SG_NULLFREE(pCtx, psz_hid_cs_baseline);
	SG_CHANGESET_NULLFREE(pCtx, pcs);
	SG_RBTREE_NULLFREE(pCtx, pIdsetLeaves);
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
}