/**
 * After an issue with a file-content conflict has been resolved,
 * walk every step of its merge plan and remove the ~mine temp
 * files that were generated for that step.
 */
static void _resolve__delete_temp_files(SG_context * pCtx,
										struct _resolve_data * pData,
										const char * pszGid,
										const SG_vhash * pvhIssue)
{
	_resolve__step_pathnames * pPathnames = NULL;
	const SG_varray * pvaPlan;
	SG_uint32 k;
	SG_uint32 count;

	SG_UNUSED( pszGid );

	// The plan is an array of steps; each step owns a set of temp files.
	SG_ERR_CHECK(  SG_vhash__get__varray(pCtx, pvhIssue, "conflict_file_merge_plan", (SG_varray **)&pvaPlan)  );
	SG_ERR_CHECK(  SG_varray__count(pCtx, pvaPlan, &count)  );

	for (k=0; k<count; k++)
	{
		const SG_vhash * pvhStep;

		SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pvaPlan, k, (SG_vhash **)&pvhStep)  );
		SG_ERR_CHECK(  _resolve__step_pathnames__compute(pCtx, pData, pvhIssue, pvhStep, NULL, &pPathnames)  );
		SG_ERR_CHECK(  _resolve__step_pathnames__delete_temp_files(pCtx, pPathnames)  );

		_RESOLVE__STEP_PATHNAMES__NULLFREE(pCtx, pPathnames);
	}

fail:
	// NULLFREE is a no-op when the loop already released it.
	_RESOLVE__STEP_PATHNAMES__NULLFREE(pCtx, pPathnames);
}
/**
 * Invoke the WIT "list items" hook (if one is configured for this repo)
 * to search the work-item tracker for items matching psz_search_term.
 *
 * pBugs (optional): when the hook result contains an "items" array and
 * pBugs is non-NULL, the items are copied into it.
 *
 * Silently returns if no hook is installed for this interface.
 */
void SG_vc_hooks__ASK__WIT__LIST_ITEMS(
	SG_context* pCtx,
	SG_repo* pRepo,
	const char * psz_search_term,
	SG_varray *pBugs
	)
{
	SG_vhash* pvh_params = NULL;
	SG_vhash* pvh_result = NULL;
	SG_vhash* pvh_hook = NULL;
	const char* psz_js = NULL;
	const char* psz_descriptor_name = NULL;
	SG_bool hasBugs = SG_FALSE;

	SG_ERR_CHECK(  sg_vc_hooks__lookup_by_interface__single_result(
						pCtx,
						pRepo,
						SG_VC_HOOK__INTERFACE__ASK__WIT__LIST_ITEMS,
						&pvh_hook
						)  );
	if (!pvh_hook)
		return;		// no hook configured: nothing to do

	SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvh_hook, "js", &psz_js)  );
	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_params)  );
	SG_ERR_CHECK(  SG_repo__get_descriptor_name(pCtx, pRepo, &psz_descriptor_name)  );
	// An unnamed repo has no descriptor name; only add the key when one
	// exists.  (Matches SG_vc_hooks__ASK__WIT__VALIDATE_ASSOCIATIONS and
	// avoids handing NULL to SG_vhash__add__string__sz.)
	if (psz_descriptor_name)
	{
		SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvh_params, "descriptor_name", psz_descriptor_name)  );
	}
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvh_params, "text", psz_search_term)  );

	SG_ERR_CHECK(  SG_vc_hooks__execute(pCtx, psz_js, pvh_params, &pvh_result)  );

	SG_ERR_CHECK(  SG_vhash__has(pCtx, pvh_result, "items", &hasBugs)  );
	if (hasBugs && pBugs)
	{
		SG_varray *bugs = NULL;

		SG_ERR_CHECK(  SG_vhash__get__varray(pCtx, pvh_result, "items", &bugs)  );
		SG_ERR_CHECK(  SG_varray__copy_items(pCtx, bugs, pBugs)  );
	}

fail:
	SG_VHASH_NULLFREE(pCtx, pvh_params);
	SG_VHASH_NULLFREE(pCtx, pvh_result);
	SG_VHASH_NULLFREE(pCtx, pvh_hook);
}
/**
 * Reconstruct a SG_dagfrag from its serialized vhash form.
 * Only serialization version "1" is understood; anything else throws
 * SG_ERR_DAGFRAG_DESERIALIZATION_VERSION.
 *
 * On success, ownership of the new frag is transferred to *ppNew.
 */
void SG_dagfrag__alloc__from_vhash(SG_context * pCtx,
								   SG_dagfrag ** ppNew,
								   const SG_vhash * pvhFrag)
{
	const char * pszVersion;
	SG_dagfrag * pFrag = NULL;
	struct _deserialize_data data;
	SG_int64 i64DagNum = 0;

	SG_NULLARGCHECK_RETURN(ppNew);
	SG_NULLARGCHECK_RETURN(pvhFrag);

	SG_ERR_CHECK(  SG_vhash__get__sz(pCtx,pvhFrag,KEY_VERSION,&pszVersion)  );

	// Reject anything not serialized with VALUE_VERSION == 1.
	if (strcmp(pszVersion,"1") != 0)
		SG_ERR_THROW(  SG_ERR_DAGFRAG_DESERIALIZATION_VERSION  );

	{
		SG_varray * pvaData;
		const char* psz_repo_id = NULL;
		const char* psz_admin_id = NULL;

		SG_ERR_CHECK(  SG_vhash__get__sz(pCtx,pvhFrag,KEY_REPO_ID,&psz_repo_id)  );
		SG_ERR_CHECK(  SG_vhash__get__sz(pCtx,pvhFrag,KEY_ADMIN_ID,&psz_admin_id)  );
		SG_ERR_CHECK(  SG_vhash__get__int64(pCtx,pvhFrag,KEY_DAGNUM,&i64DagNum)  );

		SG_ERR_CHECK(  SG_dagfrag__alloc(pCtx,&pFrag,psz_repo_id,psz_admin_id,(SG_uint32) i64DagNum)  );

		SG_ERR_CHECK(  SG_vhash__get__varray(pCtx,pvhFrag,KEY_DATA,&pvaData)  );

		data.pFrag = pFrag;
		SG_ERR_CHECK(  SG_varray__foreach(pCtx,
										  pvaData,
										  _deserialize_data_ver_1_cb,
										  &data)  );

		*ppNew = pFrag;
		return;
	}

fail:
	SG_DAGFRAG_NULLFREE(pCtx, pFrag);
}
/**
 * Release VFILE lock and invoke external merge tool for this file.
 *
 * TODO 2010/07/12 The MERGE-PLAN is an array and allows for
 * TODO            multiple steps (for an n-way sub-merge cascade).
 * TODO            But we don't have that part turned on yet in
 * TODO            sg_mrg__private_build_wd_issues.h:_make_file_merge_plan(),
 * TODO            so for now, we only expect 1 step.
 * TODO
 * TODO            Also, when we do have multiple steps, we might want to
 * TODO            be able to use the 'status' field to see which steps
 * TODO            were already performed in an earlier RESOLVE.
 * TODO
 * TODO            Also, when we want to support more than 1 step we need
 * TODO            to copy pvaPlan because when we release the pendingtree
 * TODO            the pvhIssue becomes invalidated too.
 */
static void _resolve__fix__run_external_file_merge(SG_context * pCtx,
												   struct _resolve_data * pData,
												   const char * pszGid,
												   const SG_vhash * pvhIssue,
												   SG_string * pStrRepoPath,
												   enum _fix_status * pFixStatus)
{
	_resolve__step_pathnames * pStepPathnames = NULL;
	_resolve__external_tool * pET = NULL;
	const SG_varray * pvaPlan;			// we do not own this; invalidated when pendingtree is freed
	const SG_vhash * pvhStep_0;			// we do not own this; invalidated when pendingtree is freed
	SG_int64 r64;
	SG_uint32 nrSteps;
	SG_mrg_automerge_result result;
	SG_bool bMerged = SG_FALSE;
	SG_bool bIsResolved = SG_FALSE;

	SG_ERR_CHECK(  SG_vhash__get__varray(pCtx, pvhIssue, "conflict_file_merge_plan", (SG_varray **)&pvaPlan)  );
	SG_ERR_CHECK(  SG_varray__count(pCtx, pvaPlan, &nrSteps)  );
	// Per the TODO above, only single-step plans are supported today.
	if (nrSteps > 1)
		SG_ERR_THROW2(  SG_ERR_ASSERT,
						(pCtx, "TODO RESOLVE more than 1 step in auto-merge plan for '%s'.", SG_string__sz(pStrRepoPath))  );

	//////////////////////////////////////////////////////////////////
	// Get Step[0]

	SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pvaPlan, 0, (SG_vhash **)&pvhStep_0)  );

	// see if the user has already performed the merge and maybe got interrupted.
	SG_ERR_CHECK(  SG_vhash__get__int64(pCtx, pvhStep_0, "status", &r64)  );
	result = (SG_mrg_automerge_result)r64;
	if (result == SG_MRG_AUTOMERGE_RESULT__SUCCESSFUL)
	{
		SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR, "TODO Print message about previous successful manual merge of the file content and ask if they want to redo it for '%s'.\n", SG_string__sz(pStrRepoPath))  );
		*pFixStatus = FIX_USER_MERGED;
		goto done;
	}

	SG_ERR_CHECK(  _resolve__step_pathnames__compute(pCtx, pData, pvhIssue, pvhStep_0, pStrRepoPath, &pStepPathnames)  );

	// While we still have a handle to the pendingtree, lookup the
	// specifics on the external tool that we should invoke.  these
	// details come from localsettings.

	SG_ERR_CHECK(  _resolve__external_tool__lookup(pCtx, pData, pszGid, pvhIssue, pStrRepoPath, &pET)  );

	// Free the PENDINGTREE so that we release the VFILE lock.
	// NOTE: pvhIssue/pvaPlan/pvhStep_0 all point into pendingtree-owned
	// memory, so they must be dropped before the free.

	pvhIssue = NULL;
	pvaPlan = NULL;
	pvhStep_0 = NULL;
	SG_PENDINGTREE_NULLFREE(pCtx, pData->pPendingTree);

	//////////////////////////////////////////////////////////////////
	// Invoke the external tool.

	SG_ERR_CHECK(  _resolve__fix__run_external_file_merge_1(pCtx, pData, pET, pStepPathnames, pStrRepoPath, &bMerged)  );
	if (!bMerged)
	{
		SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "RESOLVE: Aborting the merge of this file.\n")  );
		*pFixStatus = FIX_USER_ABORTED;
		goto done;
	}

	//////////////////////////////////////////////////////////////////
	// Reload the PENDINGTREE and re-fetch the ISSUE and updated the STATUS on
	// this step in the PLAN.
	//
	// We duplicate some of the "see if someone else resolved this issue while
	// we were without the lock" stuff.

	SG_ERR_CHECK(  _resolve__lookup_issue(pCtx, pData, pszGid, &pvhIssue)  );
	SG_ERR_CHECK(  _resolve__is_resolved(pCtx, pvhIssue, &bIsResolved)  );
	if (bIsResolved)
	{
		// Someone else marked it resolved while were waiting for
		// the user to edit the file and while we didn't have the
		// file lock.  We should stop here.

		SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "RESOLVE: Aborting the merge of this file (due to race condition).\n")  );
		*pFixStatus = FIX_LOST_RACE;
		goto done;
	}

	// re-fetch the current step and update the "result" status for it
	// and flush the pendingtree back to disk.
	//
	// we only update the step status -- we DO NOT alter the __DIVERGENT_FILE_EDIT__
	// conflict_flags.

	SG_ERR_CHECK(  SG_vhash__get__varray(pCtx, pvhIssue, "conflict_file_merge_plan", (SG_varray **)&pvaPlan)  );
	SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pvaPlan, 0, (SG_vhash **)&pvhStep_0)  );
	SG_ERR_CHECK(  SG_pendingtree__set_wd_issue_plan_step_status__dont_save_pendingtree(pCtx, pData->pPendingTree,
																						pvhStep_0,
																						SG_MRG_AUTOMERGE_RESULT__SUCCESSFUL)  );
	SG_ERR_CHECK(  SG_pendingtree__save(pCtx, pData->pPendingTree)  );
	SG_PENDINGTREE_NULLFREE(pCtx, pData->pPendingTree);

	SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "RESOLVE: The file content portion of the merge was successful.\n")  );
	*pFixStatus = FIX_USER_MERGED;

	// we defer the delete of the temp input files until we completely
	// resolve the issue.  (This gives us more options if we allow the
	// resolve to be restarted after interruptions.)

done:
	;
fail:
	_RESOLVE__EXTERNAL_TOOL__NULLFREE(pCtx, pET);
	_RESOLVE__STEP_PATHNAMES__NULLFREE(pCtx, pStepPathnames);
}
// TODO not sure we really want to pass this much stuff to this interface
/**
 * Invoke the WIT "validate associations" hook (if one is configured)
 * to check the given work-item ids against the tracker.
 *
 * paszAssocs/count_assocs: the candidate work-item ids to validate.
 * pBugs (optional): when the hook result contains a "bugs" array and
 * pBugs is non-NULL, the items are copied into it.
 *
 * Throws SG_ERR_VC_HOOK_REFUSED (with the hook's message) when the
 * hook result contains an "error" entry.
 * Silently returns if no hook is installed for this interface.
 */
void SG_vc_hooks__ASK__WIT__VALIDATE_ASSOCIATIONS(
	SG_context* pCtx,
	SG_repo* pRepo,
	const char* const* paszAssocs,
	SG_uint32 count_assocs,
	SG_varray *pBugs
	)
{
	SG_vhash* pvh_params = NULL;
	SG_vhash* pvh_result = NULL;
	char* psz_repo_id = NULL;
	char* psz_admin_id = NULL;
	SG_vhash* pvh_hook = NULL;
	const char* psz_js = NULL;
	SG_uint32 i = 0;
	SG_varray* pva_ids = NULL;			// we do not own this (owned by pvh_params)
	const char* psz_descriptor_name = NULL;

	SG_ERR_CHECK(  sg_vc_hooks__lookup_by_interface__single_result(
						pCtx,
						pRepo,
						SG_VC_HOOK__INTERFACE__ASK__WIT__VALIDATE_ASSOCIATIONS,
						&pvh_hook
						)  );
	if (!pvh_hook)
		return;		// no hook configured: nothing to validate against

	SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepo, &psz_admin_id)  );
	SG_ERR_CHECK(  SG_repo__get_repo_id( pCtx, pRepo, &psz_repo_id )  );
	SG_ERR_CHECK(  SG_repo__get_descriptor_name(pCtx, pRepo, &psz_descriptor_name)  );
	SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvh_hook, "js", &psz_js)  );

	// Build the parameter vhash handed to the hook's JS.
	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_params)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvh_params, "repo_id", psz_repo_id)  );
	SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvh_params, "admin_id", psz_admin_id)  );
	// descriptor name is absent for unnamed repos
	if (psz_descriptor_name)
	{
		SG_ERR_CHECK(  SG_vhash__add__string__sz(pCtx, pvh_params, "descriptor_name", psz_descriptor_name)  );
	}

	SG_ERR_CHECK(  SG_vhash__addnew__varray(pCtx, pvh_params, "wit_ids", &pva_ids)  );
	for (i=0; i<count_assocs; i++)
	{
		SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, pva_ids, paszAssocs[i])  );
	}

	SG_ERR_CHECK(  SG_vc_hooks__execute(pCtx, psz_js, pvh_params, &pvh_result)  );

	// TODO process the result
	if (pvh_result)
	{
		SG_bool hasErrors = SG_FALSE;
		SG_bool hasBugs = SG_FALSE;

		// an "error" entry means the hook refused the associations
		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvh_result, "error", &hasErrors)  );
		if (hasErrors)
		{
			const char *emsg = NULL;

			SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvh_result, "error", &emsg)  );
			SG_ERR_THROW2(  SG_ERR_VC_HOOK_REFUSED, (pCtx, "%s", emsg)  );
		}

		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvh_result, "bugs", &hasBugs)  );
		if (hasBugs && pBugs)
		{
			SG_varray *bugs = NULL;

			SG_ERR_CHECK(  SG_vhash__get__varray(pCtx, pvh_result, "bugs", &bugs)  );
			SG_ERR_CHECK(  SG_varray__copy_items(pCtx, bugs, pBugs)  );
		}
	}

fail:
	SG_VHASH_NULLFREE(pCtx, pvh_params);
	SG_VHASH_NULLFREE(pCtx, pvh_result);
	SG_VHASH_NULLFREE(pCtx, pvh_hook);
	SG_NULLFREE(pCtx, psz_repo_id);
	SG_NULLFREE(pCtx, psz_admin_id);
}
/**
 * Run HISTORY using the working folder to supply defaults (repo, branch
 * attachment, baseline parents) when the caller did not name revisions.
 *
 * Output params (all optional unless noted): *pbHasResult / *ppResult /
 * *ppHistoryToken come directly from SG_history__run; *ppvhBranchPile
 * (when requested) receives branch data so callers can format output.
 * Caller owns the returned result/token/branch-pile.
 */
void sg_vv2__history__working_folder(
	SG_context * pCtx,
	const SG_stringarray * psaInputs,
	const SG_rev_spec* pRevSpec,
	const SG_rev_spec* pRevSpec_single_revisions,
	const char* pszUser,
	const char* pszStamp,
	SG_bool bDetectCurrentBranch,
	SG_uint32 nResultLimit,
	SG_bool bHideObjectMerges,
	SG_int64 nFromDate,
	SG_int64 nToDate,
	SG_bool bListAll,
	SG_bool* pbHasResult,
	SG_vhash** ppvhBranchPile,
	SG_history_result ** ppResult,
	SG_history_token ** ppHistoryToken)
{
	SG_repo * pRepo = NULL;
	SG_stringarray * pStringArrayGIDs = NULL;
	SG_stringarray * pStringArrayChangesets = NULL;
	SG_stringarray * pStringArrayChangesetsMissing = NULL;
	SG_stringarray * pStringArrayChangesets_single_revisions = NULL;
	SG_bool bRecommendDagWalk = SG_FALSE;
	SG_bool bLeaves = SG_FALSE;
	const char * pszBranchName = NULL;	// we do not own this
	SG_vhash* pvhBranchPile = NULL;
	SG_varray* pvaParents = NULL;		// we do not own this
	SG_bool bMyBranchWalkRecommendation = SG_FALSE;
	SG_rev_spec* pRevSpec_Allocated = NULL;
	SG_wc_tx * pWcTx = NULL;
	SG_vhash * pvhInfo = NULL;
	SG_uint32 count_args = 0;
	SG_uint32 countRevsSpecified = 0;

	if (psaInputs)
		SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaInputs, &count_args)  );

	// Use the WD to try to get the initial info.
	// I'm going to deviate from the model and use
	// a read-only TX here so that I can get a bunch
	// of fields that we need later.

	SG_ERR_CHECK(  SG_WC_TX__ALLOC__BEGIN(pCtx, &pWcTx, NULL, SG_TRUE)  );

	if (count_args > 0)
		SG_ERR_CHECK(  SG_wc_tx__get_item_gid__stringarray(pCtx, pWcTx, psaInputs, &pStringArrayGIDs)  );

	SG_ERR_CHECK(  SG_wc_tx__get_wc_info(pCtx, pWcTx, &pvhInfo)  );
	SG_ERR_CHECK(  SG_wc_tx__get_repo_and_wd_top(pCtx, pWcTx, &pRepo, NULL)  );

	/* If no revisions were specified, and the caller wants us to use the current branch,
	 * create a revision spec with the current branch.
	 */

	if (pRevSpec)
	{
		SG_ERR_CHECK(  SG_REV_SPEC__ALLOC__COPY(pCtx, pRevSpec, &pRevSpec_Allocated)  );
		SG_ERR_CHECK(  SG_rev_spec__count(pCtx, pRevSpec_Allocated, &countRevsSpecified)  );
	}
	else
	{
		SG_ERR_CHECK(  SG_REV_SPEC__ALLOC(pCtx, &pRevSpec_Allocated)  );
	}

	// single revisions count toward "did the caller name anything",
	// even though they are passed separately to SG_history__run.
	if (pRevSpec_single_revisions != NULL)
	{
		SG_uint32 countRevsSpecified_singles = 0;

		SG_ERR_CHECK(  SG_rev_spec__count(pCtx, pRevSpec_single_revisions, &countRevsSpecified_singles)  );
		countRevsSpecified += countRevsSpecified_singles;
	}

	if (bDetectCurrentBranch && countRevsSpecified == 0)
	{
		SG_ERR_CHECK(  SG_vhash__check__sz(pCtx, pvhInfo, "branch", &pszBranchName)  );
		if (pszBranchName)
		{
			/* The working folder is attached to a branch. Does it exist? */
			SG_bool bHasBranches = SG_FALSE;
			SG_bool bBranchExists = SG_FALSE;

			SG_ERR_CHECK(  SG_vc_branches__cleanup(pCtx, pRepo, &pvhBranchPile)  );
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhBranchPile, "branches", &bHasBranches)  );
			if (bHasBranches)
			{
				SG_vhash* pvhRefBranches;

				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhBranchPile, "branches", &pvhRefBranches)  );
				SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRefBranches, pszBranchName, &bBranchExists)  );
			}

			if (bBranchExists)
			{
				SG_uint32 numParents, i;
				const char* pszRefParent;

				/* If that branch exists, just add to our rev spec. */
				SG_ERR_CHECK(  SG_rev_spec__add_branch(pCtx, pRevSpec_Allocated, pszBranchName)  );

				/* Plus, if the working folder's parents are not in the branch (yet), add them as well
				 * (they'll be in it after the user commits something...).
				 */
				SG_ERR_CHECK(  SG_vhash__get__varray(pCtx, pvhInfo, "parents", &pvaParents)  );
				SG_ERR_CHECK(  SG_varray__count(pCtx, pvaParents, &numParents)  );
				for (i = 0; i < numParents; i++)
				{
					SG_bool already_in_rev_spec = SG_FALSE;

					SG_ERR_CHECK(  SG_varray__get__sz(pCtx, pvaParents, i, &pszRefParent)  );
					SG_ERR_CHECK(  SG_rev_spec__contains(pCtx, pRepo, pRevSpec_Allocated, pszRefParent, &already_in_rev_spec)  );
					if(!already_in_rev_spec)
						SG_ERR_CHECK(  SG_rev_spec__add_rev(pCtx, pRevSpec_Allocated, pszRefParent)  );
				}
			}
			else
			{
				/* If the branch doesn't exist, add the working folder's baseline(s) to the rev spec
				 * and force a dag walk. */
				SG_uint32 numParents, i;
				const char* pszRefParent;

				SG_ERR_CHECK(  SG_vhash__get__varray(pCtx, pvhInfo, "parents", &pvaParents)  );
				SG_ERR_CHECK(  SG_varray__count(pCtx, pvaParents, &numParents)  );
				for (i = 0; i < numParents; i++)
				{
					SG_ERR_CHECK(  SG_varray__get__sz(pCtx, pvaParents, i, &pszRefParent)  );
					SG_ERR_CHECK(  SG_rev_spec__add_rev(pCtx, pRevSpec_Allocated, pszRefParent)  );
				}

				bMyBranchWalkRecommendation = SG_TRUE;
			}
		}
	}

	// Determine the starting changeset IDs.  strBranch and bLeaves control this.
	// We do this step here, so that repo paths can be looked up before we call into history__core.

	SG_ERR_CHECK(  sg_vv2__history__get_starting_changesets(pCtx, pRepo, pRevSpec_Allocated,
															&pStringArrayChangesets,
															&pStringArrayChangesetsMissing,
															&bRecommendDagWalk,
															&bLeaves)  );
	if (pStringArrayChangesetsMissing)
	{
		// See K2177, K1322, W0836, W8132.  We requested specific starting
		// points and ran into some csets that were referenced (by --tag
		// or --branch) that are not present in the local repo.  Try to
		// silently ignore them.

		SG_uint32 nrFound = 0;

		SG_ERR_CHECK(  SG_stringarray__count(pCtx, pStringArrayChangesets, &nrFound)  );
		if (nrFound > 0)
		{
			// Yes there were missing csets, but we still found some
			// of the referenced ones.  Just ignore the missing ones.
			// This should behave just like we had the older tag/branch
			// dag prior to the push -r on the vc dag.
		}
		else
		{
			const char * psz_0;

			// TODO 2012/10/19 Do we want a different message if the number of missing is > 1 ?

			SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, pStringArrayChangesetsMissing, 0, &psz_0)  );
			SG_ERR_THROW2(  SG_ERR_CHANGESET_BLOB_NOT_FOUND, (pCtx, "%s", psz_0)  );
		}
	}

	bRecommendDagWalk = bRecommendDagWalk || bMyBranchWalkRecommendation;

	//This hack is here to detect when we're being asked for the parent of a certain
	//object from the sg_parents code.  parents always wants the dag walk.
	//The better solution would be to allow users to pass in a flag about their dagwalk
	//preferences
	if (count_args == 1 && nResultLimit == 1)
		bRecommendDagWalk = SG_TRUE;

	if (bListAll)
	{
		// See W8493.  If they gave us a --list-all along with a --rev or --tag, they
		// want to force us to show the full history rather than just the info for the
		// named cset.
		bRecommendDagWalk = SG_TRUE;
	}

	if (pRevSpec_single_revisions)
	{
		// We DO NOT pass a psaMissingHids here because we want
		// it to throw if the user names a missing cset.
		SG_ERR_CHECK(  SG_rev_spec__get_all__repo__dedup(pCtx, pRepo, pRevSpec_single_revisions, SG_TRUE,
														 &pStringArrayChangesets_single_revisions, NULL)  );
	}

	// TODO 2012/07/03 This deviates from the model.  This call directly returns the
	// TODO            allocated data into the caller's pointers.  If anything fails
	// TODO            (such as the call to get the branches below), we'll probably
	// TODO            leak the result and token.

	SG_ERR_CHECK(  SG_history__run(pCtx, pRepo, pStringArrayGIDs,
								   pStringArrayChangesets, pStringArrayChangesets_single_revisions,
								   pszUser, pszStamp, nResultLimit, bLeaves, bHideObjectMerges,
								   nFromDate, nToDate, bRecommendDagWalk, SG_FALSE,
								   pbHasResult, ppResult, ppHistoryToken)  );

	/* This is kind of a hack.  History callers often need branch data to format ouput.
	 * But we open the repo down here.  I didn't want to open/close it again.  And there's logic
	 * in here about which repo to open.  So instead, we do this.
	 */
	if (ppvhBranchPile)
	{
		if (pvhBranchPile)
		{
			// hand over the pile we already fetched during branch detection
			*ppvhBranchPile = pvhBranchPile;
			pvhBranchPile = NULL;
		}
		else
			SG_ERR_CHECK(  SG_vc_branches__cleanup(pCtx, pRepo, ppvhBranchPile)  );
	}

fail:
	SG_ERR_IGNORE(  SG_wc_tx__cancel(pCtx, pWcTx)  );
	SG_WC_TX__NULLFREE(pCtx, pWcTx);
	SG_REV_SPEC_NULLFREE(pCtx, pRevSpec_Allocated);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesets);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesetsMissing);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesets_single_revisions);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayGIDs);
	SG_VHASH_NULLFREE(pCtx, pvhBranchPile);
	SG_VHASH_NULLFREE(pCtx, pvhInfo);
	SG_REPO_NULLFREE(pCtx, pRepo);
}