/**
 * Look up the conflict ISSUE associated with the given GID in the
 * working directory's pendingtree, (re)loading the pendingtree from
 * disk first if a previous iteration freed it after a SAVE.
 *
 * On success, *ppvhIssue receives a const pointer into the
 * pendingtree's data; the caller does not own it and must not use it
 * after pData->pPendingTree is freed.
 *
 * Throws SG_ERR_ASSERT if no issue exists for the GID.
 */
static void _resolve__lookup_issue(SG_context * pCtx,
                                   struct _resolve_data * pData,
                                   const char * pszGid,
                                   const SG_vhash ** ppvhIssue)
{
    const SG_vhash * pvhFound;
    SG_bool bHaveIssue;

    // Lazily re-open the pendingtree; a prior SAVE in the caller's
    // loop may have freed the instance we had been using.
    if (pData->pPendingTree == NULL)
    {
        SG_ERR_CHECK( SG_PENDINGTREE__ALLOC(pCtx, pData->pPathCwd, pData->bIgnoreWarnings, &pData->pPendingTree) );
    }

    SG_ERR_CHECK( SG_pendingtree__find_wd_issue_by_gid(pCtx, pData->pPendingTree, pszGid, &bHaveIssue, &pvhFound) );
    if (!bHaveIssue)
    {
        // Because the VFILE lock may have been released during the
        // outer loop, another process could have removed the issue out
        // from under us -- e.g. a 'vv revert --all' run in a second
        // shell while the external merge tool was still up.
        //
        // TODO 2010/07/11 I don't like this error message, but I doubt anybody
        // TODO            will ever see it.  Think about taking the GID and
        // TODO            finding a repo-path for the item using just the
        // TODO            PTNODEs and printing it.

        SG_ERR_THROW2( SG_ERR_ASSERT,
                       (pCtx, "RESOLVE failed to find ISSUE for GID %s.", pszGid) );
    }

    *ppvhIssue = pvhFound;

fail:
    return;
}
/**
 * After a successful UPDATE, tell the user whether the baseline
 * actually moved and whether other heads exist that they should
 * consider merging with.
 *
 * Opens a fresh pendingtree instance because the UPDATE saved (and
 * thereby freed the guts of) the previous one.  All console output is
 * advisory only; errors printing it are ignored by the caller.
 */
static void _advise_after_update(SG_context * pCtx,
                                 SG_option_state * pOptSt,
                                 SG_pathname * pPathCwd,
                                 const char * pszBaselineBeforeUpdate)
{
    SG_pendingtree * pPendingTree = NULL;
    SG_repo * pRepo;                        // borrowed from pPendingTree; not freed here
    char * pszBaselineAfterUpdate = NULL;
    SG_rbtree * prbLeaves = NULL;
    SG_uint32 nrLeaves;
    SG_bool bBaselineMoved;

    // Re-open the pendingtree so we can read the now-current baseline.
    SG_ERR_CHECK( SG_PENDINGTREE__ALLOC(pCtx, pPathCwd, pOptSt->bIgnoreWarnings, &pPendingTree) );
    SG_ERR_CHECK( SG_pendingtree__get_repo(pCtx, pPendingTree, &pRepo) );
    SG_ERR_CHECK( _get_baseline(pCtx, pPendingTree, &pszBaselineAfterUpdate) );

    // Did the UPDATE actually move the baseline?
    bBaselineMoved = (strcmp(pszBaselineBeforeUpdate, pszBaselineAfterUpdate) != 0);

    // Fetch every head/leaf of the version-control DAG.
    //
    // TODO 2010/06/30 Revisit this when we have NAMED BRANCHES because we
    // TODO            want to filter this list for things within their BRANCH.

    SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, &prbLeaves) );
#if defined(DEBUG)
    {
        // Sanity check: the post-update baseline must itself be a leaf.
        SG_bool bFound = SG_FALSE;
        SG_ERR_CHECK( SG_rbtree__find(pCtx, prbLeaves, pszBaselineAfterUpdate, &bFound, NULL) );
        SG_ASSERT( (bFound) );
    }
#endif
    SG_ERR_CHECK( SG_rbtree__count(pCtx, prbLeaves, &nrLeaves) );

    if (nrLeaves > 1)
    {
        if (bBaselineMoved)
            SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDOUT, "Baseline updated to descendant head, but there are multiple heads; consider merging.\n") );
        else
            SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDOUT, "Baseline already at head, but there are multiple heads; consider merging.\n") );
    }
    else if (bBaselineMoved)
    {
        SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDOUT, "Baseline updated to head.\n") );
    }
    else
    {
        SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDOUT, "Baseline already at head.\n") );
    }

fail:
    SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
    SG_RBTREE_NULLFREE(pCtx, prbLeaves);
    SG_NULLFREE(pCtx, pszBaselineAfterUpdate);
}
/**
 * Handle the UPDATE command.
 *
 * Moves the working directory's baseline to the requested changeset:
 * a specific --rev, a specific --tag, or (when neither was given) the
 * unique descendant head chosen by the UPDATE code itself.  In the
 * no-target case, a successful update is followed by an advisory scan
 * for other heads so the user can be told to consider merging.
 */
void do_cmd_update(SG_context * pCtx,
                   SG_option_state * pOptSt)
{
    SG_pathname * pPathCwd = NULL;
    SG_pendingtree * pPendingTree = NULL;
    SG_repo * pRepo;                    // borrowed from pPendingTree (see get_repo below)
    char * pszTargetChangeset = NULL;
    char * pszBaselineBeforeUpdate = NULL;

    // use the current directory to find the pending-tree, the repo, and the current baseline.

    SG_ERR_CHECK( SG_PATHNAME__ALLOC(pCtx, &pPathCwd) );
    SG_ERR_CHECK( SG_pathname__set__from_cwd(pCtx, pPathCwd) );
    SG_ERR_CHECK( SG_PENDINGTREE__ALLOC(pCtx, pPathCwd, pOptSt->bIgnoreWarnings, &pPendingTree) );
    SG_ERR_CHECK( SG_pendingtree__get_repo(pCtx, pPendingTree, &pRepo) );
    SG_ERR_CHECK( _get_baseline(pCtx, pPendingTree, &pszBaselineBeforeUpdate) );

    // determine the target changeset

    // we check that we have at most 1 rev *or* 1 tag up in sg.c
    if (pOptSt->iCountRevs == 1)
    {
        // --rev given: resolve the (possibly abbreviated) HID prefix
        // to a full changeset id.
        SG_rev_tag_obj* pRTobj = NULL;
        const char * psz_rev_0;

        SG_ERR_CHECK( SG_vector__get(pCtx, pOptSt->pvec_rev_tags, 0, (void**)&pRTobj) );
        psz_rev_0 = pRTobj->pszRevTag;

        SG_ERR_CHECK( SG_repo__hidlookup__dagnode(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, psz_rev_0, &pszTargetChangeset) );
    }
    else if (pOptSt->iCountTags == 1)
    {
        // --tag given: look up the changeset the tag points to.
        SG_rev_tag_obj* pRTobj = NULL;
        const char * psz_tag_0;

        SG_ERR_CHECK( SG_vector__get(pCtx, pOptSt->pvec_rev_tags, 0, (void**)&pRTobj) );
        psz_tag_0 = pRTobj->pszRevTag;

        SG_ERR_CHECK( SG_vc_tags__lookup__tag(pCtx, pRepo, psz_tag_0, &pszTargetChangeset) );
    }
    else
    {
        // pass NULL for target changeset and let the UPDATE code find the proper head/tip.
    }

    SG_ERR_CHECK( _my_do_cmd_update(pCtx, pOptSt, pPendingTree, pszTargetChangeset) );
    // The update saved/consumed the pendingtree; free it now and clear
    // pRepo, which was owned by it, so neither can be used stale below.
    SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
    pRepo = NULL;

    if (pszTargetChangeset == NULL)
    {
        // if they didn't ask for a specific changeset (and we successfully
        // went to the SINGLE/UNIQUE DESCENDANT HEAD from their (then current)
        // BASELINE, we should look around and see if there are other heads/leaves
        // and advise them to MERGE with them.
        //
        // Since we did successfully do the UPDATE we should exit with OK, so
        // I'm going to do all of this advisory stuff in an IGNORE.

        SG_ERR_IGNORE( _advise_after_update(pCtx, pOptSt, pPathCwd, pszBaselineBeforeUpdate) );
    }

fail:
    // NULLFREE macros tolerate NULL, so the mid-function free above is safe.
    SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
    SG_PATHNAME_NULLFREE(pCtx, pPathCwd);
    SG_NULLFREE(pCtx, pszTargetChangeset);
    SG_NULLFREE(pCtx, pszBaselineBeforeUpdate);
}
/**
 * Handle the RESOLVE command.
 *
 * Validates the mutually-exclusive command options, selects the set of
 * issue GIDs to operate on (from explicit path ARGs or from the whole
 * issues list), then dispatches to list / mark / fix.
 */
void do_cmd_resolve(SG_context * pCtx,
                    SG_option_state * pOptSt,
                    SG_uint32 count_args, const char** paszArgs)
{
    struct _resolve_data data;
    SG_uint32 sum = 0;                  // how many command options were given (must be <= 1)
    SG_bool bAll = SG_FALSE;            // operate on all issues (no ARGs allowed)
    SG_bool bWantResolved = SG_FALSE;   // include already-resolved issues in the selection
    SG_bool bWantUnresolved = SG_FALSE; // include unresolved issues in the selection
    SG_bool bReqArg = SG_FALSE;         // this option requires at least one ARG

    memset(&data, 0, sizeof(data));
    data.pPathCwd = NULL;
    data.pPendingTree = NULL;
    data.psaGids = NULL;
    data.bIgnoreWarnings = SG_TRUE;     // TODO what should this be?

    // allow at most ONE of the command options.
    //
    // the --{List,Mark,Unmark}All options do not allow ARGs.
    //
    // the --{Mark,Unmark} require at least one ARG.
    // the --List allows 0 or more ARGs.
    //
    // if no command option, allow 0 or more ARGs.
    //
    // most commands do not require there to be issues; rather
    // they just don't do anything.
    //
    // WARNING: We set sg_cl_options[].has_arg to 0 for all of
    //          our commands options so that we get all of the
    //          pathnames in ARGs rather than bound to the option.
    //          That is, I want to be able to say:
    //              vv resolve --mark foo bar
    //          rather than:
    //              vv resolve --mark foo --mark bar
    //
    //          It also allows me to have:
    //              vv resolve --list
    //          and
    //              vv resolve --list foo

    if (pOptSt->bListAll)       { sum++; bAll = SG_TRUE;  bWantResolved = SG_TRUE;  bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
    if (pOptSt->bMarkAll)       { sum++; bAll = SG_TRUE;  bWantResolved = SG_TRUE;  bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
//  if (pOptSt->bUnmarkAll)     { sum++; bAll = SG_TRUE;  bWantResolved = SG_TRUE;  bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
    if (pOptSt->bList)
    {
        // bare --list shows only unresolved issues; --list with ARGs
        // shows the status of the named items whether resolved or not.
        if (count_args == 0)    { sum++; bAll = SG_FALSE; bWantResolved = SG_FALSE; bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
        else                    { sum++; bAll = SG_FALSE; bWantResolved = SG_TRUE;  bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
    }
    if (pOptSt->bMark)          { sum++; bAll = SG_FALSE; bWantResolved = SG_FALSE; bWantUnresolved = SG_TRUE;  bReqArg = SG_TRUE;  }
//  if (pOptSt->bUnmark)        { sum++; bAll = SG_FALSE; bWantResolved = SG_TRUE;  bWantUnresolved = SG_FALSE; bReqArg = SG_TRUE;  }
    if (sum == 0)               {        bAll = SG_FALSE; bWantResolved = SG_FALSE; bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }

    if (sum > 1)
        SG_ERR_THROW( SG_ERR_USAGE );
    if (bReqArg && (count_args == 0))
        SG_ERR_THROW( SG_ERR_USAGE );
    if (bAll && (count_args > 0))
        SG_ERR_THROW( SG_ERR_USAGE );

    SG_ERR_CHECK( SG_PATHNAME__ALLOC(pCtx, &data.pPathCwd) );
    SG_ERR_CHECK( SG_pathname__set__from_cwd(pCtx, data.pPathCwd) );

    // Do a complete scan first.  This ensures that the pendingtree knows
    // about everything that is dirty in the WD and helps ensure that every
    // issue in the issues list has a ptnode in the pendingtree.
    //
    // TODO 2010/07/16 Technically, this should NOT be required.  But it
    // TODO            helps.  The problem is that when a file is edited
    // TODO            we don't automatically get the notification, rather
    // TODO            we do a status aka scan (and/or use the timestamp
    // TODO            cache) when various commands start which detect
    // TODO            file content changes.  So the fact that the MERGE
    // TODO            may have written a bunch of merged/edited files
    // TODO            doesn't necessarily mean that they are listed in
    // TODO            the pendingtree -- because the user may have edited
    // TODO            them again (or edited other files) since the merge
    // TODO            completed.  So we scan.
    // TODO
    // TODO            See also the comment in sg.c:do_cmd_commit() for sprawl-809.
    // TODO
    // TODO            What this scan is helping to hide is a problem where
    // TODO            we're hitting the issues list for GIDs and then
    // TODO            using SG_pendingtree__find_repo_path_by_gid() to
    // TODO            dynamically convert it into a "live/current" repo-path.
    // TODO            and it assumes that it is only called for dirty entries
    // TODO            (or rather, for entries that have a ptnode).  We need
    // TODO            to fix that.

    SG_ERR_CHECK( SG_PENDINGTREE__ALLOC(pCtx, data.pPathCwd, data.bIgnoreWarnings, &data.pPendingTree) );
    SG_ERR_CHECK( SG_pendingtree__scan(pCtx, data.pPendingTree, SG_TRUE, NULL, 0, NULL, 0) );
    // the scan saved the pendingtree; free this instance and reload below.
    SG_PENDINGTREE_NULLFREE(pCtx, data.pPendingTree);

    // Now load the pendingtree for real.

    SG_ERR_CHECK( SG_PENDINGTREE__ALLOC(pCtx, data.pPathCwd, data.bIgnoreWarnings, &data.pPendingTree) );

    // Build data.psaGids: either from the explicit ARG pathnames or
    // from the entire issues list, filtered by resolved/unresolved.
    if (count_args > 0)
        SG_ERR_CHECK( _resolve__map_args_to_gids(pCtx, &data, count_args, paszArgs, bWantResolved, bWantUnresolved) );
    else
        SG_ERR_CHECK( _resolve__get_all_issue_gids(pCtx, &data, bWantResolved, bWantUnresolved) );

    //////////////////////////////////////////////////////////////////

    if (pOptSt->bListAll || pOptSt->bList)
    {
        SG_ERR_CHECK( _resolve__do_list(pCtx, &data) );
    }
    else if (pOptSt->bMarkAll || pOptSt->bMark)
    {
        SG_ERR_CHECK( _resolve__do_mark(pCtx, &data, SG_TRUE) );
    }
//  else if (pOptSt->bUnmarkAll || pOptSt->bUnmark)
//  {
//      SG_ERR_CHECK( _resolve__do_mark(pCtx, &data, SG_FALSE) );
//  }
    else // no command option given -- assume we want to FIX the issues
    {
        SG_ERR_CHECK( _resolve__do_fix(pCtx, &data) );
    }

fail:
    SG_PATHNAME_NULLFREE(pCtx, data.pPathCwd);
    SG_PENDINGTREE_NULLFREE(pCtx, data.pPendingTree);
    SG_STRINGARRAY_NULLFREE(pCtx, data.psaGids);
}
/**
 * Populate a working directory for the named repo under the given
 * directory, from the given baseline changeset -- or, when
 * psz_spec_hid_cs_baseline is NULL, from the repo's single head
 * (throws SG_ERR_MULTIPLE_HEADS_FROM_DAGNODE if there is more than one).
 *
 * When bCreateDrawer is set, also creates the drawer: writes the
 * repo-descriptor mapping ("repo.json"), creates a pendingtree
 * ("wd.json") whose single parent is the baseline, and seeds the
 * timestamp cache with the timestamps of the files just fetched.
 * When not set, this behaves like an EXPORT: files only, no drawer,
 * no timestamps.
 */
void SG_workingdir__create_and_get(
	SG_context* pCtx,
	const char* pszDescriptorName,
	const SG_pathname* pPathDirPutTopLevelDirInHere,
	SG_bool bCreateDrawer,
	const char* psz_spec_hid_cs_baseline
	)
{
	SG_repo* pRepo = NULL;
	SG_rbtree* pIdsetLeaves = NULL;
	SG_uint32 count_leaves = 0;
	SG_changeset* pcs = NULL;
	const char* pszidUserSuperRoot = NULL;
	SG_bool b = SG_FALSE;
	char* psz_hid_cs_baseline = NULL;
	SG_pendingtree * pPendingTree = NULL;
	SG_vhash * pvhTimestamps = NULL;

	/*
	 * Fetch the descriptor by its given name and use it to connect to
	 * the repo.
	 */
	SG_ERR_CHECK( SG_repo__open_repo_instance(pCtx, pszDescriptorName, &pRepo) );

	if (psz_spec_hid_cs_baseline)
	{
		// Use the SG_STRDUP allocator macro (rather than bare SG_strdup)
		// for consistency with the other allocations in this function.
		SG_ERR_CHECK( SG_STRDUP(pCtx, psz_spec_hid_cs_baseline, &psz_hid_cs_baseline) );
	}
	else
	{
		const char* psz_hid = NULL;

		/*
		 * If you do not specify a hid to be the baseline, then this routine
		 * currently only works if there is exactly one leaf in the repo.
		 */
		SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, &pIdsetLeaves) );
		SG_ERR_CHECK( SG_rbtree__count(pCtx, pIdsetLeaves, &count_leaves) );
		if (count_leaves != 1)
			SG_ERR_THROW( SG_ERR_MULTIPLE_HEADS_FROM_DAGNODE );
		SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, NULL, pIdsetLeaves, &b, &psz_hid, NULL) );
		SG_ERR_CHECK( SG_STRDUP(pCtx, psz_hid, &psz_hid_cs_baseline) );
	}

	/*
	 * Load the desired changeset from the repo so we can look up the
	 * id of its user root directory
	 */
	SG_ERR_CHECK( SG_changeset__load_from_repo(pCtx, pRepo, psz_hid_cs_baseline, &pcs) );
	SG_ERR_CHECK( SG_changeset__get_root(pCtx, pcs, &pszidUserSuperRoot) );

	if (bCreateDrawer)
	{
		SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvhTimestamps) );

		// Retrieve everything into the WD and capture the timestamps on the files that we create.
		SG_ERR_CHECK( sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere, pszidUserSuperRoot, pvhTimestamps) );

		// this creates "repo.json" with the repo-descriptor.
		SG_ERR_CHECK( SG_workingdir__set_mapping(pCtx, pPathDirPutTopLevelDirInHere, pszDescriptorName, NULL) );

		// this creates an empty "wd.json" file (which doesn't know anything).
		SG_ERR_CHECK( SG_PENDINGTREE__ALLOC(pCtx, pPathDirPutTopLevelDirInHere, SG_TRUE, &pPendingTree) );

		// force set the initial parents to the current changeset.
		SG_ERR_CHECK( SG_pendingtree__set_single_wd_parent(pCtx, pPendingTree, psz_hid_cs_baseline) );

		// force initialize the timestamp cache to the list that we just built; this should
		// be the only timestamps in the cache since we just populated the WD.
		SG_ERR_CHECK( SG_pendingtree__set_wd_timestamp_cache(pCtx, pPendingTree, &pvhTimestamps) );	// this steals our vhash

		SG_ERR_CHECK( SG_pendingtree__save(pCtx, pPendingTree) );
	}
	else
	{
		// Retrieve everything into the WD but do not create .sgdrawer or record timestamps.
		// This is more like an EXPORT operation.
		SG_ERR_CHECK( sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere, pszidUserSuperRoot, NULL) );
	}

fail:
	// pvhTimestamps is NULL here on the normal drawer path because
	// set_wd_timestamp_cache steals it; NULLFREE tolerates both cases.
	SG_VHASH_NULLFREE(pCtx, pvhTimestamps);
	SG_NULLFREE(pCtx, psz_hid_cs_baseline);
	SG_CHANGESET_NULLFREE(pCtx, pcs);
	SG_RBTREE_NULLFREE(pCtx, pIdsetLeaves);
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
}