Code example #1
/**
 * Test helper: commit everything pending in the given working directory
 * and return the dagnode of the resulting changeset.
 */
void u0051_hidlookup__commit_all(
	SG_context* pCtx,
	const SG_pathname* pPathWorkingDir,
	SG_dagnode** ppdn
	)
{
	SG_pendingtree* pPendingTree = NULL;
	SG_repo* pRepo = NULL;		// borrowed from the pendingtree; not freed here
	SG_dagnode* pdn = NULL;
	SG_audit q;

	SG_ERR_CHECK(  SG_pendingtree__alloc(pCtx, pPathWorkingDir, SG_FALSE, &pPendingTree)  );

	SG_ERR_CHECK(  SG_pendingtree__get_repo(pCtx, pPendingTree, &pRepo)  );
	SG_ERR_CHECK(  SG_audit__init(pCtx, &q, pRepo, SG_AUDIT__WHEN__NOW, SG_AUDIT__WHO__FROM_SETTINGS)  );
	SG_ERR_CHECK(  unittests_pendingtree__commit(pCtx, pPendingTree, &q, NULL, 0, NULL, NULL, 0, NULL, 0, NULL, 0, &pdn)  );

	SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);

	*ppdn = pdn;

	return;
fail:
	SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
}
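A minimal caller sketch for this test helper (the wrapper is hypothetical, and SG_DAGNODE_NULLFREE is assumed to be the matching NULLFREE macro for SG_dagnode):

// Hypothetical test-side usage: commit everything pending under the test
// working directory, then release the returned dagnode.
static void u0051_hidlookup__example_caller(SG_context* pCtx, const SG_pathname* pPathWorkingDir)
{
	SG_dagnode* pdn = NULL;

	SG_ERR_CHECK(  u0051_hidlookup__commit_all(pCtx, pPathWorkingDir, &pdn)  );

	// ... inspect pdn here before falling through to cleanup ...

fail:
	SG_DAGNODE_NULLFREE(pCtx, pdn);		// assumed macro name
}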
Code example #2
File: sg__do_cmd_resolve.c  Project: avar/veracity
/**
 * Update the RESOLVED/UNRESOLVED status for this ISSUE and do an
 * incremental save on the pendingtree.
 */
static void _resolve__mark(SG_context * pCtx,
						   struct _resolve_data * pData,
						   const SG_vhash * pvhIssue,
						   SG_bool bMarkResolved)
{
	SG_int64 s;
	SG_pendingtree_wd_issue_status status;

	SG_ERR_CHECK_RETURN(  SG_vhash__get__int64(pCtx, pvhIssue, "status", &s)  );
	status = (SG_pendingtree_wd_issue_status)s;

	if (bMarkResolved)
		status |= SG_ISSUE_STATUS__MARKED_RESOLVED;
	else
		status &= ~SG_ISSUE_STATUS__MARKED_RESOLVED;

	// update the status on the ISSUE and save the pendingtree now.
	// since this trashes stuff within it, go ahead and free it
	// so no one trips over the trash.

	SG_ERR_CHECK_RETURN(  SG_pendingtree__set_wd_issue_status(pCtx, pData->pPendingTree, pvhIssue, status)  );
	SG_PENDINGTREE_NULLFREE(pCtx, pData->pPendingTree);
}
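Because _resolve__mark saves and then frees pData->pPendingTree, a caller marking several issues must re-fetch each issue between calls. A rough sketch of such a loop, assuming a plain array of GID strings and assuming (as the comments in code example #6 suggest) that _resolve__lookup_issue reloads the pendingtree after it has been freed; the wrapper name is hypothetical:

// Sketch only: mark each listed issue as resolved.  _resolve__mark saves
// and frees pData->pPendingTree every time, so each iteration re-fetches
// the issue (which reloads the pendingtree) before marking it.
static void _resolve__mark_all_sketch(SG_context * pCtx,
									  struct _resolve_data * pData,
									  SG_uint32 nrGids,
									  const char ** paszGids)
{
	SG_uint32 k;

	for (k = 0; k < nrGids; k++)
	{
		const SG_vhash * pvhIssue;		// owned by the pendingtree; we do not free it

		SG_ERR_CHECK_RETURN(  _resolve__lookup_issue(pCtx, pData, paszGids[k], &pvhIssue)  );
		SG_ERR_CHECK_RETURN(  _resolve__mark(pCtx, pData, pvhIssue, SG_TRUE)  );
	}
}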
Code example #3
File: sg__do_cmd_update.c  Project: avar/veracity
static void _advise_after_update(SG_context * pCtx,
								 SG_option_state * pOptSt,
								 SG_pathname * pPathCwd,
								 const char * pszBaselineBeforeUpdate)
{
	SG_pendingtree * pPendingTree = NULL;
	SG_repo * pRepo;
	char * pszBaselineAfterUpdate = NULL;
	SG_rbtree * prbLeaves = NULL;
	SG_uint32 nrLeaves;
	SG_bool bUpdateChangedBaseline;

	// re-open pendingtree to get the now-current baseline (we have to do
	// this in a new instance because the UPDATE saves the pendingtree which
	// frees all of the interesting stuff).

	SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, pPathCwd, pOptSt->bIgnoreWarnings, &pPendingTree)  );
	SG_ERR_CHECK(  SG_pendingtree__get_repo(pCtx, pPendingTree, &pRepo)  );

	SG_ERR_CHECK(  _get_baseline(pCtx, pPendingTree, &pszBaselineAfterUpdate)  );

	// see if the update actually changed the baseline.

	bUpdateChangedBaseline = (strcmp(pszBaselineBeforeUpdate, pszBaselineAfterUpdate) != 0);

	// get the list of all heads/leaves.
	//
	// TODO 2010/06/30 Revisit this when we have NAMED BRANCHES because we
	// TODO            want to filter this list for things within their BRANCH.

	SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx,pRepo,SG_DAGNUM__VERSION_CONTROL,&prbLeaves)  );
#if defined(DEBUG)
	{
		SG_bool bFound = SG_FALSE;
		SG_ERR_CHECK(  SG_rbtree__find(pCtx, prbLeaves, pszBaselineAfterUpdate, &bFound, NULL)  );
		SG_ASSERT(  (bFound)  );
	}
#endif	
	SG_ERR_CHECK(  SG_rbtree__count(pCtx, prbLeaves, &nrLeaves)  );

	if (nrLeaves > 1)
	{
		if (bUpdateChangedBaseline)
		{
			SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT,
									   "Baseline updated to descendant head, but there are multiple heads; consider merging.\n")  );
		}
		else
		{
			SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT,
									   "Baseline already at head, but there are multiple heads; consider merging.\n")  );
		}
	}
	else
	{
		if (bUpdateChangedBaseline)
		{
			SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT,
									   "Baseline updated to head.\n")  );
		}
		else
		{
			SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT,
									   "Baseline already at head.\n")  );
		}
	}

fail:
	SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
	SG_RBTREE_NULLFREE(pCtx, prbLeaves);
	SG_NULLFREE(pCtx, pszBaselineAfterUpdate);
}
Code example #4
File: sg__do_cmd_update.c  Project: avar/veracity
/**
 * Handle the UPDATE command.
 *
 *
 */
void do_cmd_update(SG_context * pCtx,
				   SG_option_state * pOptSt)
{
	SG_pathname * pPathCwd = NULL;
	SG_pendingtree * pPendingTree = NULL;
	SG_repo * pRepo;
	char * pszTargetChangeset = NULL;
	char * pszBaselineBeforeUpdate = NULL;

	// use the current directory to find the pending-tree, the repo, and the current baseline.

	SG_ERR_CHECK(  SG_PATHNAME__ALLOC(pCtx, &pPathCwd)  );
	SG_ERR_CHECK(  SG_pathname__set__from_cwd(pCtx, pPathCwd)  );

	SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, pPathCwd, pOptSt->bIgnoreWarnings, &pPendingTree)  );
	SG_ERR_CHECK(  SG_pendingtree__get_repo(pCtx, pPendingTree, &pRepo)  );

	SG_ERR_CHECK(  _get_baseline(pCtx, pPendingTree, &pszBaselineBeforeUpdate)  );

	// determine the target changeset

	// we check that we have at most 1 rev *or* 1 tag up in sg.c

	if (pOptSt->iCountRevs == 1)
	{
		SG_rev_tag_obj* pRTobj = NULL;
		const char * psz_rev_0;

		SG_ERR_CHECK(  SG_vector__get(pCtx, pOptSt->pvec_rev_tags, 0, (void**)&pRTobj)  );

		psz_rev_0 = pRTobj->pszRevTag;

		SG_ERR_CHECK(  SG_repo__hidlookup__dagnode(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, psz_rev_0, &pszTargetChangeset)  );
	}
	else if (pOptSt->iCountTags == 1)
	{
		SG_rev_tag_obj* pRTobj = NULL;
		const char * psz_tag_0;

		SG_ERR_CHECK(  SG_vector__get(pCtx, pOptSt->pvec_rev_tags, 0, (void**)&pRTobj)  );

		psz_tag_0 = pRTobj->pszRevTag;

		SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, psz_tag_0, &pszTargetChangeset)  );
	}
	else
	{
		// pass NULL for target changeset and let the UPDATE code find the proper head/tip.
	}

	SG_ERR_CHECK(  _my_do_cmd_update(pCtx, pOptSt, pPendingTree, pszTargetChangeset)  );
	SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
	pRepo = NULL;

	if (pszTargetChangeset == NULL)
	{
		// if they didn't ask for a specific changeset (and we successfully
		// went to the SINGLE/UNIQUE DESCENDANT HEAD from their then-current
		// BASELINE), we should look around and see if there are other heads/leaves
		// and advise them to MERGE with them.
		//
		// Since we did successfully do the UPDATE we should exit with OK, so
		// I'm going to do all of this advisory stuff in an IGNORE.

		SG_ERR_IGNORE(  _advise_after_update(pCtx, pOptSt, pPathCwd, pszBaselineBeforeUpdate)  );
	}

fail:
	SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
	SG_PATHNAME_NULLFREE(pCtx, pPathCwd);
	SG_NULLFREE(pCtx, pszTargetChangeset);
	SG_NULLFREE(pCtx, pszBaselineBeforeUpdate);
}
Code example #5
void SG_tag__add_tags(SG_context * pCtx, SG_repo * pRepo, SG_pendingtree * pPendingTree, const char* psz_spec_cs, SG_bool bRev, SG_bool bForce, const char** ppszTags, SG_uint32 count_args)
{
	SG_pathname* pPathCwd = NULL;
	char* psz_hid_cs = NULL;
	SG_audit q;
	SG_uint32 i = 0;
	char * psz_current_hid_with_that_tag = NULL;
	SG_bool bFreePendingTree = SG_FALSE;

	SG_ERR_CHECK(  SG_audit__init(pCtx,&q,pRepo,SG_AUDIT__WHEN__NOW,SG_AUDIT__WHO__FROM_SETTINGS)  );

	// TODO 4/21/10 pendingtree contains a pRepo inside it.  we should
	// TODO 4/21/10 refactor this to alloc the pendingtree first and then
	// TODO 4/21/10 just borrow the pRepo from it.



	if (psz_spec_cs)
	{
		if (bRev)
		{
			SG_ERR_CHECK(  SG_repo__hidlookup__dagnode(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, psz_spec_cs, &psz_hid_cs)  );
		}
		else
		{
			SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, psz_spec_cs, &psz_hid_cs)  );
			if (psz_hid_cs == NULL)
				SG_ERR_THROW(SG_ERR_TAG_NOT_FOUND);
		}
	}
	else
	{
		// tag the current baseline.
		//
		// when we have an uncommitted merge, we will have more than one parent.
		// what does this command mean then?  It feels like we should throw
		// an error and say that you have to commit first.

		const SG_varray * pva_wd_parents;		// we do not own this
		const char * psz_hid_parent_0;			// we do not own this
		SG_uint32 nrParents;

		if (pPendingTree == NULL)
		{

			SG_ERR_CHECK(  SG_pendingtree__alloc_from_cwd(pCtx, SG_TRUE, &pPendingTree)  );
			bFreePendingTree = SG_TRUE;
		}
		SG_ERR_CHECK(  SG_pendingtree__get_wd_parents__ref(pCtx, pPendingTree, &pva_wd_parents)  );
		SG_ERR_CHECK(  SG_varray__count(pCtx, pva_wd_parents, &nrParents)  );
		if (nrParents > 1)
			SG_ERR_THROW(  SG_ERR_CANNOT_DO_WHILE_UNCOMMITTED_MERGE  );

		SG_ERR_CHECK(  SG_varray__get__sz(pCtx, pva_wd_parents, 0, &psz_hid_parent_0)  );
		SG_ERR_CHECK(  SG_strdup(pCtx, psz_hid_parent_0, &psz_hid_cs)  );
	}

	if (!bForce)
	{
		//Go through and check all tags to make sure that they are not already applied.
		for (i = 0; i < count_args; i++)
		{
			const char * pszTag = ppszTags[i];
			SG_ERR_IGNORE(  SG_vc_tags__lookup__tag(pCtx, pRepo, pszTag, &psz_current_hid_with_that_tag)  );
			if (psz_current_hid_with_that_tag != NULL && 0 != strcmp(psz_current_hid_with_that_tag, psz_hid_cs)) //The tag has been applied, but not to the given changeset.
				SG_ERR_THROW(SG_ERR_TAG_ALREADY_EXISTS);
			SG_NULLFREE(pCtx, psz_current_hid_with_that_tag);
		}
	}
	for (i = 0; i < count_args; i++)
	{
		const char * pszTag = ppszTags[i];
		SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, pszTag, &psz_current_hid_with_that_tag)  );
		if (psz_current_hid_with_that_tag == NULL || 0 != strcmp(psz_current_hid_with_that_tag, psz_hid_cs))
		{
			//The tag has not been applied, or it's been applied to a different dagnode.
			if (psz_current_hid_with_that_tag != NULL && bForce)	// remove it if it's already there
				SG_ERR_CHECK(  SG_vc_tags__remove(pCtx, pRepo, &q, 1, &pszTag)  );
			SG_ERR_CHECK(  SG_vc_tags__add(pCtx, pRepo, psz_hid_cs, pszTag, &q)  );
		}
		SG_NULLFREE(pCtx, psz_current_hid_with_that_tag);
	}

fail:
	SG_NULLFREE(pCtx, psz_current_hid_with_that_tag);
	if (bFreePendingTree == SG_TRUE)
		SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
	SG_NULLFREE(pCtx, psz_hid_cs);
	SG_PATHNAME_NULLFREE(pCtx, pPathCwd);
}
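The TODO near the top of SG_tag__add_tags suggests allocating the pendingtree first and borrowing the repo from it. A rough sketch of a caller doing exactly that, tagging the current baseline (the wrapper name is hypothetical; the individual calls all appear elsewhere in this listing):

// Sketch: tag the current baseline with a single tag.  We open the
// pendingtree from the cwd ourselves and borrow its repo, so
// SG_tag__add_tags does not have to allocate one internally.
static void my_tag_current_baseline(SG_context * pCtx, const char * pszTag, SG_bool bForce)
{
	SG_pendingtree * pPendingTree = NULL;
	SG_repo * pRepo = NULL;			// borrowed from the pendingtree; not freed here

	SG_ERR_CHECK(  SG_pendingtree__alloc_from_cwd(pCtx, SG_TRUE, &pPendingTree)  );
	SG_ERR_CHECK(  SG_pendingtree__get_repo(pCtx, pPendingTree, &pRepo)  );

	// psz_spec_cs == NULL: tag whatever the current baseline is (bRev is ignored in that case).
	SG_ERR_CHECK(  SG_tag__add_tags(pCtx, pRepo, pPendingTree, NULL, SG_FALSE, bForce, &pszTag, 1)  );

fail:
	SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
}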
Code example #6
File: sg__do_cmd_resolve.c  Project: avar/veracity
/**
 * Release VFILE lock and invoke external merge tool for this file.
 *
 * TODO 2010/07/12 The MERGE-PLAN is an array and allows for
 * TODO            multiple steps (for an n-way sub-merge cascade).
 * TODO            But we don't have that part turned on yet in
 * TODO            sg_mrg__private_biuld_wd_issues.h:_make_file_merge_plan(),
 * TODO            so for now, we only expect 1 step.
 * TODO
 * TODO            Also, when we do have multiple steps, we might want to
 * TODO            be able to use the 'status' field to see which steps
 * TODO            were already performed in an earlier RESOLVE.
 * TODO
 * TODO            Also, when we want to support more than 1 step we need
 * TODO            to copy pvaPlan because when we release the pendingtree
 * TODO            the pvhIssue becomes invalidated too.
 */
static void _resolve__fix__run_external_file_merge(SG_context * pCtx,
												   struct _resolve_data * pData,
												   const char * pszGid,
												   const SG_vhash * pvhIssue,
												   SG_string * pStrRepoPath,
												   enum _fix_status * pFixStatus)
{
	_resolve__step_pathnames * pStepPathnames = NULL;
	_resolve__external_tool * pET = NULL;
	const SG_varray * pvaPlan;
	const SG_vhash * pvhStep_0;
	SG_int64 r64;
	SG_uint32 nrSteps;
	SG_mrg_automerge_result result;
	SG_bool bMerged = SG_FALSE;
	SG_bool bIsResolved = SG_FALSE;

	SG_ERR_CHECK(  SG_vhash__get__varray(pCtx, pvhIssue, "conflict_file_merge_plan", (SG_varray **)&pvaPlan)  );
	SG_ERR_CHECK(  SG_varray__count(pCtx, pvaPlan, &nrSteps)  );

	if (nrSteps > 1)
		SG_ERR_THROW2(  SG_ERR_ASSERT,
						(pCtx, "TODO RESOLVE more than 1 step in auto-merge plan for '%s'.", SG_string__sz(pStrRepoPath))  );

	//////////////////////////////////////////////////////////////////
	// Get Step[0]

	SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pvaPlan, 0, (SG_vhash **)&pvhStep_0)  );

	// see if the user has already performed the merge and maybe got interrupted.

	SG_ERR_CHECK(  SG_vhash__get__int64(pCtx, pvhStep_0, "status", &r64)  );
	result = (SG_mrg_automerge_result)r64;
	if (result == SG_MRG_AUTOMERGE_RESULT__SUCCESSFUL)
	{
		SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
								   "TODO Print message about previous successful manual merge of the file content and ask if they want to redo it for '%s'.\n",
								   SG_string__sz(pStrRepoPath))  );

		*pFixStatus = FIX_USER_MERGED;
		goto done;
	}

	SG_ERR_CHECK(  _resolve__step_pathnames__compute(pCtx, pData, pvhIssue, pvhStep_0, pStrRepoPath, &pStepPathnames)  );

	// While we still have a handle to the pendingtree, look up the
	// specifics on the external tool that we should invoke.  These
	// details come from localsettings.

	SG_ERR_CHECK(  _resolve__external_tool__lookup(pCtx, pData, pszGid, pvhIssue, pStrRepoPath, &pET)  );

	// Free the PENDINGTREE so that we release the VFILE lock.

	pvhIssue = NULL;
	pvaPlan = NULL;
	pvhStep_0 = NULL;
	SG_PENDINGTREE_NULLFREE(pCtx, pData->pPendingTree);

	//////////////////////////////////////////////////////////////////
	// Invoke the external tool.

	SG_ERR_CHECK(  _resolve__fix__run_external_file_merge_1(pCtx,
															pData,
															pET,
															pStepPathnames,
															pStrRepoPath,
															&bMerged)  );
	if (!bMerged)
	{
		SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "RESOLVE: Aborting the merge of this file.\n")  );
		*pFixStatus = FIX_USER_ABORTED;
		goto done;
	}

	//////////////////////////////////////////////////////////////////
	// Reload the PENDINGTREE, re-fetch the ISSUE, and update the STATUS on
	// this step in the PLAN.
	// 
	// We duplicate some of the "see if someone else resolved this issue while
	// we were without the lock" stuff.

	SG_ERR_CHECK(  _resolve__lookup_issue(pCtx, pData, pszGid, &pvhIssue)  );
	SG_ERR_CHECK(  _resolve__is_resolved(pCtx, pvhIssue, &bIsResolved)  );
	if (bIsResolved)
	{
		// Someone else marked it resolved while we were waiting for
		// the user to edit the file and while we didn't have the
		// file lock.  We should stop here.

		SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "RESOLVE: Aborting the merge of this file (due to race condition).\n")  );
		*pFixStatus = FIX_LOST_RACE;
		goto done;
	}

	// re-fetch the current step and update the "result" status for it
	// and flush the pendingtree back to disk.
	//
	// we only update the step status -- we DO NOT alter the __DIVERGENT_FILE_EDIT__
	// conflict_flags.

	SG_ERR_CHECK(  SG_vhash__get__varray(pCtx, pvhIssue, "conflict_file_merge_plan", (SG_varray **)&pvaPlan)  );
	SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pvaPlan, 0, (SG_vhash **)&pvhStep_0)  );
	SG_ERR_CHECK(  SG_pendingtree__set_wd_issue_plan_step_status__dont_save_pendingtree(pCtx, pData->pPendingTree,
																						pvhStep_0,
																						SG_MRG_AUTOMERGE_RESULT__SUCCESSFUL)  );
	SG_ERR_CHECK(  SG_pendingtree__save(pCtx, pData->pPendingTree)  );
	SG_PENDINGTREE_NULLFREE(pCtx, pData->pPendingTree);

	SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "RESOLVE: The file content portion of the merge was successful.\n")  );
	*pFixStatus = FIX_USER_MERGED;

	// we defer the delete of the temp input files until we completely
	// resolve the issue.  (This gives us more options if we allow the
	// resolve to be restarted after interruptions.)

done:
	;
fail:
	_RESOLVE__EXTERNAL_TOOL__NULLFREE(pCtx, pET);
	_RESOLVE__STEP_PATHNAMES__NULLFREE(pCtx, pStepPathnames);
}
Code example #7
File: sg__do_cmd_resolve.c  Project: avar/veracity
/**
 * Handle the RESOLVE command.
 *
 *
 */
void do_cmd_resolve(SG_context * pCtx,
					SG_option_state * pOptSt,
					SG_uint32 count_args, const char** paszArgs)
{
	struct _resolve_data data;
	SG_uint32 sum = 0;
	SG_bool bAll = SG_FALSE;
	SG_bool bWantResolved = SG_FALSE;
	SG_bool bWantUnresolved = SG_FALSE;
	SG_bool bReqArg = SG_FALSE;

	memset(&data, 0, sizeof(data));
	data.pPathCwd = NULL;
	data.pPendingTree = NULL;
	data.psaGids = NULL;
	data.bIgnoreWarnings = SG_TRUE;			// TODO what should this be?

	// allow at most ONE of the command options.
	//
	// the --{List,Mark,Unmark}All options do not allow ARGs.
	// 
	// the --{Mark,Unmark} require at least one ARG.
	// the --List allows 0 or more ARGs.
	//
	// if no command option, allow 0 or more ARGs.
	//
	// most commands do not require there to be issues; rather
	// they just don't do anything.
	//
	// WARNING: We set sg_cl_options[].has_arg to 0 for all of
	//          our commands options so that we get all of the
	//          pathnames in ARGs rather than bound to the option.
	//          That is, I want to be able to say:
	//               vv resolve --mark foo bar
	//          rather than:
	//               vv resolve --mark foo --mark bar
	//
	//          It also allows me to have:
	//               vv resolve --list
	//          and
	//               vv resolve --list foo

	if (pOptSt->bListAll)		{ sum++; bAll = SG_TRUE;  bWantResolved = SG_TRUE;  bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
	if (pOptSt->bMarkAll)		{ sum++; bAll = SG_TRUE;  bWantResolved = SG_TRUE;  bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
//	if (pOptSt->bUnmarkAll)		{ sum++; bAll = SG_TRUE;  bWantResolved = SG_TRUE;  bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
	if (pOptSt->bList)
	{
		if (count_args == 0)	{ sum++; bAll = SG_FALSE; bWantResolved = SG_FALSE; bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
		else					{ sum++; bAll = SG_FALSE; bWantResolved = SG_TRUE;  bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
	}
	if (pOptSt->bMark)			{ sum++; bAll = SG_FALSE; bWantResolved = SG_FALSE; bWantUnresolved = SG_TRUE;  bReqArg = SG_TRUE;  }
//	if (pOptSt->bUnmark)		{ sum++; bAll = SG_FALSE; bWantResolved = SG_TRUE;  bWantUnresolved = SG_FALSE; bReqArg = SG_TRUE;  }
	if (sum == 0)				{        bAll = SG_FALSE; bWantResolved = SG_FALSE; bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }

	if (sum > 1)
		SG_ERR_THROW(  SG_ERR_USAGE  );

	if (bReqArg && (count_args == 0))
		SG_ERR_THROW(  SG_ERR_USAGE  );

	if (bAll && (count_args > 0))
		SG_ERR_THROW(  SG_ERR_USAGE  );

	SG_ERR_CHECK(  SG_PATHNAME__ALLOC(pCtx, &data.pPathCwd)  );
	SG_ERR_CHECK(  SG_pathname__set__from_cwd(pCtx, data.pPathCwd)  );

	// Do a complete scan first.  This ensures that the pendingtree knows
	// about everything that is dirty in the WD and helps ensure that every
	// issue in the issues list has a ptnode in the pendingtree.
	// 
	// TODO 2010/07/16 Technically, this should NOT be required.  But it
	// TODO            helps.  The problem is that when a file is edited
	// TODO            we don't automatically get the notification, rather
	// TODO            we do a status aka scan (and/or use the timestamp
	// TODO            cache) when various commands start which detect
	// TODO            file content changes.  So the fact that the MERGE
	// TODO            may have written a bunch of merged/edited files
	// TODO            doesn't necessarily mean that they are listed in
	// TODO            the pendingtree -- because the user may have edited
	// TODO            them again (or edited other files) since the merge
	// TODO            completed.  So we scan.
	// TODO
	// TODO            See also the comment in sg.c:do_cmd_commit() for sprawl-809.
	// TODO
	// TODO            What this scan is helping to hide is a problem where
	// TODO            we're hitting the issues list for GIDs and then
	// TODO            using SG_pendingtree__find_repo_path_by_gid() to
	// TODO            dynamically convert it into a "live/current" repo-path.
	// TODO            and it assumes that it is only called for dirty entries
	// TODO            (or rather, for entries that have a ptnode).  We need
	// TODO            to fix that.

	SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, data.pPathCwd, data.bIgnoreWarnings, &data.pPendingTree)  );
	SG_ERR_CHECK(  SG_pendingtree__scan(pCtx, data.pPendingTree, SG_TRUE, NULL, 0, NULL, 0)  );
	SG_PENDINGTREE_NULLFREE(pCtx, data.pPendingTree);

	// Now load the pendingtree for real.

	SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, data.pPathCwd, data.bIgnoreWarnings, &data.pPendingTree)  );

	if (count_args > 0)
		SG_ERR_CHECK(  _resolve__map_args_to_gids(pCtx, &data, count_args, paszArgs, bWantResolved, bWantUnresolved)  );
	else
		SG_ERR_CHECK(  _resolve__get_all_issue_gids(pCtx, &data, bWantResolved, bWantUnresolved)  );

	//////////////////////////////////////////////////////////////////

	if (pOptSt->bListAll || pOptSt->bList)
	{
		SG_ERR_CHECK(  _resolve__do_list(pCtx, &data)  );
	}
	else if (pOptSt->bMarkAll || pOptSt->bMark)
	{
		SG_ERR_CHECK(  _resolve__do_mark(pCtx, &data, SG_TRUE)  );
	}
//	else if (pOptSt->bUnmarkAll || pOptSt->bUnmark)
//	{
//		SG_ERR_CHECK(  _resolve__do_mark(pCtx, &data, SG_FALSE)  );
//	}
	else // no command option given -- assume we want to FIX the issues
	{
		SG_ERR_CHECK(  _resolve__do_fix(pCtx, &data)  );
	}

fail:
	SG_PATHNAME_NULLFREE(pCtx, data.pPathCwd);
	SG_PENDINGTREE_NULLFREE(pCtx, data.pPendingTree);
	SG_STRINGARRAY_NULLFREE(pCtx, data.psaGids);
}
Code example #8
void SG_workingdir__create_and_get(
	SG_context* pCtx,
	const char* pszDescriptorName,
	const SG_pathname* pPathDirPutTopLevelDirInHere,
	SG_bool bCreateDrawer,
    const char* psz_spec_hid_cs_baseline
	)
{
	SG_repo* pRepo = NULL;
	SG_rbtree* pIdsetLeaves = NULL;
	SG_uint32 count_leaves = 0;
	SG_changeset* pcs = NULL;
	const char* pszidUserSuperRoot = NULL;
	SG_bool b = SG_FALSE;
    char* psz_hid_cs_baseline = NULL;
	SG_pendingtree * pPendingTree = NULL;
	SG_vhash * pvhTimestamps = NULL;

	/*
	 * Fetch the descriptor by its given name and use it to connect to
	 * the repo.
	 */
	SG_ERR_CHECK(  SG_repo__open_repo_instance(pCtx, pszDescriptorName, &pRepo)  );


	if (psz_spec_hid_cs_baseline)
	{
		SG_ERR_CHECK(  SG_strdup(pCtx, psz_spec_hid_cs_baseline, &psz_hid_cs_baseline)  );
	}
	else
    {
        const char* psz_hid = NULL;
        /*
         * If you do not specify a hid to be the baseline, then this routine
         * currently only works if there is exactly one leaf in the repo.
         */
        SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo,SG_DAGNUM__VERSION_CONTROL,&pIdsetLeaves)  );
        SG_ERR_CHECK(  SG_rbtree__count(pCtx, pIdsetLeaves, &count_leaves)  );

		if (count_leaves != 1)
			SG_ERR_THROW(  SG_ERR_MULTIPLE_HEADS_FROM_DAGNODE  );

        SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, pIdsetLeaves, &b, &psz_hid, NULL)  );

        SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_hid, &psz_hid_cs_baseline)  );
    }

	/*
	 * Load the desired changeset from the repo so we can look up the
	 * id of its user root directory
	 */
	SG_ERR_CHECK(  SG_changeset__load_from_repo(pCtx, pRepo, psz_hid_cs_baseline, &pcs)  );
	SG_ERR_CHECK(  SG_changeset__get_root(pCtx, pcs, &pszidUserSuperRoot)  );

	if (bCreateDrawer)
	{
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhTimestamps)  );

		// Retrieve everything into the WD and capture the timestamps on the files that we create.
		SG_ERR_CHECK(  sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere, pszidUserSuperRoot, pvhTimestamps)  );

		// this creates "repo.json" with the repo-descriptor.
		SG_ERR_CHECK(  SG_workingdir__set_mapping(pCtx, pPathDirPutTopLevelDirInHere, pszDescriptorName, NULL)  );

		// this creates an empty "wd.json" file (which doesn't know anything).
		SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, pPathDirPutTopLevelDirInHere, SG_TRUE, &pPendingTree)  );

		// force set the initial parents to the current changeset.
		SG_ERR_CHECK(  SG_pendingtree__set_single_wd_parent(pCtx, pPendingTree, psz_hid_cs_baseline)  );

		// force initialize the timestamp cache to the list that we just built; this should
		// be the only timestamps in the cache since we just populated the WD.
		SG_ERR_CHECK(  SG_pendingtree__set_wd_timestamp_cache(pCtx, pPendingTree, &pvhTimestamps)  );	// this steals our vhash

		SG_ERR_CHECK(  SG_pendingtree__save(pCtx, pPendingTree)  );
	}
	else
	{
		// Retrieve everything into the WD but do not create .sgdrawer or record timestamps.
		// This is more like an EXPORT operation.
		SG_ERR_CHECK(  sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere, pszidUserSuperRoot, NULL)  );
	}


fail:
	SG_VHASH_NULLFREE(pCtx, pvhTimestamps);
    SG_NULLFREE(pCtx, psz_hid_cs_baseline);
	SG_CHANGESET_NULLFREE(pCtx, pcs);
	SG_RBTREE_NULLFREE(pCtx, pIdsetLeaves);
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
}
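A rough sketch of a checkout-style caller for SG_workingdir__create_and_get: populate a working copy from the single head of a named repo and create the drawer, as in the example above (the wrapper name is hypothetical; every call it uses is visible in this listing):

// Sketch: create a working copy of the named repo's single head under the
// current directory, writing repo.json/wd.json via bCreateDrawer = SG_TRUE.
static void my_checkout_into_cwd(SG_context * pCtx, const char * pszDescriptorName)
{
	SG_pathname * pPathCwd = NULL;

	SG_ERR_CHECK(  SG_PATHNAME__ALLOC(pCtx, &pPathCwd)  );
	SG_ERR_CHECK(  SG_pathname__set__from_cwd(pCtx, pPathCwd)  );

	// Pass NULL for the baseline HID: the routine picks the unique leaf and
	// throws SG_ERR_MULTIPLE_HEADS_FROM_DAGNODE if the repo has several heads.
	SG_ERR_CHECK(  SG_workingdir__create_and_get(pCtx, pszDescriptorName, pPathCwd, SG_TRUE, NULL)  );

fail:
	SG_PATHNAME_NULLFREE(pCtx, pPathCwd);
}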