Example #1
void SG_group__list(
	SG_context* pCtx,
    SG_repo* pRepo,
    SG_varray** ppva
    )
{
    SG_string* pstr_where = NULL;
    SG_stringarray* psa_results = NULL;
    char* psz_hid_cs_leaf = NULL;
    SG_vhash* pvh_group = NULL;
    SG_varray* pva_result = NULL;
    SG_stringarray* psa_fields = NULL;

    SG_ERR_CHECK(  SG_zing__get_leaf__fail_if_needs_merge(pCtx, pRepo, SG_DAGNUM__USERS, &psz_hid_cs_leaf)  );

    SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psa_fields, 2)  );
    SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa_fields, "recid")  );
    SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa_fields, "name")  );
    SG_ERR_CHECK(  SG_zing__query(pCtx, pRepo, SG_DAGNUM__USERS, psz_hid_cs_leaf, "group", NULL, "name #ASC", 0, 0, psa_fields, &pva_result)  );

    *ppva = pva_result;
    pva_result = NULL;

fail:
    SG_VHASH_NULLFREE(pCtx, pvh_group);
    SG_NULLFREE(pCtx, psz_hid_cs_leaf);
    SG_VARRAY_NULLFREE(pCtx, pva_result);
    SG_STRINGARRAY_NULLFREE(pCtx, psa_results);
    SG_STRING_NULLFREE(pCtx, pstr_where);
    SG_STRINGARRAY_NULLFREE(pCtx, psa_fields);
}
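
A minimal caller sketch for the example above (assumptions: only functions and macros already shown on this page; the helper name and the iteration are illustrative). It shows the out-parameter convention and the matching SG_VARRAY_NULLFREE in the caller's own cleanup block.

/* Hypothetical caller: fetch the group list and free the returned varray. */
static void my_dump_groups(SG_context* pCtx, SG_repo* pRepo)
{
    SG_varray* pva_groups = NULL;
    SG_uint32 count = 0;

    SG_ERR_CHECK(  SG_group__list(pCtx, pRepo, &pva_groups)  );
    SG_ERR_CHECK(  SG_varray__count(pCtx, pva_groups, &count)  );
    /* ... iterate over the "recid"/"name" records in pva_groups ... */

fail:
    SG_VARRAY_NULLFREE(pCtx, pva_groups);
}
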
void SG_util__convert_argv_into_stringarray(SG_context * pCtx,
											SG_uint32 argc, const char ** argv,
											SG_stringarray ** ppsa)
{
	SG_stringarray * psa = NULL;
	SG_uint32 nrKept = 0;
	SG_uint32 k;

	if (argc && argv)
	{
		SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psa, argc)  );
		for (k=0; k<argc; k++)
		{
			if (!argv[k])	// argv ends at first null pointer regardless of what argc says.
				break;

			if (argv[k][0])		// skip over empty "" strings.
			{
				SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa, argv[k])  );
				nrKept++;
			}
		}

		if (nrKept == 0)
			SG_STRINGARRAY_NULLFREE(pCtx, psa);
	}
	
	*ppsa = psa;
	return;

fail:
	SG_STRINGARRAY_NULLFREE(pCtx, psa);
}
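
A sketch of how a command-line entry point might use the helper above; the wrapper name is hypothetical. Note that *ppsa is legitimately NULL when argc is 0 or every argument was an empty string, so the caller must check it.

/* Illustrative caller: convert leftover argv entries into a stringarray. */
static void my_handle_args(SG_context* pCtx, SG_uint32 argc, const char** argv)
{
    SG_stringarray* psaArgs = NULL;
    SG_uint32 nrArgs = 0;

    SG_ERR_CHECK(  SG_util__convert_argv_into_stringarray(pCtx, argc, argv, &psaArgs)  );
    if (psaArgs)    /* NULL when nothing was kept */
        SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaArgs, &nrArgs)  );
    /* ... dispatch based on nrArgs ... */

fail:
    SG_STRINGARRAY_NULLFREE(pCtx, psaArgs);
}
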
Example #3
void SG_treendx__get_path_in_dagnode(SG_context* pCtx, SG_treendx* pTreeNdx, const char* psz_search_item_gid, const char* psz_changeset, SG_treenode_entry ** ppTreeNodeEntry)
{
	SG_rbtree_iterator * rb_it = NULL;
	const char * pPath = NULL;
	SG_changeset * pChangeset = NULL;
	SG_stringarray * pPaths = NULL;
	const char* pszHidTreeNode = NULL;
	SG_treenode * pTreenodeRoot = NULL;
	char* pszReturnedGID = NULL;
	SG_uint32 i = 0;
	SG_uint32 count = 0;

	SG_ERR_CHECK_RETURN(  SG_gid__argcheck(pCtx, psz_search_item_gid)  );

	SG_ERR_CHECK(  SG_treendx__get_all_paths(pCtx, pTreeNdx, psz_search_item_gid, &pPaths)   );
	*ppTreeNodeEntry = NULL;
	SG_ERR_CHECK(  SG_changeset__load_from_repo(pCtx, pTreeNdx->pRepo, psz_changeset, &pChangeset)  );
	SG_ERR_CHECK(  SG_changeset__get_root(pCtx, pChangeset, &pszHidTreeNode) );
	SG_ERR_CHECK(  SG_treenode__load_from_repo(pCtx, pTreeNdx->pRepo, pszHidTreeNode, &pTreenodeRoot)  );
	SG_ERR_CHECK(  SG_stringarray__count(pCtx, pPaths, &count )  );
	for (i = 0; i < count; i++)
	{
		SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, pPaths, i, &pPath)  );
		SG_ERR_CHECK(  SG_treenode__find_treenodeentry_by_path(pCtx, pTreeNdx->pRepo, pTreenodeRoot, pPath, &pszReturnedGID, ppTreeNodeEntry)  );
		if (*ppTreeNodeEntry != NULL && strcmp(pszReturnedGID, psz_search_item_gid) == 0)
		{
			break;
		}
		else if (*ppTreeNodeEntry != NULL)
		{
			SG_TREENODE_ENTRY_NULLFREE(pCtx, *ppTreeNodeEntry);
			*ppTreeNodeEntry = NULL; // It's not the right GID, even though it's in the right spot.
		}
		SG_NULLFREE(pCtx, pszReturnedGID); // don't leak the GID returned for this path when we loop again
	}

	SG_NULLFREE(pCtx, pszReturnedGID);
	SG_CHANGESET_NULLFREE(pCtx, pChangeset);
	SG_TREENODE_NULLFREE(pCtx, pTreenodeRoot);
	SG_STRINGARRAY_NULLFREE(pCtx, pPaths);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, rb_it);
	return;
fail:
	SG_NULLFREE(pCtx, pszReturnedGID);
	SG_CHANGESET_NULLFREE(pCtx, pChangeset);
	SG_TREENODE_NULLFREE(pCtx, pTreenodeRoot);
	SG_STRINGARRAY_NULLFREE(pCtx, pPaths);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, rb_it);
	return;

}
Example #4
void SG_stringarray__alloc__copy(
	SG_context* pCtx,
	SG_stringarray** ppThis,
	const SG_stringarray* pOther
	)
{
    SG_uint32 count = 0;
    SG_stringarray * pThis = NULL;
    SG_uint32 i;

    SG_ASSERT(pCtx!=NULL);
    SG_NULLARGCHECK_RETURN(ppThis);
    SG_NULLARGCHECK_RETURN(pOther);

    SG_ERR_CHECK(  SG_stringarray__count(pCtx, pOther, &count)  );
    SG_ERR_CHECK(  SG_stringarray__alloc(pCtx, &pThis, count)  );
    for(i=0;i<count;++i)
    {
        const char * sz = NULL;
        SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, pOther, i, &sz)  );
        SG_ERR_CHECK(  SG_stringarray__add(pCtx, pThis, sz)  );
    }

    *ppThis = pThis;

    return;
fail:
    SG_STRINGARRAY_NULLFREE(pCtx, pThis);
}
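
A short, hypothetical usage sketch for the copy routine above. Since SG_stringarray__add copies each string into the new array's own pool (several examples on this page free the source string right after adding it), the copy is independent of pOther and the two arrays are freed separately.

/* Illustrative: take a private copy of a stringarray and read it back. */
static void my_copy_and_read(SG_context* pCtx, const SG_stringarray* psaOrig)
{
    SG_stringarray* psaCopy = NULL;
    const char* psz0 = NULL;
    SG_uint32 count = 0;

    SG_ERR_CHECK(  SG_stringarray__alloc__copy(pCtx, &psaCopy, psaOrig)  );
    SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaCopy, &count)  );
    if (count > 0)
        SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaCopy, 0, &psz0)  );
    /* psz0 is owned by psaCopy; do not free it separately */

fail:
    SG_STRINGARRAY_NULLFREE(pCtx, psaCopy);
}
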
Example #5
void SG_user__lookup_by_email(
	SG_context* pCtx,
    SG_repo* pRepo,
    const char* psz_email,
    SG_vhash** ppvh
    )
{
    char* psz_hid_cs_leaf = NULL;
    SG_vhash* pvh_user = NULL;
    char buf_where[256 + 64];
    SG_stringarray* psa_fields = NULL;

    SG_ERR_CHECK(  SG_zing__get_leaf__fail_if_needs_merge(pCtx, pRepo, SG_DAGNUM__USERS, &psz_hid_cs_leaf)  );

    if (psz_hid_cs_leaf)
    {
        SG_ERR_CHECK(  SG_sprintf(pCtx, buf_where, sizeof(buf_where), "email == '%s'", psz_email)  );

        SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psa_fields, 4)  );
        SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa_fields, "recid")  );
        SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa_fields, "email")  );
        //SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa_fields, "prefix")  );
        SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa_fields, "key")  );
        SG_ERR_CHECK(  SG_zing__query__one(pCtx, pRepo, SG_DAGNUM__USERS, psz_hid_cs_leaf, "user", buf_where, psa_fields, &pvh_user)  );
    }

    *ppvh = pvh_user;
    pvh_user = NULL;

fail:
    SG_STRINGARRAY_NULLFREE(pCtx, psa_fields);
    SG_VHASH_NULLFREE(pCtx, pvh_user);
    SG_NULLFREE(pCtx, psz_hid_cs_leaf);
}
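
A hedged caller sketch for the lookup above; "recid" is one of the fields requested in the query, everything else is illustrative. Because the function can leave *ppvh NULL (for instance when there is no users leaf), the caller has to check before dereferencing.

/* Illustrative: look up a user record and tolerate a missing result. */
static void my_find_user(SG_context* pCtx, SG_repo* pRepo, const char* psz_email)
{
    SG_vhash* pvh_user = NULL;
    const char* psz_recid = NULL;

    SG_ERR_CHECK(  SG_user__lookup_by_email(pCtx, pRepo, psz_email, &pvh_user)  );
    if (pvh_user)
        SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvh_user, "recid", &psz_recid)  );
    /* ... a NULL pvh_user means no matching record was returned ... */

fail:
    SG_VHASH_NULLFREE(pCtx, pvh_user);
}
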
Example #6
void SG_server__get_dagnode_info(
	SG_context* pCtx,
	SG_repo* pRepo,
	SG_vhash* pvhRequest,
	SG_varray** ppvaInfo)
{
	char bufDagnum[SG_DAGNUM__BUF_MAX__DEC];
	SG_vhash* pvhRefVersionControlHids = NULL;
	SG_varray* pvaHids = NULL;
	SG_stringarray* psaHids = NULL;
	const char* const* paszHids = NULL;
	SG_uint32 countHids = 0;

	SG_ERR_CHECK(  SG_dagnum__to_sz__decimal(pCtx, SG_DAGNUM__VERSION_CONTROL, bufDagnum, sizeof(bufDagnum))  );
	SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, bufDagnum, &pvhRefVersionControlHids)  );

	// Ugh.  Is vhash->varray->stringarray->char** the best option?
	SG_ERR_CHECK(  SG_vhash__get_keys(pCtx, pvhRefVersionControlHids, &pvaHids)  );
	SG_ERR_CHECK(  SG_varray__to_stringarray(pCtx, pvaHids, &psaHids)  );
	SG_ERR_CHECK(  SG_stringarray__sz_array_and_count(pCtx, psaHids, &paszHids, &countHids)  );

	SG_ERR_CHECK(  SG_history__query(pCtx, NULL, pRepo, 0, NULL, paszHids, countHids, NULL, NULL, 0, 0, 0, SG_FALSE, SG_FALSE, ppvaInfo)  );

	/* fall through */
fail:
	SG_VARRAY_NULLFREE(pCtx, pvaHids);
	SG_STRINGARRAY_NULLFREE(pCtx, psaHids);
}
void SG_sync_remote__get_dagnode_info(
	SG_context* pCtx,
	SG_repo* pRepo,
	SG_vhash* pvhRequest,
	SG_history_result** ppInfo)
{
	char bufDagnum[SG_DAGNUM__BUF_MAX__HEX];
	SG_vhash* pvhRefVersionControlHids = NULL;
	SG_varray* pvaHids = NULL;
	SG_stringarray* psaHids = NULL;

	SG_ERR_CHECK(  SG_dagnum__to_sz__hex(pCtx, SG_DAGNUM__VERSION_CONTROL, bufDagnum, sizeof(bufDagnum))  );
	SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, bufDagnum, &pvhRefVersionControlHids)  );

	// Ugh.  Is vhash->varray->stringarray the best option?
	SG_ERR_CHECK(  SG_vhash__get_keys(pCtx, pvhRefVersionControlHids, &pvaHids)  );
	SG_ERR_CHECK(  SG_varray__to_stringarray(pCtx, pvaHids, &psaHids)  );

	SG_ERR_CHECK(  SG_history__get_revision_details(pCtx, pRepo, psaHids, NULL, ppInfo)  );

	/* fall through */
fail:
	SG_VARRAY_NULLFREE(pCtx, pvaHids);
	SG_STRINGARRAY_NULLFREE(pCtx, psaHids);
}
bool vvRevSpec::GetTags(
	vvContext&     pCtx,
	wxArrayString& cTags
	) const
{
	SG_stringarray* pValues = NULL;

	SG_ERR_CHECK(  SG_rev_spec__tags(pCtx, this->mpRevSpec, &pValues)  );
	SG_ERR_CHECK(  vvSgHelpers::Convert_sgStringArray_wxArrayString(pCtx, pValues, cTags)  );

fail:
	SG_STRINGARRAY_NULLFREE(pCtx, pValues);
	return !pCtx.Error_Check();
}
void SG_cmd_util__dump_log(
	SG_context * pCtx, 
	SG_console_stream cs,
	SG_repo* pRepo, 
	const char* psz_hid_cs, 
	SG_vhash* pvhCleanPileOfBranches, 
	SG_bool bShowOnlyOpenBranchNames,
	SG_bool bShowFullComments)
{
	SG_history_result* pHistResult = NULL;
	SG_stringarray * psaHids = NULL;
	
	SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psaHids, 1)  );
	SG_ERR_CHECK(  SG_stringarray__add(pCtx, psaHids, psz_hid_cs)  );
	SG_history__get_revision_details(pCtx, pRepo, psaHids, NULL, &pHistResult);

    if (SG_context__err_equals(pCtx, SG_ERR_NOT_FOUND))
    {
		/* There's a branch that references a changeset that doesn't exist. Show what we can. */

		SG_vhash* pvhRefClosedBranches = NULL;
		SG_vhash* pvhRefBranchValues = NULL;

		SG_context__err_reset(pCtx);

		if (pvhCleanPileOfBranches)
		{
			SG_bool bHas = SG_FALSE;
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhCleanPileOfBranches, "closed", &bHas)  );
			if (bHas)
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhCleanPileOfBranches, "closed", &pvhRefClosedBranches)  );

			SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhCleanPileOfBranches, "values", &pvhRefBranchValues)  );
		}

		SG_ERR_CHECK(  SG_console(pCtx, cs, "\n\t%8s:  %s\n", "revision", psz_hid_cs)  );
		SG_ERR_CHECK(  _dump_branch_name(pCtx, cs, psz_hid_cs, bShowOnlyOpenBranchNames, 
			pvhRefBranchValues, pvhRefClosedBranches)  );
		SG_ERR_CHECK(  SG_console(pCtx, cs, "\t%8s   %s\n", "", "(not present in repository)")  );
    }
    else
    {
		SG_ERR_CHECK_CURRENT;
        SG_ERR_CHECK(  SG_cmd_util__dump_history_results(pCtx, cs, pHistResult, pvhCleanPileOfBranches, bShowOnlyOpenBranchNames, bShowFullComments, SG_FALSE)  );
    }

fail:
	SG_HISTORY_RESULT_NULLFREE(pCtx, pHistResult);
	SG_STRINGARRAY_NULLFREE(pCtx, psaHids);
}
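
The function above demonstrates a pattern that recurs in these examples: call a routine without SG_ERR_CHECK, then inspect the error state on pCtx and either recover or rethrow. A condensed sketch of just that pattern, where my_lookup() is a hypothetical routine that throws SG_ERR_NOT_FOUND when the key is absent:

/* Illustrative error-tolerant call: treat NOT_FOUND as recoverable,
 * propagate anything else. my_lookup() is a placeholder. */
static void my_lookup_or_default(SG_context* pCtx, SG_repo* pRepo,
                                 const char* psz_key, SG_vhash** ppvhResult)
{
    SG_vhash* pvh = NULL;

    my_lookup(pCtx, pRepo, psz_key, &pvh);     /* deliberately not wrapped in SG_ERR_CHECK */
    if (SG_context__err_equals(pCtx, SG_ERR_NOT_FOUND))
    {
        SG_context__err_reset(pCtx);           /* swallow the miss; caller sees a NULL result */
    }
    else
    {
        SG_ERR_CHECK_CURRENT;                  /* rethrow any other error */
    }

    *ppvhResult = pvh;
    pvh = NULL;

fail:
    SG_VHASH_NULLFREE(pCtx, pvh);
}
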
Example #10
void SG_stringarray__alloc(
	SG_context* pCtx,
	SG_stringarray** ppThis,
	SG_uint32 space
	)
{
	SG_stringarray* pThis = NULL;

    if(space==0)
        space = 1;

	SG_ERR_CHECK_RETURN(  SG_alloc1(pCtx, pThis)  );

	pThis->space = space;

	SG_ERR_CHECK(  SG_alloc(pCtx, pThis->space, sizeof(const char *), &pThis->aStrings)  );
	SG_ERR_CHECK(  SG_STRPOOL__ALLOC(pCtx, &pThis->pStrPool, pThis->space * 64)  );

	*ppThis = pThis;
	return;

fail:
	SG_STRINGARRAY_NULLFREE(pCtx, pThis);
}
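
Example #10 is the allocator itself; note that a requested space of 0 is bumped to 1 and the backing string pool is pre-sized to space * 64 bytes. A small round trip through the API as it is used elsewhere on this page (the function name and literal strings are illustrative):

/* Illustrative: build a small stringarray, then read it back two ways. */
static void my_stringarray_roundtrip(SG_context* pCtx)
{
    SG_stringarray* psa = NULL;
    const char* const* ppszFlat = NULL;   /* borrowed view, owned by psa */
    const char* pszNth = NULL;
    SG_uint32 count = 0;
    SG_uint32 k;

    SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psa, 2)  );
    SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa, "alpha")  );
    SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa, "beta")  );

    SG_ERR_CHECK(  SG_stringarray__count(pCtx, psa, &count)  );
    for (k = 0; k < count; k++)
        SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psa, k, &pszNth)  );

    /* Flat (const char* const*, count) view for APIs that want a plain array. */
    SG_ERR_CHECK(  SG_stringarray__sz_array_and_count(pCtx, psa, &ppszFlat, &count)  );

fail:
    SG_STRINGARRAY_NULLFREE(pCtx, psa);   /* freeing psa also invalidates ppszFlat and pszNth */
}
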
static void _s2__do_cset_vs_cset(SG_context * pCtx,
                                 const SG_option_state * pOptSt,
                                 const SG_stringarray * psaArgs,
                                 SG_uint32 * pNrErrors)
{
    SG_varray * pvaStatus = NULL;
    SG_varray * pvaStatusDirtyFiles = NULL;
    SG_stringarray * psa1 = NULL;
    SG_string * pStringGidRepoPath = NULL;
    SG_string * pStringErr = NULL;
    SG_uint32 nrErrors = 0;

    SG_ERR_CHECK(  SG_vv2__status(pCtx,
                                  pOptSt->psz_repo,
                                  pOptSt->pRevSpec,
                                  psaArgs,
                                  WC__GET_DEPTH(pOptSt),
                                  SG_FALSE, // bNoSort
                                  &pvaStatus, NULL)  );
    if (pvaStatus)
    {
        if (pOptSt->bInteractive)
        {
            // Filter list down to just modified files and show them one-by-one.
            SG_ERR_CHECK(  _get_dirty_files(pCtx, pvaStatus, &pvaStatusDirtyFiles)  );
            if (pvaStatusDirtyFiles)
                SG_ERR_CHECK(  _do_gui_diffs(pCtx, SG_FALSE, pOptSt, pvaStatusDirtyFiles, &nrErrors)  );
        }
        else
        {
            SG_uint32 k, nrItems;

            // Print the changes with PATCH-like headers.
            // Accumulate any tool errors.
            SG_ERR_CHECK(  SG_varray__count(pCtx, pvaStatus, &nrItems)  );
            for (k=0; k<nrItems; k++)
            {
                SG_vhash * pvhItem;
                const char * pszGid = NULL;

                SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pvaStatus, k, &pvhItem)  );

                // TODO 2013/02/22 Our pvhItem has all of the details for the diff,
                // TODO            but we don't yet have a public API to let it be
                // TODO            used as is.  So we build a @gid repo-path and
                // TODO            run the old historical diff code on a 1-item array
                // TODO            containing this @gid.
                // TODO
                // TODO            We should fix this to just pass down the pvhItem
                // TODO            so that it doesn't have to repeat the status lookup.

                SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhItem, "gid", &pszGid)  );
                SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pStringGidRepoPath)  );
                SG_ERR_CHECK(  SG_string__sprintf(pCtx, pStringGidRepoPath, "@%s", pszGid)  );

                SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psa1, 1)  );
                SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa1, SG_string__sz(pStringGidRepoPath))  );

                SG_vv2__diff_to_stream__throw(pCtx, pOptSt->psz_repo, pOptSt->pRevSpec,
                                              psa1, 0,
                                              SG_TRUE,		// bNoSort -- doesn't matter, only 1 item in list
                                              SG_FALSE,		// bInteractive,
                                              pOptSt->psz_tool);
                // Don't throw the error from the tool.  Just print it on STDERR
                // and remember that we had an error so that we don't stop showing
                // the diffs just because we stumble over a changed binary file
                // or mis-configured tool, for example.
                if (SG_context__has_err(pCtx))
                {
                    SG_context__err_to_string(pCtx, SG_FALSE, &pStringErr);
                    SG_context__err_reset(pCtx);
                    SG_ERR_CHECK(  SG_console__raw(pCtx, SG_CS_STDERR, SG_string__sz(pStringErr))  );
                    SG_STRING_NULLFREE(pCtx, pStringErr);

                    nrErrors++;
                }

                SG_STRING_NULLFREE(pCtx, pStringGidRepoPath);
                SG_STRINGARRAY_NULLFREE(pCtx, psa1);
            }
        }
    }

    *pNrErrors = nrErrors;

fail:
    SG_VARRAY_NULLFREE(pCtx, pvaStatus);
    SG_VARRAY_NULLFREE(pCtx, pvaStatusDirtyFiles);
    SG_STRINGARRAY_NULLFREE(pCtx, psa1);
    SG_STRING_NULLFREE(pCtx, pStringGidRepoPath);
    SG_STRING_NULLFREE(pCtx, pStringErr);
}
/**
 * Do diff of an individual item.
 * When WC-based, we have a "DiffStep" vhash.
 * When historical, we have an item from a pvaStatus.
 *
 */
static void _do_diff1(SG_context * pCtx,
                      SG_bool bWC,
                      const SG_option_state * pOptSt,
                      const SG_vhash * pvhItem,
                      SG_uint32 * piResult)
{
    SG_string * pStringGidRepoPath = NULL;
    SG_vhash * pvhResultCodes = NULL;
    SG_stringarray * psa1 = NULL;
    const char * pszGid;
    SG_int64 i64Result = 0;
    SG_string * pStringErr = NULL;

    SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhItem, "gid", &pszGid)  );

    if (bWC)
    {
        SG_pathname * pPathWc = NULL;
        SG_bool bHasTool = SG_FALSE;

        // With the __diff__setup() and __diff__run() changes, we have already
        // examined the items during the __setup() step and recorded a tool for
        // the *FILES* that have changed content.  So if "tool" isn't set in the
        // DiffStep/Item, we don't need to diff it -- it could be a structural
        // change, a non-file, a found item, etc.
        //
        // we do not use SG_wc__diff__throw() because we already have the diff info
        // and we want to control the result-code processing below.

        SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhItem, "tool", &bHasTool)  );
        if (bHasTool)
            SG_ERR_CHECK(  SG_wc__diff__run(pCtx, pPathWc, pvhItem, &pvhResultCodes)  );
    }
    else
    {
        SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pStringGidRepoPath)  );
        SG_ERR_CHECK(  SG_string__sprintf(pCtx, pStringGidRepoPath, "@%s", pszGid)  );

        SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psa1, 1)  );
        SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa1, SG_string__sz(pStringGidRepoPath))  );
        // we do not use the __throw() version of this routine so we can control
        // result-code processing below.
        SG_ERR_CHECK(  SG_vv2__diff_to_stream(pCtx, pOptSt->psz_repo, pOptSt->pRevSpec,
                                              psa1, 0,
                                              SG_FALSE,		// bNoSort
                                              SG_TRUE,		// bInteractive,
                                              pOptSt->psz_tool,
                                              &pvhResultCodes)  );
    }

    if (pvhResultCodes)
    {
        SG_vhash * pvhResult;				// we do not own this
        SG_ERR_CHECK(  SG_vhash__check__vhash(pCtx, pvhResultCodes, pszGid, &pvhResult)  );
        if (pvhResult)
        {
            const char * pszTool;

            SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhResult, "tool", &pszTool)  );
            SG_ERR_CHECK(  SG_vhash__get__int64(pCtx, pvhResult, "result", &i64Result)  );

            SG_difftool__check_result_code__throw(pCtx, i64Result, pszTool);
            if (SG_context__has_err(pCtx))
            {
                SG_context__err_to_string(pCtx, SG_FALSE, &pStringErr);
                SG_context__err_reset(pCtx);
                SG_ERR_CHECK(  SG_console__raw(pCtx, SG_CS_STDERR, SG_string__sz(pStringErr))  );

                // eat the tool error. the result code is set.
            }
        }
    }

    if (piResult)
        *piResult = (SG_uint32)i64Result;

fail:
    SG_STRING_NULLFREE(pCtx, pStringGidRepoPath);
    SG_VHASH_NULLFREE(pCtx, pvhResultCodes);
    SG_STRINGARRAY_NULLFREE(pCtx, psa1);
    SG_STRING_NULLFREE(pCtx, pStringErr);
}
Example #13
/**
 * Handle the RESOLVE command.
 *
 *
 */
void do_cmd_resolve(SG_context * pCtx,
					SG_option_state * pOptSt,
					SG_uint32 count_args, const char** paszArgs)
{
	struct _resolve_data data;
	SG_uint32 sum = 0;
	SG_bool bAll = SG_FALSE;
	SG_bool bWantResolved = SG_FALSE;
	SG_bool bWantUnresolved = SG_FALSE;
	SG_bool bReqArg = SG_FALSE;

	memset(&data, 0, sizeof(data));
	data.pPathCwd = NULL;
	data.pPendingTree = NULL;
	data.psaGids = NULL;
	data.bIgnoreWarnings = SG_TRUE;			// TODO what should this be?

	// allow at most ONE of the command options.
	//
	// the --{List,Mark,Unmark}All options do not allow ARGs.
	// 
	// the --{Mark,Unmark} require at least one ARG.
	// the --List allows 0 or more ARGs.
	//
	// if no command option, allow 0 or more ARGs.
	//
	// most commands do not require there to be issues; rather
	// they just don't do anything.
	//
	// WARNING: We set sg_cl_options[].has_arg to 0 for all of
	//          our command options so that we get all of the
	//          pathnames in ARGs rather than bound to the option.
	//          That is, I want to be able to say:
	//               vv resolve --mark foo bar
	//          rather than:
	//               vv resolve --mark foo --mark bar
	//
	//          It also allows me to have:
	//               vv resolve --list
	//          and
	//               vv resolve --list foo

	if (pOptSt->bListAll)		{ sum++; bAll = SG_TRUE;  bWantResolved = SG_TRUE;  bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
	if (pOptSt->bMarkAll)		{ sum++; bAll = SG_TRUE;  bWantResolved = SG_TRUE;  bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
//	if (pOptSt->bUnmarkAll)		{ sum++; bAll = SG_TRUE;  bWantResolved = SG_TRUE;  bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
	if (pOptSt->bList)
	{
		if (count_args == 0)	{ sum++; bAll = SG_FALSE; bWantResolved = SG_FALSE; bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
		else					{ sum++; bAll = SG_FALSE; bWantResolved = SG_TRUE;  bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }
	}
	if (pOptSt->bMark)			{ sum++; bAll = SG_FALSE; bWantResolved = SG_FALSE; bWantUnresolved = SG_TRUE;  bReqArg = SG_TRUE;  }
//	if (pOptSt->bUnmark)		{ sum++; bAll = SG_FALSE; bWantResolved = SG_TRUE;  bWantUnresolved = SG_FALSE; bReqArg = SG_TRUE;  }
	if (sum == 0)				{        bAll = SG_FALSE; bWantResolved = SG_FALSE; bWantUnresolved = SG_TRUE;  bReqArg = SG_FALSE; }

	if (sum > 1)
		SG_ERR_THROW(  SG_ERR_USAGE  );

	if (bReqArg && (count_args == 0))
		SG_ERR_THROW(  SG_ERR_USAGE  );

	if (bAll && (count_args > 0))
		SG_ERR_THROW(  SG_ERR_USAGE  );

	SG_ERR_CHECK(  SG_PATHNAME__ALLOC(pCtx, &data.pPathCwd)  );
	SG_ERR_CHECK(  SG_pathname__set__from_cwd(pCtx, data.pPathCwd)  );

	// Do a complete scan first.  This ensures that the pendingtree knows
	// about everything that is dirty in the WD and helps ensure that every
	// issue in the issues list has a ptnode in the pendingtree.
	// 
	// TODO 2010/07/16 Technically, this should NOT be required.  But it
	// TODO            helps.  The problem is that when a file is edited
	// TODO            we don't automatically get the notification, rather
	// TODO            we do a status aka scan (and/or use the timestamp
	// TODO            cache) when various commands start which detect
	// TODO            file content changes.  So the fact that the MERGE
	// TODO            may have written a bunch of merged/edited files
	// TODO            doesn't necessarily mean that they are listed in
	// TODO            the pendingtree -- because the user may have edited
	// TODO            them again (or edited other files) since the merge
	// TODO            completed.  So we scan.
	// TODO
	// TODO            See also the comment in sg.c:do_cmd_commit() for sprawl-809.
	// TODO
	// TODO            What this scan is helping to hide is a problem where
	// TODO            we're hitting the issues list for GIDs and then
	// TODO            using SG_pendingtree__find_repo_path_by_gid() to
	// TODO            dynamically convert it into a "live/current" repo-path.
	// TODO            and it assumes that it is only called for dirty entries
	// TODO            (or rather, for entries that have a ptnode).  We need
	// TODO            to fix that.

	SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, data.pPathCwd, data.bIgnoreWarnings, &data.pPendingTree)  );
	SG_ERR_CHECK(  SG_pendingtree__scan(pCtx, data.pPendingTree, SG_TRUE, NULL, 0, NULL, 0)  );
	SG_PENDINGTREE_NULLFREE(pCtx, data.pPendingTree);

	// Now load the pendingtree for real.

	SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, data.pPathCwd, data.bIgnoreWarnings, &data.pPendingTree)  );

	if (count_args > 0)
		SG_ERR_CHECK(  _resolve__map_args_to_gids(pCtx, &data, count_args, paszArgs, bWantResolved, bWantUnresolved)  );
	else
		SG_ERR_CHECK(  _resolve__get_all_issue_gids(pCtx, &data, bWantResolved, bWantUnresolved)  );

	//////////////////////////////////////////////////////////////////

	if (pOptSt->bListAll || pOptSt->bList)
	{
		SG_ERR_CHECK(  _resolve__do_list(pCtx, &data)  );
	}
	else if (pOptSt->bMarkAll || pOptSt->bMark)
	{
		SG_ERR_CHECK(  _resolve__do_mark(pCtx, &data, SG_TRUE)  );
	}
//	else if (pOptSt->bUnmarkAll || pOptSt->bUnmark)
//	{
//		SG_ERR_CHECK(  _resolve__do_mark(pCtx, &data, SG_FALSE)  );
//	}
	else // no command option given -- assume we want to FIX the issues
	{
		SG_ERR_CHECK(  _resolve__do_fix(pCtx, &data)  );
	}

fail:
	SG_PATHNAME_NULLFREE(pCtx, data.pPathCwd);
	SG_PENDINGTREE_NULLFREE(pCtx, data.pPendingTree);
	SG_STRINGARRAY_NULLFREE(pCtx, data.psaGids);
}
void sg_vv2__history__working_folder(
	SG_context * pCtx,
	const SG_stringarray * psaInputs,
	const SG_rev_spec* pRevSpec,
	const SG_rev_spec* pRevSpec_single_revisions,
	const char* pszUser,
	const char* pszStamp,
	SG_bool bDetectCurrentBranch,
	SG_uint32 nResultLimit,
	SG_bool bHideObjectMerges,
	SG_int64 nFromDate,
	SG_int64 nToDate,
	SG_bool bListAll,
	SG_bool* pbHasResult, 
	SG_vhash** ppvhBranchPile,
	SG_history_result ** ppResult,
	SG_history_token ** ppHistoryToken)
{
	SG_repo * pRepo = NULL;
	SG_stringarray * pStringArrayGIDs = NULL;
	SG_stringarray * pStringArrayChangesets = NULL;
	SG_stringarray * pStringArrayChangesetsMissing = NULL;
	SG_stringarray * pStringArrayChangesets_single_revisions = NULL;
	SG_bool bRecommendDagWalk = SG_FALSE;
	SG_bool bLeaves = SG_FALSE;
	const char * pszBranchName = NULL;	// we do not own this
	SG_vhash* pvhBranchPile = NULL;
	SG_varray* pvaParents = NULL;	// we do not own this
	SG_bool bMyBranchWalkRecommendation = SG_FALSE;
	
	SG_rev_spec* pRevSpec_Allocated = NULL;
	SG_wc_tx * pWcTx = NULL;
	SG_vhash * pvhInfo = NULL;
	SG_uint32 count_args = 0;
	SG_uint32 countRevsSpecified = 0;

	if (psaInputs)
		SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaInputs, &count_args)  );

	// Use the WD to try to get the initial info.
	// I'm going to deviate from the model and use
	// a read-only TX here so that I can get a bunch
	// of fields that we need later.
	SG_ERR_CHECK(  SG_WC_TX__ALLOC__BEGIN(pCtx, &pWcTx, NULL, SG_TRUE)  );

	if (count_args > 0)
		SG_ERR_CHECK(  SG_wc_tx__get_item_gid__stringarray(pCtx, pWcTx, psaInputs, &pStringArrayGIDs)  );

	SG_ERR_CHECK(  SG_wc_tx__get_wc_info(pCtx, pWcTx, &pvhInfo)  );
	SG_ERR_CHECK(  SG_wc_tx__get_repo_and_wd_top(pCtx, pWcTx, &pRepo, NULL)  );

	/* If no revisions were specified, and the caller wants us to use the current branch,
	 * create a revision spec with the current branch. */

	if (pRevSpec)
	{
		SG_ERR_CHECK(  SG_REV_SPEC__ALLOC__COPY(pCtx, pRevSpec, &pRevSpec_Allocated)  );
		SG_ERR_CHECK(  SG_rev_spec__count(pCtx, pRevSpec_Allocated, &countRevsSpecified)  );
	}
	else
	{
		SG_ERR_CHECK(  SG_REV_SPEC__ALLOC(pCtx, &pRevSpec_Allocated)  );
	}

	if (pRevSpec_single_revisions != NULL)
	{
		SG_uint32 countRevsSpecified_singles = 0;
		SG_ERR_CHECK(  SG_rev_spec__count(pCtx, pRevSpec_single_revisions, &countRevsSpecified_singles)  );
		countRevsSpecified += countRevsSpecified_singles;
	}

	if (bDetectCurrentBranch && countRevsSpecified == 0) 
	{
		SG_ERR_CHECK(  SG_vhash__check__sz(pCtx, pvhInfo, "branch", &pszBranchName)  );
		if (pszBranchName)
		{
			/* The working folder is attached to a branch. Does it exist? */
			SG_bool bHasBranches = SG_FALSE;
			SG_bool bBranchExists = SG_FALSE;

			SG_ERR_CHECK(  SG_vc_branches__cleanup(pCtx, pRepo, &pvhBranchPile)  );
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhBranchPile, "branches", &bHasBranches)  );
			if (bHasBranches)
			{
				SG_vhash* pvhRefBranches;
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhBranchPile, "branches", &pvhRefBranches)  );
				SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRefBranches, pszBranchName, &bBranchExists)  );
			}
				
			if (bBranchExists)
			{
				SG_uint32 numParents, i;
				const char* pszRefParent;

				/* If that branch exists, just add to our rev spec. */
				SG_ERR_CHECK(  SG_rev_spec__add_branch(pCtx, pRevSpec_Allocated, pszBranchName)  );

				/* Plus, if the working folder's parents are not in the branch (yet), add them as well
				 * (they'll be in it after the user commits something...). */
				SG_ERR_CHECK(  SG_vhash__get__varray(pCtx, pvhInfo, "parents", &pvaParents)  );
				SG_ERR_CHECK(  SG_varray__count(pCtx, pvaParents, &numParents)  );
				for (i = 0; i < numParents; i++)
				{
					SG_bool already_in_rev_spec = SG_FALSE;
					SG_ERR_CHECK(  SG_varray__get__sz(pCtx, pvaParents, i, &pszRefParent)  );
					SG_ERR_CHECK(  SG_rev_spec__contains(pCtx, pRepo, pRevSpec_Allocated, pszRefParent,
														 &already_in_rev_spec)  );
					if(!already_in_rev_spec)
						SG_ERR_CHECK(  SG_rev_spec__add_rev(pCtx, pRevSpec_Allocated, pszRefParent)  );
				}
			}
			else
			{
				/* If the branch doesn't exist, add the working folder's baseline(s) to the rev spec
				 * and force a dag walk. */
				SG_uint32 numParents, i;
				const char* pszRefParent;

				SG_ERR_CHECK(  SG_vhash__get__varray(pCtx, pvhInfo, "parents", &pvaParents)  );
				SG_ERR_CHECK(  SG_varray__count(pCtx, pvaParents, &numParents)  );
				for (i = 0; i < numParents; i++)
				{
					SG_ERR_CHECK(  SG_varray__get__sz(pCtx, pvaParents, i, &pszRefParent)  );
					SG_ERR_CHECK(  SG_rev_spec__add_rev(pCtx, pRevSpec_Allocated, pszRefParent)  );
				}
				bMyBranchWalkRecommendation = SG_TRUE;
			}

		}
	}

	// Determine the starting changeset IDs.  strBranch and bLeaves control this.
	// We do this step here, so that repo paths can be looked up before we call into history__core.
	SG_ERR_CHECK( sg_vv2__history__get_starting_changesets(pCtx, pRepo, pRevSpec_Allocated,
														   &pStringArrayChangesets,
														   &pStringArrayChangesetsMissing,
														   &bRecommendDagWalk,
														   &bLeaves) );
	if (pStringArrayChangesetsMissing)
	{
		// See K2177, K1322, W0836, W8132.  We requested specific starting
		// points and ran into some csets that were referenced (by --tag
		// or --branch) that are not present in the local repo.  Try to
		// silently ignore them.
		SG_uint32 nrFound = 0;
		SG_ERR_CHECK(  SG_stringarray__count(pCtx, pStringArrayChangesets, &nrFound)  );
		if (nrFound > 0)
		{
			// Yes there were missing csets, but we still found some
			// of the referenced ones.  Just ignore the missing ones.
			// This should behave just like we had the older tag/branch
			// dag prior to the push -r on the vc dag.
		}
		else
		{
			const char * psz_0;
			// TODO 2012/10/19 Do we want a different message if the number of missing is > 1 ?
			SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, pStringArrayChangesetsMissing, 0, &psz_0)  );
			SG_ERR_THROW2(  SG_ERR_CHANGESET_BLOB_NOT_FOUND,
							(pCtx, "%s", psz_0)  );
		}
	}

	bRecommendDagWalk = bRecommendDagWalk || bMyBranchWalkRecommendation;
	
	// This hack is here to detect when we're being asked for the parent of a certain
	// object from the sg_parents code; sg_parents always wants the dag walk.
	// A better solution would be to allow users to pass in a flag about their
	// dag-walk preferences.
	if (count_args == 1 && nResultLimit == 1)
		bRecommendDagWalk = SG_TRUE;

	if (bListAll)
	{
		// See W8493.  If they gave us a --list-all along with a --rev or --tag, they
		// want to force us to show the full history rather than just the info for the
		// named cset.
		bRecommendDagWalk = SG_TRUE;
	}

	if (pRevSpec_single_revisions)
	{
		// We DO NOT pass a psaMissingHids here because we want
		// it to throw if the user names a missing cset.
		SG_ERR_CHECK(  SG_rev_spec__get_all__repo__dedup(pCtx, pRepo, pRevSpec_single_revisions, SG_TRUE,
														 &pStringArrayChangesets_single_revisions, NULL)  );
	}

	// TODO 2012/07/03 This deviates from the model.  This call directly returns the
	// TODO            allocated data into the caller's pointers.  If anything fails
	// TODO            (such as the call to get the branches below), we'll probably
	// TODO            leak the result and token.

	SG_ERR_CHECK( SG_history__run(pCtx, pRepo, pStringArrayGIDs,
					pStringArrayChangesets, pStringArrayChangesets_single_revisions,
					pszUser, pszStamp, nResultLimit, bLeaves, bHideObjectMerges,
					nFromDate, nToDate, bRecommendDagWalk, SG_FALSE, pbHasResult, ppResult, ppHistoryToken) );

	/* This is kind of a hack. History callers often need branch data to format output.
	 * But we open the repo down here. I didn't want to open/close it again. And there's logic
	 * in here about which repo to open. So instead, we do this. */
	if (ppvhBranchPile)
	{
		if (pvhBranchPile)
		{
			*ppvhBranchPile = pvhBranchPile;
			pvhBranchPile = NULL;
		}
		else
			SG_ERR_CHECK(  SG_vc_branches__cleanup(pCtx, pRepo, ppvhBranchPile)  );
	}

fail:
	SG_ERR_IGNORE(  SG_wc_tx__cancel(pCtx, pWcTx)  );
	SG_WC_TX__NULLFREE(pCtx, pWcTx);
	SG_REV_SPEC_NULLFREE(pCtx, pRevSpec_Allocated);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesets);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesetsMissing);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesets_single_revisions);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayGIDs);
	SG_VHASH_NULLFREE(pCtx, pvhBranchPile);
	SG_VHASH_NULLFREE(pCtx, pvhInfo);
	SG_REPO_NULLFREE(pCtx, pRepo);
}
void sg_vv2__history__repo2(
	SG_context * pCtx,
	SG_repo * pRepo, 
	const SG_stringarray * psaArgs,  // if present these must be full repo-paths
	const SG_rev_spec* pRevSpec,
	const SG_rev_spec* pRevSpec_single_revisions,
	const char* pszUser,
	const char* pszStamp,
	SG_uint32 nResultLimit,
	SG_bool bHideObjectMerges,
	SG_int64 nFromDate,
	SG_int64 nToDate,
	SG_bool bListAll,
	SG_bool bReassembleDag,
	SG_bool* pbHasResult,
	SG_history_result ** ppResult,
	SG_history_token ** ppHistoryToken)
{
	SG_stringarray * pStringArrayChangesets_starting = NULL;
	SG_stringarray * pStringArrayChangesetsMissing = NULL;
	SG_stringarray * pStringArrayChangesets_single_revisions = NULL;
	SG_bool bRecommendDagWalk = SG_FALSE;
	SG_bool bLeaves = SG_FALSE;
	const char * pszCurrentDagnodeID = NULL;
	SG_stringarray * pStringArrayGIDs = NULL;
	SG_uint32 i = 0, nChangesetCount = 0;
	SG_uint32 count_args = 0;

	//Determine the starting changeset IDs.  strBranch and bLeaves control this.
	//We do this step here, so that repo paths can be looked up before we call into history__core.
	SG_ERR_CHECK( sg_vv2__history__get_starting_changesets(pCtx, pRepo, pRevSpec,
														   &pStringArrayChangesets_starting,
														   &pStringArrayChangesetsMissing,
														   &bRecommendDagWalk,
														   &bLeaves) );
	if (pStringArrayChangesetsMissing)
	{
		// See K2177, K1322, W0836, W8132.  We requested specific starting
		// points and ran into some csets that were referenced (by --tag
		// or --branch) that are not present in the local repo.  Try to
		// silently ignore them.
		SG_uint32 nrFound = 0;
		SG_ERR_CHECK(  SG_stringarray__count(pCtx, pStringArrayChangesets_starting, &nrFound)  );
		if (nrFound > 0)
		{
			// Yes there were missing csets, but we still found some
			// of the referenced ones.  Just ignore the missing ones.
			// This should behave just like we had the older tag/branch
			// dag prior to the push -r on the vc dag.
		}
		else
		{
			const char * psz_0;
			// TODO 2012/10/19 Do we want a different message if the number of missing is > 1 ?
			SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, pStringArrayChangesetsMissing, 0, &psz_0)  );
			SG_ERR_THROW2(  SG_ERR_CHANGESET_BLOB_NOT_FOUND,
							(pCtx, "%s", psz_0)  );
		}
	}

	if (bListAll)
	{
		// See W8493.  If they gave us a --list-all along with a --rev or --tag, they
		// want to force us to show the full history rather than just the info for the
		// named cset.
		bRecommendDagWalk = SG_TRUE;
	}

	if (pRevSpec_single_revisions)
	{
		// We DO NOT pass a psaMissingHids here because we want
		// it to throw if the user names a missing cset.
		SG_ERR_CHECK(  SG_rev_spec__get_all__repo__dedup(pCtx, pRepo, pRevSpec_single_revisions, SG_TRUE,
														 &pStringArrayChangesets_single_revisions, NULL)  );
	}

	// TODO 2012/07/02 Is the following loop really what we want?
	// TODO            As written, it will look at each changeset
	// TODO            in the list and lookup each item's GID with
	// TODO            it.  If cset[k] does not have *ALL* of the
	// TODO            items, we discard the array of GIDs already
	// TODO            discovered and then try cset[k+1].
	// TODO
	// TODO            This seems wasteful and likely to fail in
	// TODO            the presence of lots of adds and deletes.
	// TODO
	// TODO            Seems like it would be better to start the
	// TODO            result list outside of the loop and remove
	// TODO            items from the search list as we find them
	// TODO            as we iterate over the csets.  This would
	// TODO            let us stop as soon as we have them all and
	// TODO            not require us to repeat the expensive mapping
	// TODO            of repo-path to gid.
	// TODO
	// TODO            Then again, I think the caller has limited us
	// TODO            to only having *1* item in the set of files/folders,
	// TODO            so this might not actually matter.

	if (psaArgs)
		SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaArgs, &count_args)  );
	if (count_args > 0)
	{
		SG_ERR_CHECK(  SG_stringarray__count(pCtx, pStringArrayChangesets_starting, &nChangesetCount)  );
		//Look up the GIDs for all of the arguments.
		//Try every changeset, until we get one that has the GID in question.
		for (i = 0; i < nChangesetCount; i++)
		{
			SG_ERR_CHECK( SG_stringarray__get_nth(pCtx, pStringArrayChangesets_starting, i, &pszCurrentDagnodeID) );

			// This might be used if you have --leaves, or if there are multiple parents.
			// Since they specified a changeset, we need to use the full repo path @/blah/blah to look up the objects.
			sg_vv2__history__lookup_gids_by_repopaths(pCtx, pRepo, pszCurrentDagnodeID, psaArgs,
													  &pStringArrayGIDs);
			if (SG_CONTEXT__HAS_ERR(pCtx))
			{
				if (i == (nChangesetCount - 1) || ! SG_context__err_equals(pCtx, SG_ERR_NOT_FOUND) )
				{
					SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayGIDs);
					SG_ERR_RETHROW;
				}
				else
				{
					SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayGIDs);
					SG_context__err_reset(pCtx);
				}
			}
			else
				break;
		}
	}

	//Call history core with the GIDs
	SG_ERR_CHECK( SG_history__run(pCtx, pRepo, pStringArrayGIDs,
					pStringArrayChangesets_starting, pStringArrayChangesets_single_revisions,
					pszUser, pszStamp, nResultLimit, bLeaves, bHideObjectMerges,
					nFromDate, nToDate, bRecommendDagWalk, bReassembleDag, pbHasResult, ppResult, ppHistoryToken) );

fail:
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayGIDs);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesets_starting);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesetsMissing);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesets_single_revisions);
}
void do_cmd_merge_preview(SG_context * pCtx, SG_option_state * pOptSt)
{
	SG_repo * pRepo = NULL;
	
	SG_uint32 countRevSpecs = 0;
	SG_stringarray * psaRevSpecs = NULL;
	const char * const * ppszRevSpecs = NULL;
	
	SG_stringarray * psaNewChangesets = NULL;
	const char * const * ppszNewChangesets = NULL;
	SG_uint32 countNewChangesets = 0;
	
	char * pszHidBaseline = NULL;
	char * pszHidMergeTarget = NULL;
	SG_dagquery_relationship relationship;
	
	SG_vhash * pvhPileOfCleanBranches = NULL;
	SG_uint32 i = 0;
	
	countRevSpecs = 0;
	if (pOptSt->pRevSpec)
	{
		SG_ERR_CHECK(  SG_rev_spec__count(pCtx, pOptSt->pRevSpec, &countRevSpecs)  );
		if(countRevSpecs>2)
			SG_ERR_THROW(SG_ERR_USAGE);
	}
	
	if(pOptSt->psz_repo!=NULL)
	{
		if(countRevSpecs==2)
		{
			SG_ERR_CHECK(  SG_REPO__OPEN_REPO_INSTANCE(pCtx, pOptSt->psz_repo, &pRepo)  );
			SG_ERR_CHECK(  SG_rev_spec__get_all__repo(pCtx, pRepo, pOptSt->pRevSpec, SG_FALSE, &psaRevSpecs, NULL)  );
			SG_ERR_CHECK(  SG_stringarray__sz_array(pCtx, psaRevSpecs, &ppszRevSpecs)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[0], &pszHidBaseline)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[1], &pszHidMergeTarget)  );
			SG_STRINGARRAY_NULLFREE(pCtx, psaRevSpecs);
		}
		else
		{
			SG_ERR_THROW2(SG_ERR_USAGE, (pCtx, "When using the --repo option, you must provide both the BASELINE-REVSPEC and the OTHER-REVSPEC."));
		}
	}
	else
	{
		SG_ERR_CHECK(  SG_cmd_util__get_repo_from_cwd(pCtx, &pRepo, NULL)  );
		if(countRevSpecs==2)
		{
			SG_ERR_CHECK(  SG_rev_spec__get_all__repo(pCtx, pRepo, pOptSt->pRevSpec, SG_FALSE, &psaRevSpecs, NULL)  );
			SG_ERR_CHECK(  SG_stringarray__sz_array(pCtx, psaRevSpecs, &ppszRevSpecs)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[0], &pszHidBaseline)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[1], &pszHidMergeTarget)  );
			SG_STRINGARRAY_NULLFREE(pCtx, psaRevSpecs);
		}
		else
		{
			SG_uint32 countBaselines = 0;
			SG_ERR_CHECK(  SG_wc__get_wc_parents__stringarray(pCtx, NULL, &psaRevSpecs)  );
			SG_ERR_CHECK(  SG_stringarray__sz_array_and_count(pCtx, psaRevSpecs, &ppszRevSpecs, &countBaselines)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[0], &pszHidBaseline)  );
			if(countBaselines==2)
			{
				SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[1], &pszHidMergeTarget)  );
			}
			else
			{
				SG_wc_merge_args merge_args;
				merge_args.pRevSpec = pOptSt->pRevSpec;
				merge_args.bNoAutoMergeFiles = SG_TRUE;	// doesn't matter
				merge_args.bComplainIfBaselineNotLeaf = SG_FALSE;	// doesn't matter
				SG_ERR_CHECK(  SG_wc__merge__compute_preview_target(pCtx, NULL, &merge_args, &pszHidMergeTarget)  );
			}
			SG_STRINGARRAY_NULLFREE(pCtx, psaRevSpecs);
		}
	}
	
	SG_ERR_CHECK(  SG_dagquery__how_are_dagnodes_related(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL,
		pszHidMergeTarget, pszHidBaseline,
		SG_FALSE, SG_FALSE,
		&relationship)  );
	if(relationship==SG_DAGQUERY_RELATIONSHIP__ANCESTOR || relationship==SG_DAGQUERY_RELATIONSHIP__SAME)
	{
		SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDOUT, "The baseline already includes the merge target. No merge is needed.\n")  );
	}
	else
	{
		SG_ERR_CHECK(  SG_dagquery__find_new_since_common(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, pszHidBaseline, pszHidMergeTarget, &psaNewChangesets)  );
		SG_ERR_CHECK(  SG_stringarray__sz_array_and_count(pCtx, psaNewChangesets, &ppszNewChangesets, &countNewChangesets)  );
		
		SG_ERR_CHECK(  SG_vc_branches__cleanup(pCtx, pRepo, &pvhPileOfCleanBranches)  );
		for(i=0; i<countNewChangesets; ++i)
		{
			SG_ERR_CHECK(  SG_cmd_util__dump_log(pCtx, SG_CS_STDOUT, pRepo, ppszNewChangesets[i], pvhPileOfCleanBranches, SG_TRUE, SG_FALSE)  );
		}
		
		if(relationship==SG_DAGQUERY_RELATIONSHIP__DESCENDANT)
		{
			SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDOUT, "\nFast-Forward Merge to '%s' brings in %i changeset%s.\n", pszHidMergeTarget, countNewChangesets, ((countNewChangesets==1)?"":"s"))  );
		}
		else
		{
			SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDOUT, "\nMerge with '%s' brings in %i changeset%s.\n", pszHidMergeTarget, countNewChangesets, ((countNewChangesets==1)?"":"s"))  );
		}
	}

	SG_VHASH_NULLFREE(pCtx, pvhPileOfCleanBranches);
	SG_STRINGARRAY_NULLFREE(pCtx, psaNewChangesets);
	SG_NULLFREE(pCtx, pszHidBaseline);
	SG_NULLFREE(pCtx, pszHidMergeTarget);
	SG_REPO_NULLFREE(pCtx, pRepo);

	return;
fail:
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_STRINGARRAY_NULLFREE(pCtx, psaNewChangesets);
	SG_STRINGARRAY_NULLFREE(pCtx, psaRevSpecs);
	SG_NULLFREE(pCtx, pszHidBaseline);
	SG_NULLFREE(pCtx, pszHidMergeTarget);
	SG_VHASH_NULLFREE(pCtx, pvhPileOfCleanBranches);
}
static void _merge__compute_target_hid(SG_context * pCtx,
									   SG_mrg * pMrg)
{
	const SG_rev_spec * pRevSpec = ((pMrg->pMergeArgs) ? pMrg->pMergeArgs->pRevSpec : NULL);
	SG_stringarray * psaHids = NULL;
	SG_stringarray * psaMissingHids = NULL;
	SG_rev_spec * pRevSpec_Allocated = NULL;
	SG_bool bRequestedAttachedBranch = SG_FALSE;
	SG_stringarray * psaBranchesRequested = NULL;
	const char * pszBranchNameRequested = NULL;
	SG_uint32 nrMatched = 0;
	SG_uint32 nrMatchedExcludingParent = 0;

	if (pRevSpec)
	{
		SG_uint32 uTotal = 0u;
		SG_uint32 uBranches = 0u;

		SG_ERR_CHECK(  SG_rev_spec__count(pCtx, pRevSpec, &uTotal)  );
		SG_ERR_CHECK(  SG_rev_spec__count_branches(pCtx, pRevSpec, &uBranches)  );
		if (uTotal == 0u)
		{
			// if the rev spec is empty, just pretend it doesn't exist
			pRevSpec = NULL;
		}
		else if (uTotal > 1u)
		{
			// we can only handle a single specification
			SG_ERR_THROW2(SG_ERR_INVALIDARG, (pCtx, "Merge can accept at most one revision/tag/branch specifier."));
		}
		else if (uTotal == 1u && uBranches == 1u)
		{
			SG_ERR_CHECK(  SG_rev_spec__branches(pCtx, (/*const*/ SG_rev_spec *)pRevSpec, &psaBranchesRequested)  );
			SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaBranchesRequested, 0, &pszBranchNameRequested)  );

			if (pMrg->pszBranchName_Starting)
				bRequestedAttachedBranch = (strcmp(pszBranchNameRequested, pMrg->pszBranchName_Starting) == 0);
		}
	}

	if (!pRevSpec)
	{
        if (!pMrg->pszBranchName_Starting)
            SG_ERR_THROW(  SG_ERR_NOT_TIED  );

		SG_ERR_CHECK(  SG_REV_SPEC__ALLOC(pCtx, &pRevSpec_Allocated)  );
		SG_ERR_CHECK(  SG_rev_spec__add_branch(pCtx, pRevSpec_Allocated, pMrg->pszBranchName_Starting)  );
		pRevSpec = pRevSpec_Allocated;
		pszBranchNameRequested = pMrg->pszBranchName_Starting;
		bRequestedAttachedBranch = SG_TRUE;
	}

	// Lookup the given (or synthesized) --rev/--tag/--branch
	// and see how many csets it refers to.  Disregard/filter-out
	// any that are not present in the local repo.

	SG_ERR_CHECK(  SG_rev_spec__get_all__repo(pCtx, pMrg->pWcTx->pDb->pRepo, pRevSpec, SG_TRUE,
											  &psaHids, &psaMissingHids)  );
	SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaHids, &nrMatched)  );
	if (nrMatched == 0)
	{
		SG_uint32 nrMissing = 0;
		SG_ASSERT_RELEASE_FAIL(  (psaMissingHids != NULL)  );
		SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaMissingHids, &nrMissing)  );
		if (nrMissing == 1)
		{
			const char * psz_0;
			SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaMissingHids, 0, &psz_0)  );
			SG_ERR_THROW2(  SG_ERR_BRANCH_HEAD_CHANGESET_NOT_PRESENT,
							(pCtx, "Branch '%s' refers to changeset '%s'. Consider pulling.",
							 pszBranchNameRequested, psz_0)  );
		}
		else
		{
			SG_ERR_THROW2(  SG_ERR_BRANCH_HEAD_CHANGESET_NOT_PRESENT,
							(pCtx, "Branch '%s' refers to %d changesets that are not present. Consider pulling.",
							 pszBranchNameRequested, nrMissing)  );
		}
	}
	else if (nrMatched == 1)
	{
		// We found a single unique match for our request.
		// We ***DO NOT*** disqualify the current baseline
		// in this case.  We let routines like do_cmd_merge_preview()
		// report that.

		const char * psz_0;
		SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaHids, 0, &psz_0)  );
		SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_0, &pMrg->pszHidTarget)  );
	}
	else
	{
		// We can only get here if pRevSpec contained a "--branch ..."
		// reference (because the "--rev" lookup throws when given a
		// non-unique prefix and "--tag" can only be bound to a single
		// cset).
		//
		// If they referenced the attached branch (and the baseline is
		// pointing at a head), we'll get our baseline in the result set,
		// so get rid of it.
		SG_ERR_CHECK(  SG_stringarray__remove_all(pCtx, psaHids, pMrg->pszHid_StartingBaseline, NULL)  );
		SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaHids, &nrMatchedExcludingParent)  );

		if (nrMatchedExcludingParent == 1)
		{
			// parent may or may not be a head of this branch, but
			// we found a single head or single other head.
			const char * psz_0;
			SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaHids, 0, &psz_0)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_0, &pMrg->pszHidTarget)  );
		}
		else if (nrMatchedExcludingParent < nrMatched)
		{
			// There were at least 3 heads of this branch and the baseline
			// is one of them.  Throwing a generic 'needs merge' message is
			// not helpful.
			SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
							(pCtx, "Branch '%s' has %d heads (excluding the baseline). Consider merging one of the other heads using --rev/--tag.",
							 pszBranchNameRequested, nrMatchedExcludingParent)  );
		}
		else //if (nrMatchedExcludingParent == nrMatched)
		{
			// The requested branch has multiple heads and the current
			// baseline is NOT one of them.  The current baseline MAY OR MAY NOT
			// be in that branch.  (And independently, we may or may not be
			// attached to that branch.)
			//
			// See how the heads are related to the current baseline.
			const char * pszDescendant0 = NULL;
			const char * pszAncestor0 = NULL;
			SG_uint32 nrDescendants = 0;
			SG_uint32 nrAncestors = 0;
			SG_uint32 k;
			for (k=0; k<nrMatched; k++)
			{
				const char * psz_k;
				SG_dagquery_relationship dqRel;
				SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaHids, k, &psz_k)  );
				SG_ERR_CHECK(  SG_dagquery__how_are_dagnodes_related(pCtx, pMrg->pWcTx->pDb->pRepo,
																	 SG_DAGNUM__VERSION_CONTROL,
																	 psz_k, pMrg->pszHid_StartingBaseline,
																	 SG_FALSE, SG_FALSE, &dqRel)  );
				if (dqRel == SG_DAGQUERY_RELATIONSHIP__DESCENDANT)
				{
					pszDescendant0 = psz_k;
					nrDescendants++; // target[k] is descendant of baseline
				}
				else if (dqRel == SG_DAGQUERY_RELATIONSHIP__ANCESTOR)
				{
					pszAncestor0 = psz_k;
					nrAncestors++;	// target[k] is ancestor of baseline
				}
			}
			SG_ASSERT(  ((nrDescendants == 0) || (nrAncestors == 0))  );
			if (nrDescendants == 1)
			{
				if (bRequestedAttachedBranch)			// The current baseline is attached to the same branch, just not a head.
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Only changeset '%s' is a descendant of the current baseline. Consider updating to it and then merging the branch.",
									 pszBranchNameRequested, nrMatched, pszDescendant0)  );
				else if (pMrg->pszBranchName_Starting)	// currently attached to a different branch
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Only changeset '%s' is a descendant of the current baseline. Consider updating to it. You are attached to branch '%s'.",
									 pszBranchNameRequested, nrMatched, pszDescendant0, pMrg->pszBranchName_Starting)  );
				else									// currently detached
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Only changeset '%s' is a descendant of the current baseline. Consider updating to it. You are not attached to a branch.",
									 pszBranchNameRequested, nrMatched, pszDescendant0)  );
			}
			else if (nrDescendants > 1)					// nrDescendants may or may not be equal to nrMatched since there may be peers too.
			{
				if (bRequestedAttachedBranch)			// The current baseline is attached to the same branch, just not a head.
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. %d are descendants of the current baseline. Consider updating to one of them and then merging the branch.",
									 pszBranchNameRequested, nrMatched, nrDescendants)  );
				else if (pMrg->pszBranchName_Starting)	// currently attached to a different branch
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. %d are descendants of the current baseline. Consider updating to one of them. You are attached to branch '%s'.",
									 pszBranchNameRequested, nrMatched, nrDescendants, pMrg->pszBranchName_Starting)  );
				else									// currently detached
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. %d are descendants of the current baseline. Consider updating to one of them. You are not attached to a branch.",
									 pszBranchNameRequested, nrMatched, nrDescendants)  );
			}
			else if (nrAncestors == 1)
			{
				if (bRequestedAttachedBranch)			// The current baseline is attached to the same branch, but the head pointer is not pointing at us.
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Changeset '%s' is an ancestor of the current baseline. Consider moving that head forward and then merging the branch.",
									 pszBranchNameRequested, nrMatched, pszAncestor0)  );
				else if (pMrg->pszBranchName_Starting)	// currently attached to a different branch
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Changeset '%s' is an ancestor of the current baseline. Consider moving that head forward. You are attached to branch '%s'.",
									 pszBranchNameRequested, nrMatched, pszAncestor0, pMrg->pszBranchName_Starting)  );
				else									// currently detached
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Changeset '%s' is an ancestor of the current baseline. Consider moving that head forward. You are not attached to a branch.",
									 pszBranchNameRequested, nrMatched, pszAncestor0)  );
			}
			else if (nrAncestors > 1)					// nrAncestors may or may not be equal to nrMatched since there may be peers too.
			{
				SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
								(pCtx, "Branch '%s' has %d heads. All of them are ancestors of the current baseline. Consider moving one of the heads forward and removing the others.",
								 pszBranchNameRequested, nrMatched)  );
			}
			else										// All of the heads are peers of the current baseline.
			{
				if (bRequestedAttachedBranch)			// The current baseline is attached to the same branch, but the head pointer is not pointing at us.
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. All are peers of the current baseline. Consider merging one of the other heads using --rev/--tag.",
									 pszBranchNameRequested, nrMatched)  );
				else if (pMrg->pszBranchName_Starting)	// currently attached to a different branch
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. All are peers of the current baseline. Consider merging one of the other heads using --rev/--tag. You are attached to branch '%s'.",
									 pszBranchNameRequested, nrMatched, pMrg->pszBranchName_Starting)  );
				else									// currently detached
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. All are peers of the current baseline. Consider merging one of the other heads using --rev/--tag. You are not attached to a branch.",
									 pszBranchNameRequested, nrMatched)  );
			}
		}
	}

fail:
	SG_STRINGARRAY_NULLFREE(pCtx, psaBranchesRequested);
	SG_STRINGARRAY_NULLFREE(pCtx, psaHids);
	SG_STRINGARRAY_NULLFREE(pCtx, psaMissingHids);
	SG_REV_SPEC_NULLFREE(pCtx, pRevSpec_Allocated);
}
void SG_dagquery__find_new_since_common(
	SG_context * pCtx,
	SG_repo * pRepo,
	SG_uint64 dagnum,
	const char * pszOldNodeHid,
	const char * pszNewNodeHid,
	SG_stringarray ** ppResults
	)
{
	_fnsc_work_queue_t workQueue = {NULL, 0, 0, 0, NULL};
	SG_uint32 i;
	SG_dagnode * pDagnode = NULL;
	SG_stringarray * pResults = NULL;
	
	SG_ASSERT(pCtx!=NULL);
	SG_NULLARGCHECK(pRepo);
	SG_NONEMPTYCHECK(pszOldNodeHid);
	SG_NONEMPTYCHECK(pszNewNodeHid);
	SG_NULLARGCHECK(ppResults);
	
	SG_ERR_CHECK(  SG_allocN(pCtx, _FNSC_WORK_QUEUE_INIT_LENGTH, workQueue.p)  );
	workQueue.allocatedLength = _FNSC_WORK_QUEUE_INIT_LENGTH;
	SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &workQueue.pRevnoCache)  );
	
	SG_ERR_CHECK(  _fnsc_work_queue__insert(pCtx, &workQueue, pszOldNodeHid, dagnum, pRepo, _ANCESTOR_OF_OLD)  );
	SG_ERR_CHECK(  _fnsc_work_queue__insert(pCtx, &workQueue, pszNewNodeHid, dagnum, pRepo, _ANCESTOR_OF_NEW)  );
	
	SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &pResults, 32)  );
	while(workQueue.numAncestorsOfNewOnTheQueue > 0)
	{
		const char * pszHidRef = NULL;
		SG_byte isAncestorOf = 0;
		
		SG_ERR_CHECK(  _fnsc_work_queue__pop(pCtx, &workQueue, &pDagnode, &pszHidRef, &isAncestorOf)  );
		if (isAncestorOf==_ANCESTOR_OF_NEW)
			SG_ERR_CHECK(  SG_stringarray__add(pCtx, pResults, pszHidRef)  );
		
		{
			SG_uint32 count_parents = 0;
			const char** parents = NULL;
			SG_ERR_CHECK(  SG_dagnode__get_parents__ref(pCtx, pDagnode, &count_parents, &parents)  );
			for(i=0; i<count_parents; ++i)
				SG_ERR_CHECK(  _fnsc_work_queue__insert(pCtx, &workQueue, parents[i], dagnum, pRepo, isAncestorOf)  );
		}
		
		SG_DAGNODE_NULLFREE(pCtx, pDagnode);
	}
	
	for(i=0; i<workQueue.length; ++i)
		SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
	SG_NULLFREE(pCtx, workQueue.p);
	SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);
	
	*ppResults = pResults;

	return;
fail:
	for(i=0; i<workQueue.length; ++i)
		SG_DAGNODE_NULLFREE(pCtx, workQueue.p[i].pDagnode);
	SG_NULLFREE(pCtx, workQueue.p);
	SG_RBTREE_NULLFREE(pCtx, workQueue.pRevnoCache);

	SG_DAGNODE_NULLFREE(pCtx, pDagnode);

	SG_STRINGARRAY_NULLFREE(pCtx, pResults);
}
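
A caller sketch for the query above, mirroring how do_cmd_merge_preview uses it earlier on this page; the two HID parameters are placeholders supplied by the caller.

/* Illustrative: print the changesets that are new in pszNewHead relative to pszOldHead. */
static void my_show_new_csets(SG_context* pCtx, SG_repo* pRepo,
                              const char* pszOldHead, const char* pszNewHead)
{
    SG_stringarray* psaNew = NULL;
    const char* const* ppszNew = NULL;
    SG_uint32 countNew = 0;
    SG_uint32 i;

    SG_ERR_CHECK(  SG_dagquery__find_new_since_common(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL,
                                                      pszOldHead, pszNewHead, &psaNew)  );
    SG_ERR_CHECK(  SG_stringarray__sz_array_and_count(pCtx, psaNew, &ppszNew, &countNew)  );
    for (i = 0; i < countNew; i++)
        SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDOUT, "%s\n", ppszNew[i])  );

fail:
    SG_STRINGARRAY_NULLFREE(pCtx, psaNew);
}
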
// Helper function for _tree__init__continuation()
static void _tree__add_subtoken(SG_context * pCtx, _tree_t * pTree, _node_t * pParent, SG_varray * pSubtoken)
{
	SG_int64 revno = 0;
	char * szHid = NULL;
	_node_t * pNodeRef = NULL;
	SG_uint32 count = 0;
	SG_uint32 i = 0;
	SG_uint16 lastElementType = 0;
	SG_varray * pSubsubtoken = NULL;
	SG_stringarray * psa = NULL;

	++pTree->indentLevel;

	SG_ERR_CHECK(  SG_varray__get__int64(pCtx, pSubtoken, 0, &revno)  );
	SG_ERR_CHECK(  SG_repo__find_dagnode_by_rev_id(pCtx, pTree->pRepoRef, SG_DAGNUM__VERSION_CONTROL, (SG_uint32)revno, &szHid)  );
	SG_ERR_CHECK(  _tree__add_new_node(pCtx, pTree, pParent, szHid, &pNodeRef)  );
	pNodeRef->isPending = SG_FALSE;
	SG_NULLFREE(pCtx, szHid);

	SG_ERR_CHECK(  SG_varray__count(pCtx, pSubtoken, &count)  );
	SG_ERR_CHECK(  SG_varray__typeof(pCtx, pSubtoken, count-1, &lastElementType)  );
	if(lastElementType==SG_VARIANT_TYPE_VARRAY)
	{
		SG_ERR_CHECK(  SG_varray__get__varray(pCtx, pSubtoken, count-1, &pSubsubtoken)  );
		--count;
	}

	for(i=1; i<count; ++i)
	{
		_node_t * pRefChildNode = NULL;
		SG_ERR_CHECK(  SG_varray__get__int64(pCtx, pSubtoken, i, &revno)  );
		if(i==1 && revno==0)
		{
			// Revno 0 as the first child means "Use the LCA for the baseline".
			SG_uint32 j = 0;

			if(pSubsubtoken!=NULL)
			{
				// LCA would not include nodes in the sub-token. Just error out.
				SG_ERR_THROW(SG_ERR_INVALIDARG);
			}

			if(count-2<2)
			{
				// This would be interpreted as merging a node with itself or with nothing.
				SG_ERR_THROW(SG_ERR_INVALIDARG);
			}

			SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psa, count-2)  );
			for(j=2; j<count; ++j)
			{
				SG_ERR_CHECK(  SG_varray__get__int64(pCtx, pSubtoken, j, &revno)  );
				SG_ERR_CHECK(  SG_repo__find_dagnode_by_rev_id(pCtx, pTree->pRepoRef, SG_DAGNUM__VERSION_CONTROL, (SG_uint32)revno, &szHid)  );
				SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa, szHid)  );
				SG_NULLFREE(pCtx, szHid);
			}
			SG_ERR_CHECK(  SG_dagquery__highest_revno_common_ancestor(pCtx, pTree->pRepoRef, SG_DAGNUM__VERSION_CONTROL, psa, &szHid)  );
			SG_STRINGARRAY_NULLFREE(pCtx, psa);
		}
		else
		{
			SG_ERR_CHECK(  SG_repo__find_dagnode_by_rev_id(pCtx, pTree->pRepoRef, SG_DAGNUM__VERSION_CONTROL, (SG_uint32)revno, &szHid)  );
		}
		SG_ERR_CHECK(  _tree__add_new_node(pCtx, pTree, pNodeRef, szHid, &pRefChildNode)  );
		pTree->pNextResult = pRefChildNode;
		SG_ERR_CHECK(  _node_list__append(pCtx, &pTree->pending, &pRefChildNode)  );
		SG_NULLFREE(pCtx, szHid);
	}

	if(pSubsubtoken!=NULL)
	{
		SG_ERR_CHECK(  _tree__add_subtoken(pCtx, pTree, pNodeRef, pSubsubtoken)  );
	}

	return;
fail:
	SG_NULLFREE(pCtx, szHid);
	SG_STRINGARRAY_NULLFREE(pCtx, psa);
}
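The LCA lookup used for the revno-0 case above can also be called directly. A small sketch, assuming three known changeset HIDs; the wrapper name is illustrative only:

static void example__lca_of_three(SG_context * pCtx, SG_repo * pRepo,
								  const char * pszHid1, const char * pszHid2, const char * pszHid3)
{
	SG_stringarray * psaHids = NULL;
	char * pszLcaHid = NULL;

	SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psaHids, 3)  );
	SG_ERR_CHECK(  SG_stringarray__add(pCtx, psaHids, pszHid1)  );
	SG_ERR_CHECK(  SG_stringarray__add(pCtx, psaHids, pszHid2)  );
	SG_ERR_CHECK(  SG_stringarray__add(pCtx, psaHids, pszHid3)  );

	// Returns the common ancestor with the highest revision number.
	SG_ERR_CHECK(  SG_dagquery__highest_revno_common_ancestor(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL,
															   psaHids, &pszLcaHid)  );
	// ... use pszLcaHid ...

fail:
	SG_STRINGARRAY_NULLFREE(pCtx, psaHids);
	SG_NULLFREE(pCtx, pszLcaHid);
}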
void SG_sync_remote__request_fragball(
	SG_context* pCtx,
	SG_repo* pRepo,
	const SG_pathname* pFragballDirPathname,
	SG_vhash* pvhRequest,
	char** ppszFragballName)
{
	SG_pathname* pFragballPathname = NULL;
	SG_uint64* paDagNums = NULL;
	SG_string* pstrFragballName = NULL;
	SG_rbtree* prbDagnodes = NULL;
	SG_rbtree_iterator* pit = NULL;
	SG_rev_spec* pRevSpec = NULL;
	SG_stringarray* psaFullHids = NULL;
	SG_rbtree* prbDagnums = NULL;
	SG_dagfrag* pFrag = NULL;
	char* pszRepoId = NULL;
	char* pszAdminId = NULL;
    SG_fragball_writer* pfb = NULL;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pFragballDirPathname);

    {
        char buf_filename[SG_TID_MAX_BUFFER_LENGTH];
        SG_ERR_CHECK(  SG_tid__generate(pCtx, buf_filename, sizeof(buf_filename))  );
        SG_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pFragballPathname, pFragballDirPathname, buf_filename)  );
    }

	if (!pvhRequest)
	{
		// Add leaves from every dag to the fragball.
		SG_uint32 count_dagnums;
		SG_uint32 i;

        SG_ERR_CHECK(  SG_fragball_writer__alloc(pCtx, pRepo, pFragballPathname, SG_TRUE, 2, &pfb)  );
		SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_dagnums, &paDagNums)  );

		for (i=0; i<count_dagnums; i++)
		{
			SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, paDagNums[i], &prbDagnodes)  );
			SG_ERR_CHECK(  SG_fragball__write__dagnodes(pCtx, pfb, paDagNums[i], prbDagnodes)  );
			SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
		}

		SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
		SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
        SG_ERR_CHECK(  SG_fragball_writer__close(pCtx, pfb)  );
	}
	else
	{
		// Specific dags/nodes were requested. Build that fragball.
		SG_bool found;

#if TRACE_SYNC_REMOTE && 0
		SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, pvhRequest, "fragball request")  );
#endif

		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE, &found)  );
		if (found)
		{
            // SG_SYNC_STATUS_KEY__CLONE_REQUEST is currently ignored
            SG_ERR_CHECK(  SG_repo__fetch_repo__fragball(pCtx, pRepo, 3, pFragballDirPathname, ppszFragballName) );
		}
		else
		{
			// Not a full clone.

            SG_ERR_CHECK(  SG_fragball_writer__alloc(pCtx, pRepo, pFragballPathname, SG_TRUE, 2, &pfb)  );
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__SINCE, &found)  );
			if (found)
			{
                SG_vhash* pvh_since = NULL;

                SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__SINCE, &pvh_since)  );

                SG_ERR_CHECK(  _do_since(pCtx, pRepo, pvh_since, pfb)  );
            }

			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &found)  );
			if (found)
			{
				// Specific Dagnodes were requested. Add just those nodes to our "start from" rbtree.

				SG_vhash* pvhDags;
				SG_uint32 count_requested_dagnums;
				SG_uint32 i;
				const SG_variant* pvRevSpecs = NULL;
				SG_vhash* pvhRevSpec = NULL;

				// For each requested dag, get rev spec request.
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags)  );
				SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhDags, &count_requested_dagnums)  );
				if (count_requested_dagnums)
					SG_ERR_CHECK(  SG_repo__list_dags__rbtree(pCtx, pRepo, &prbDagnums)  );
				for (i=0; i<count_requested_dagnums; i++)
				{
					SG_bool isValidDagnum = SG_FALSE;
					SG_bool bSpecificNodesRequested = SG_FALSE;
					const char* pszRefDagNum = NULL;
					SG_uint64 iDagnum;

					// Get the dagnum and its rev spec request (the value may be a null variant).
					SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhDags, i, &pszRefDagNum, &pvRevSpecs)  );

					// Verify that requested dagnum exists
					SG_ERR_CHECK(  SG_rbtree__find(pCtx, prbDagnums, pszRefDagNum, &isValidDagnum, NULL)  );
					if (!isValidDagnum)
                        continue;

					SG_ERR_CHECK(  SG_dagnum__from_sz__hex(pCtx, pszRefDagNum, &iDagnum)  );
					
					if (pvRevSpecs && pvRevSpecs->type != SG_VARIANT_TYPE_NULL)
					{
						SG_uint32 countRevSpecs = 0;
						
						SG_ERR_CHECK(  SG_variant__get__vhash(pCtx, pvRevSpecs, &pvhRevSpec)  );
						SG_ERR_CHECK(  SG_rev_spec__from_vash(pCtx, pvhRevSpec, &pRevSpec)  );

						// Process the rev spec for each dag
						SG_ERR_CHECK(  SG_rev_spec__count(pCtx, pRevSpec, &countRevSpecs)  );
						if (countRevSpecs > 0)
						{
							bSpecificNodesRequested = SG_TRUE;

							SG_ERR_CHECK(  SG_rev_spec__get_all__repo(pCtx, pRepo, pRevSpec, SG_TRUE, &psaFullHids, NULL)  );
							SG_ERR_CHECK(  SG_stringarray__to_rbtree_keys(pCtx, psaFullHids, &prbDagnodes)  );
							SG_STRINGARRAY_NULLFREE(pCtx, psaFullHids);
						}
						SG_REV_SPEC_NULLFREE(pCtx, pRevSpec);
					}

					if (!bSpecificNodesRequested)
					{
						// When no specific nodes are in the request, add all leaves.
						SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagnum, &prbDagnodes)  );
					}

					if (prbDagnodes) // can be null when leaves of an empty dag are requested
					{
						// Get the leaves of the other repo, which we need to connect to.
						SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__LEAVES, &found)  );
						if (found)
						{
							SG_vhash* pvhRefAllLeaves;
							SG_vhash* pvhRefDagLeaves;
							SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__LEAVES, &pvhRefAllLeaves)  );
							SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRefAllLeaves, pszRefDagNum, &found)  );
							if (found)
							{
								SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRefAllLeaves, pszRefDagNum, &pvhRefDagLeaves)  );
								SG_ERR_CHECK(  SG_sync__build_best_guess_dagfrag(pCtx, pRepo, iDagnum,
									prbDagnodes, pvhRefDagLeaves, &pFrag)  );
							}
						}
						else
						{
							// The other repo's leaves weren't provided: add just the requested nodes, make no attempt to connect.
							SG_ERR_CHECK(  SG_repo__get_repo_id(pCtx, pRepo, &pszRepoId)  );
							SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepo, &pszAdminId)  );
							SG_ERR_CHECK(  SG_dagfrag__alloc(pCtx, &pFrag, pszRepoId, pszAdminId, iDagnum)  );
							SG_ERR_CHECK(  SG_dagfrag__load_from_repo__simple(pCtx, pFrag, pRepo, prbDagnodes)  );
							SG_NULLFREE(pCtx, pszRepoId);
							SG_NULLFREE(pCtx, pszAdminId);
						}

						SG_ERR_CHECK(  SG_fragball__write__frag(pCtx, pfb, pFrag)  );
						
						SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
						SG_DAGFRAG_NULLFREE(pCtx, pFrag);
					}

				} // dagnum loop
			} // if "dags" exists

			/* Add requested blobs to the fragball */
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &found)  );
			if (found)
			{
				// Blobs were requested.
				SG_vhash* pvhBlobs;
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &pvhBlobs)  );
				SG_ERR_CHECK(  SG_sync__add_blobs_to_fragball(pCtx, pfb, pvhBlobs)  );
			}

			SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
		}
        SG_ERR_CHECK(  SG_fragball_writer__close(pCtx, pfb)  );
	}

	/* fallthru */
fail:
	// If we had an error, delete the half-baked fragball.
	if (pFragballPathname && SG_context__has_err(pCtx))
    {
		SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pFragballPathname)  );
    }

	SG_PATHNAME_NULLFREE(pCtx, pFragballPathname);
	SG_NULLFREE(pCtx, paDagNums);
	SG_STRING_NULLFREE(pCtx, pstrFragballName);
	SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_RBTREE_NULLFREE(pCtx, prbDagnums);
	SG_REV_SPEC_NULLFREE(pCtx, pRevSpec);
	SG_STRINGARRAY_NULLFREE(pCtx, psaFullHids);
	SG_DAGFRAG_NULLFREE(pCtx, pFrag);
	SG_NULLFREE(pCtx, pszRepoId);
	SG_NULLFREE(pCtx, pszAdminId);
    SG_FRAGBALL_WRITER_NULLFREE(pCtx, pfb);
}
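Finally, a caller-side sketch of the simplest fragball request, passing a NULL request vhash to ask for the leaves of every dag. The wrapper name and the surrounding error handling are illustrative, and the temp-directory pathname is assumed to already exist:

static void example__request_full_leaves_fragball(SG_context * pCtx, SG_repo * pRepo,
												  const SG_pathname * pTempDirPathname)
{
	char * pszFragballName = NULL;

	// A NULL request asks for the leaves of every dag in the repo.
	SG_ERR_CHECK(  SG_sync_remote__request_fragball(pCtx, pRepo, pTempDirPathname,
													NULL, &pszFragballName)  );

	// pszFragballName is the file name (not the full path) of the fragball written
	// into pTempDirPathname.
	// ... hand the name back to the caller or stream the file ...

fail:
	SG_NULLFREE(pCtx, pszFragballName);
}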