void sg_treediff_cache::RememberLastQuery(SG_context * pCtx, const char * pszLastQuery, const char * pszLastResult)
{
	//Free the previous results.
	SG_NULLFREE(pCtx, m_pPreviousResult);
	SG_NULLFREE(pCtx, m_pPreviousCommand);

	//Save the current command and result to reuse if necessary.
	SG_ERR_CHECK(  SG_STRDUP(pCtx, pszLastResult, &m_pPreviousResult)  );
	SG_ERR_CHECK(  SG_STRDUP(pCtx, pszLastQuery, &m_pPreviousCommand)  );
fail:
	return;
}
/**
 * Iterate over all items that list the given gid as
 * their parent directory.
 *
 * Warning: if you give a bogus directory gid, we can't
 * tell -- our SELECT just won't find any.
 *
 */
void sg_wc_db__tne__foreach_in_dir_by_parent_alias(SG_context * pCtx,
												   sg_wc_db * pDb,
												   const sg_wc_db__cset_row * pCSetRow,
												   SG_uint64 uiAliasGidParent,
												   sg_wc_db__tne__foreach_cb * pfn_cb,
												   void * pVoidData)
{
	sqlite3_stmt * pStmt = NULL;
	sg_wc_db__tne_row * pTneRow = NULL;
	int rc;

	SG_ARGCHECK_RETURN(  (uiAliasGidParent != SG_WC_DB__ALIAS_GID__UNDEFINED), uiAliasGidParent  );

	SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pDb->psql, &pStmt,
									  ("SELECT"
									   "    alias_gid,"			// 0
									   "    hid,"				// 1
									   "    type,"				// 2
									   "    attrbits,"			// 3
									   "    entryname"			// 4
									   "  FROM %s"
									   "  WHERE (alias_gid_parent = ?)"),
									  pCSetRow->psz_tne_table_name)  );
	SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 1, uiAliasGidParent)  );

	while ((rc=sqlite3_step(pStmt)) == SQLITE_ROW)
	{
		SG_ERR_CHECK(  sg_wc_db__tne_row__alloc(pCtx, &pTneRow)  );

		pTneRow->p_s->uiAliasGid       = (SG_uint64)sqlite3_column_int64(pStmt, 0);
		pTneRow->p_s->uiAliasGidParent = uiAliasGidParent;
		SG_ERR_CHECK(  SG_STRDUP(pCtx, (const char *)sqlite3_column_text(pStmt, 1), &pTneRow->p_d->pszHid)  );
		pTneRow->p_s->tneType          = (SG_uint32)sqlite3_column_int(pStmt, 2);
		pTneRow->p_d->attrbits         = (SG_uint64)sqlite3_column_int64(pStmt, 3);
		SG_ERR_CHECK(  SG_STRDUP(pCtx, (const char *)sqlite3_column_text(pStmt, 4), &pTneRow->p_s->pszEntryname)  );

		// pass the tne_row by address so that the caller can steal it if they want to.
		SG_ERR_CHECK(  (*pfn_cb)(pCtx, pVoidData, &pTneRow)  );

		SG_WC_DB__TNE_ROW__NULLFREE(pCtx, pTneRow);
	}
	if (rc != SQLITE_DONE)
	{
		SG_ERR_THROW(  SG_ERR_SQLITE(rc)  );
	}

	SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );
	return;

fail:
	SG_ERR_IGNORE(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );
	SG_WC_DB__TNE_ROW__NULLFREE(pCtx, pTneRow);
}
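/**
 * A minimal sketch (not part of the original source) of a callback that
 * could be handed to sg_wc_db__tne__foreach_in_dir_by_parent_alias().
 * The signature is inferred from the (*pfn_cb)(pCtx, pVoidData, &pTneRow)
 * call above; "_count_tne_rows_cb" and its counter struct are hypothetical.
 */
struct _count_tne_rows_data
{
	SG_uint32 nrRows;
};

static void _count_tne_rows_cb(SG_context * pCtx,
							   void * pVoidData,
							   sg_wc_db__tne_row ** ppTneRow)
{
	struct _count_tne_rows_data * pData = (struct _count_tne_rows_data *)pVoidData;

	(void)pCtx;		// unused: this callback never raises an error.
	(void)ppTneRow;	// leave *ppTneRow alone so the iterator frees it;
					// a callback that wants to keep the row would steal
					// the pointer and set *ppTneRow to NULL.

	pData->nrRows++;
}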
void SG_user__get_email_for_repo(SG_context * pCtx, SG_repo* pRepo, const char ** ppsz_email)
{
	char * psz_admin_id = NULL;
	char * psz_userid = NULL;
	const char * psz_email_temp = NULL;
	SG_string * pstr_path = NULL;
	SG_vhash * pvh_userhash = NULL;
	if (pRepo)
	{
		SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepo, &psz_admin_id)  );

		// we store this userid under the admin scope of the repo we were given
		SG_ERR_CHECK(  SG_string__alloc__format(pCtx, &pstr_path, "%s/%s/%s",
					SG_LOCALSETTING__SCOPE__ADMIN,
					psz_admin_id,
					SG_LOCALSETTING__USERID
					)  );
		SG_ERR_CHECK(  SG_localsettings__get__sz(pCtx, SG_string__sz(pstr_path), pRepo, &psz_userid, NULL)  );
		if (psz_userid == NULL || *psz_userid == 0)
			SG_ERR_THROW(SG_ERR_USER_NOT_FOUND);
		SG_ERR_CHECK(  SG_user__lookup_by_userid(pCtx, pRepo, psz_userid, &pvh_userhash)  );
		SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvh_userhash, "email", &psz_email_temp)  );
		SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_email_temp, (char**)ppsz_email)  );
	}

fail:
	SG_VHASH_NULLFREE(pCtx, pvh_userhash);
	SG_NULLFREE(pCtx, psz_admin_id);
	SG_NULLFREE(pCtx, psz_userid);
	SG_STRING_NULLFREE(pCtx, pstr_path);

}
/**
 * Fetch the current branch if attached.
 * Return NULL if detached.
 *
 */
void sg_wc_db__branch__get_branch(SG_context * pCtx,
                                  sg_wc_db * pDb,
                                  char ** ppszBranchName)
{
    sqlite3_stmt * pStmt = NULL;
    int rc;

    SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pDb->psql, &pStmt,
                                      ("SELECT"
                                       "    name"	// 0
                                       "  FROM tbl_branch"
                                       "  WHERE id = ?"))  );
    SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 1, ID_KEY)  );
    rc = sqlite3_step(pStmt);
    switch (rc)
    {
    case SQLITE_ROW:
        if (sqlite3_column_type(pStmt, 0) == SQLITE_NULL)
            *ppszBranchName = NULL;
        else
            SG_ERR_CHECK(  SG_STRDUP(pCtx, (const char *)sqlite3_column_text(pStmt, 0), ppszBranchName)  );
        break;

    case SQLITE_DONE:
        *ppszBranchName = NULL;
        break;

    default:
        SG_ERR_THROW2(  SG_ERR_SQLITE(rc),
                        (pCtx, "sg_wc_db:tbl_branch can't get branch name.")  );
    }

    SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );

#if TRACE_WC_DB
    SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
                               "sg_wc_db__branch__get_branch: %s\n",
                               ((*ppszBranchName) ? (*ppszBranchName) : "<detached>"))  );
#endif

    return;

fail:
    SG_ERR_IGNORE(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );
}
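/**
 * Hypothetical usage sketch (not in the original source): report whether
 * the working copy is attached, following the SG_ERR_CHECK/fail idiom used
 * throughout this file.  "_report_attachment" is an illustrative name only.
 */
static void _report_attachment(SG_context * pCtx, sg_wc_db * pDb)
{
    char * pszBranchName = NULL;

    SG_ERR_CHECK(  sg_wc_db__branch__get_branch(pCtx, pDb, &pszBranchName)  );
    SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR, "Attached to: %s\n",
                               ((pszBranchName) ? pszBranchName : "<detached>"))  );

fail:
    SG_NULLFREE(pCtx, pszBranchName);
}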
void sg_wc_db__gid__get_gid_from_alias(SG_context * pCtx,
									   sg_wc_db * pDb,
									   SG_uint64 uiAliasGid,
									   char ** ppszGid)
{
	char * pszGid = NULL;
	int rc;

	// TODO 2011/08/01 Should this check for "*null*" and offer to return NULL ?

	//Caching the prepared statement is a pretty huge performance win.
	if (pDb->pSqliteStmt__get_gid_from_alias == NULL)
	{
		SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pDb->psql, &pDb->pSqliteStmt__get_gid_from_alias,
									  ("SELECT gid FROM tbl_gid WHERE alias_gid = ?"))  );
	}
	SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pDb->pSqliteStmt__get_gid_from_alias, 1, uiAliasGid)  );

	if ((rc=sqlite3_step(pDb->pSqliteStmt__get_gid_from_alias)) != SQLITE_ROW)
	{
		SG_int_to_string_buffer bufui64;
		SG_ERR_THROW2(  SG_ERR_SQLITE(rc),
						(pCtx, "sg_wc_db:tbl_gid can't find alias %s.",
						 SG_uint64_to_sz(uiAliasGid, bufui64))  );
	}

	SG_ERR_CHECK(  SG_STRDUP(pCtx, (const char *)sqlite3_column_text(pDb->pSqliteStmt__get_gid_from_alias, 0), &pszGid)  );
	SG_ERR_CHECK(  sg_sqlite__reset(pCtx, pDb->pSqliteStmt__get_gid_from_alias)  );
	SG_ERR_CHECK(  sg_sqlite__clear_bindings(pCtx, pDb->pSqliteStmt__get_gid_from_alias)  );

	*ppszGid = pszGid;
	return;

fail:
	SG_ERR_IGNORE(  sg_sqlite__reset(pCtx, pDb->pSqliteStmt__get_gid_from_alias)  );
	SG_ERR_IGNORE(  sg_sqlite__clear_bindings(pCtx, pDb->pSqliteStmt__get_gid_from_alias)  );
	SG_NULLFREE(pCtx, pszGid);
}
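/**
 * Hedged usage sketch (illustrative only): map an alias back to its full
 * GID and log it.  Only calls that already appear in this file are used.
 */
static void _print_gid_for_alias(SG_context * pCtx,
								 sg_wc_db * pDb,
								 SG_uint64 uiAliasGid)
{
	char * pszGid = NULL;

	SG_ERR_CHECK(  sg_wc_db__gid__get_gid_from_alias(pCtx, pDb, uiAliasGid, &pszGid)  );
	SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR, "alias -> gid %s\n", pszGid)  );

fail:
	SG_NULLFREE(pCtx, pszGid);
}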
void sg_wc_db__gid__get_gid_from_alias2(SG_context * pCtx,
										sg_wc_db * pDb,
										SG_uint64 uiAliasGid,
										char ** ppszGid,
										SG_bool * pbIsTmp)
{
	sqlite3_stmt * pStmt = NULL;
	char * pszGid = NULL;
	int rc;
	SG_bool bIsTmp;

	// TODO 2011/08/01 Should this check for "*null*" and offer to return NULL ?

	SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pDb->psql, &pStmt,
									  ("SELECT gid,tmp FROM tbl_gid WHERE alias_gid = ?"))  );
	SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 1, uiAliasGid)  );

	if ((rc=sqlite3_step(pStmt)) != SQLITE_ROW)
	{
		SG_int_to_string_buffer bufui64;
		SG_ERR_THROW2(  SG_ERR_SQLITE(rc),
						(pCtx, "sg_wc_db:tbl_gid can't find alias %s.",
						 SG_uint64_to_sz(uiAliasGid, bufui64))  );
	}

	SG_ERR_CHECK(  SG_STRDUP(pCtx, (const char *)sqlite3_column_text(pStmt, 0), &pszGid)  );
	bIsTmp = (sqlite3_column_int(pStmt, 1) != 0);
	SG_ERR_CHECK(  sg_sqlite__finalize(pCtx, pStmt)  );

	*ppszGid = pszGid;
	*pbIsTmp = bIsTmp;
	return;

fail:
	SG_ERR_IGNORE(  sg_sqlite__finalize(pCtx, pStmt)  );
	SG_NULLFREE(pCtx, pszGid);
}
/**
 * Create a new repo in the closet.
 */
static void _vv_verbs__init_new_repo__do_init(SG_context * pCtx,
											  const char * pszRepoName,
											  const char * pszStorage,
											  const char * pszHashMethod,
											  const char * psz_shared_users,
											  SG_bool bFromUserMaster,
											  char ** ppszGidRepoId,
											  char ** ppszHidCSetFirst)
{
	SG_repo * pRepo = NULL;
	SG_repo * pRepoUserMaster = NULL;
	char * pszUserMasterAdminId = NULL;
	SG_changeset * pCSetFirst = NULL;
	const char * pszHidCSetFirst_ref;
	char * pszHidCSetFirst = NULL;
	char * pszGidRepoId = NULL;
	char bufAdminId[SG_GID_BUFFER_LENGTH];

	// create a completely new repo in the closet.

	SG_NULLARGCHECK_RETURN( pszRepoName );
	// pszStorage is optional
	// pszHashMethod is optional

	SG_ASSERT(SG_FALSE == (psz_shared_users && bFromUserMaster)); // checked in SG_vv_verbs__init_new_repo

    if (psz_shared_users)
    {
        SG_ERR_CHECK(  _vv_verbs__init_new_repo__get_admin_id(pCtx, psz_shared_users, bufAdminId)  );
    }
	else if (bFromUserMaster)
	{
		SG_ERR_CHECK(  SG_REPO__USER_MASTER__OPEN(pCtx, &pRepoUserMaster)  );
		SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepoUserMaster, &pszUserMasterAdminId)  );
		memcpy(bufAdminId, pszUserMasterAdminId, sizeof(bufAdminId));
		//SG_memcpy2(pszUserMasterAdminId, bufAdminId);
		SG_NULLFREE(pCtx, pszUserMasterAdminId);
	}
    else
    {
        SG_ERR_CHECK(  SG_gid__generate(pCtx, bufAdminId, sizeof(bufAdminId))  );
    }

	SG_ERR_CHECK(  SG_repo__create__completely_new__empty__closet(pCtx, bufAdminId, pszStorage, pszHashMethod, pszRepoName)  );
	SG_ERR_CHECK(  SG_REPO__OPEN_REPO_INSTANCE(pCtx, pszRepoName, &pRepo)  );
	if (!psz_shared_users && !bFromUserMaster)
    {
        SG_ERR_CHECK(  SG_user__create_nobody(pCtx, pRepo)  );
    }

	SG_ERR_CHECK(  SG_repo__setup_basic_stuff(pCtx, pRepo, &pCSetFirst, NULL)  );

	if (psz_shared_users)
	{
		SG_ERR_CHECK(  SG_pull__admin(pCtx, pRepo, psz_shared_users, NULL, NULL, NULL, NULL)  );
	}
	else if (bFromUserMaster)
	{
		SG_ERR_CHECK(  SG_pull__admin__local(pCtx, pRepo, pRepoUserMaster, NULL)  );
	}

	SG_ERR_CHECK(  SG_changeset__get_id_ref(pCtx, pCSetFirst, &pszHidCSetFirst_ref)  );
	SG_ERR_CHECK(  SG_STRDUP(pCtx, pszHidCSetFirst_ref, &pszHidCSetFirst)  );

	SG_ERR_CHECK(  SG_repo__get_repo_id(pCtx, pRepo, &pszGidRepoId)  );

	*ppszGidRepoId = pszGidRepoId;
	*ppszHidCSetFirst = pszHidCSetFirst;

    SG_REPO_NULLFREE(pCtx, pRepo);
	SG_REPO_NULLFREE(pCtx, pRepoUserMaster);
	SG_CHANGESET_NULLFREE(pCtx, pCSetFirst);
	return;

fail:
	/* If we fail to pull the admin dags after the repo's been created, delete it. */
	if (pRepo)
	{
		SG_REPO_NULLFREE(pCtx, pRepo);
		if (pszRepoName)
			SG_ERR_IGNORE(  _vv_verbs__init_new_repo__delete_new_repo(pCtx, pszRepoName)  );
	}

	SG_REPO_NULLFREE(pCtx, pRepoUserMaster);
	SG_CHANGESET_NULLFREE(pCtx, pCSetFirst);
	SG_NULLFREE(pCtx, pszGidRepoId);
	SG_NULLFREE(pCtx, pszHidCSetFirst);
}
/**
 * Get the TNE Row for the (uiAliasGidParent, pszEntryname) pair.
 * This should not have any of the entryname ambiguity problems
 * that such a request on the live view might have.
 *
 */
void sg_wc_db__tne__get_row_by_parent_alias_and_entryname(SG_context * pCtx,
														  sg_wc_db * pDb,
														  const sg_wc_db__cset_row * pCSetRow,
														  SG_uint64 uiAliasGidParent,
														  const char * pszEntryname,
														  SG_bool * pbFound,
														  sg_wc_db__tne_row ** ppTneRow)
{
	sqlite3_stmt * pStmt = NULL;
	sg_wc_db__tne_row * pTneRow = NULL;
	int rc;

	SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pDb->psql, &pStmt,
									  ("SELECT"
									   "    alias_gid,"			// 0
									   "    hid,"				// 1
									   "    type,"				// 2
									   "    attrbits"			// 3
									   "  FROM %s"
									   "  WHERE alias_gid_parent = ?"
									   "    AND entryname = ?"),
									  pCSetRow->psz_tne_table_name)  );
	SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 1, uiAliasGidParent)  );
	SG_ERR_CHECK(  sg_sqlite__bind_text(pCtx,  pStmt, 2, pszEntryname)  );

	if ((rc=sqlite3_step(pStmt)) != SQLITE_ROW)
	{
		SG_int_to_string_buffer bufui64;

		if ((rc == SQLITE_DONE) && pbFound)
		{
			*pbFound = SG_FALSE;
			*ppTneRow = NULL;
			goto done;
		}

		SG_ERR_THROW2(  SG_ERR_SQLITE(rc),
						(pCtx, "sg_wc_db:%s can't find tne row for parent alias %s and entryname '%s'.",
						 pCSetRow->psz_tne_table_name,
						 SG_uint64_to_sz(uiAliasGidParent, bufui64),
						 pszEntryname)  );
	}

	SG_ERR_CHECK(  sg_wc_db__tne_row__alloc(pCtx, &pTneRow)  );

	pTneRow->p_s->uiAliasGid       = (SG_uint64)sqlite3_column_int64(pStmt, 0);
	pTneRow->p_s->uiAliasGidParent = uiAliasGidParent;
	SG_ERR_CHECK(  SG_STRDUP(pCtx, (const char *)sqlite3_column_text(pStmt, 1), &pTneRow->p_d->pszHid)  );
	pTneRow->p_s->tneType          = (SG_uint32)sqlite3_column_int(pStmt, 2);
	pTneRow->p_d->attrbits         = (SG_uint64)sqlite3_column_int64(pStmt, 3);
	SG_ERR_CHECK(  SG_STRDUP(pCtx, pszEntryname, &pTneRow->p_s->pszEntryname)  );

    SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );

#if TRACE_WC_DB
	SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR, "sg_wc_db__tne__get_row_by_parent_alias_and_entryname: found:\n")  );
	SG_ERR_IGNORE(  sg_wc_db__debug__tne_row__print(pCtx, pTneRow)  );
#endif

	if (pbFound)
		*pbFound = SG_TRUE;
	*ppTneRow = pTneRow;
	return;

fail:
	SG_WC_DB__TNE_ROW__NULLFREE(pCtx, pTneRow);
done:
    SG_ERR_IGNORE(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );
}
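/**
 * Minimal caller sketch (hypothetical, not part of the original source):
 * probe for a single entry under a known parent directory and release the
 * row when done.
 */
static void _probe_tne_entry(SG_context * pCtx,
							 sg_wc_db * pDb,
							 const sg_wc_db__cset_row * pCSetRow,
							 SG_uint64 uiAliasGidParent,
							 const char * pszEntryname)
{
	SG_bool bFound = SG_FALSE;
	sg_wc_db__tne_row * pTneRow = NULL;

	SG_ERR_CHECK(  sg_wc_db__tne__get_row_by_parent_alias_and_entryname(pCtx, pDb, pCSetRow,
																		uiAliasGidParent, pszEntryname,
																		&bFound, &pTneRow)  );
	if (bFound)
		SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR, "Found '%s' with content hid %s\n",
								   pszEntryname, pTneRow->p_d->pszHid)  );

fail:
	SG_WC_DB__TNE_ROW__NULLFREE(pCtx, pTneRow);
}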
void SG_repo__dag__find_direct_path_from_root(
        SG_context * pCtx,
        SG_repo* pRepo,
        SG_uint64 dagnum,
        const char* psz_csid,
        SG_varray** ppva
        )
{
    SG_varray* new_pva = NULL;
#if SG_DOUBLE_CHECK__PATH_TO_ROOT
    SG_varray* old_pva = NULL;
    SG_dagnode* pdn = NULL;
    char* psz_cur = NULL;
    SG_string* pstr1 = NULL;
    SG_string* pstr2 = NULL;
#endif

    SG_ERR_CHECK(  SG_repo__find_dag_path(pCtx, pRepo, dagnum, NULL, psz_csid, &new_pva)  );

#if SG_DOUBLE_CHECK__PATH_TO_ROOT
    SG_ERR_CHECK(  SG_VARRAY__ALLOC(pCtx, &old_pva)  );
    SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_csid, &psz_cur)  );
    while (1)
    {
        SG_uint32 count_parents = 0;
        const char** a_parents = NULL;

        SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_cur, &pdn)  );
        SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, old_pva, psz_cur)  );
        SG_ERR_CHECK(  SG_dagnode__get_parents__ref(pCtx, pdn, &count_parents, &a_parents)  );
        if (0 == count_parents)
        {
            break;
        }
        SG_NULLFREE(pCtx, psz_cur);
        SG_ERR_CHECK(  SG_STRDUP(pCtx, a_parents[0], &psz_cur)  );
        SG_DAGNODE_NULLFREE(pCtx, pdn);
    }
    SG_ERR_CHECK(  SG_varray__append__string__sz(pCtx, old_pva, "")  );

    SG_ERR_CHECK(  SG_string__alloc(pCtx, &pstr1)  );
    SG_ERR_CHECK(  SG_string__alloc(pCtx, &pstr2)  );
    SG_ERR_CHECK(  SG_varray__to_json(pCtx, old_pva, pstr1)  );
    SG_ERR_CHECK(  SG_varray__to_json(pCtx, new_pva, pstr2)  );
    if (0 != strcmp(SG_string__sz(pstr1), SG_string__sz(pstr2)))
    {
        // a failure here isn't actually ALWAYS bad.  there can be more than one path
        // to root.

        fprintf(stderr, "old way:\n");
        SG_VARRAY_STDERR(old_pva);
        fprintf(stderr, "new way:\n");
        SG_VARRAY_STDERR(new_pva);

        SG_ERR_THROW(  SG_ERR_UNSPECIFIED  );
    }
#endif

    *ppva = new_pva;
    new_pva = NULL;

fail:
    SG_VARRAY_NULLFREE(pCtx, new_pva);
#if SG_DOUBLE_CHECK__PATH_TO_ROOT
    SG_STRING_NULLFREE(pCtx, pstr1);
    SG_STRING_NULLFREE(pCtx, pstr2);
    SG_VARRAY_NULLFREE(pCtx, old_pva);
    SG_DAGNODE_NULLFREE(pCtx, pdn);
    SG_NULLFREE(pCtx, psz_cur);
#endif
}
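/**
 * Hedged usage sketch: dump the computed root path of a changeset as JSON.
 * Every call used here already appears elsewhere in this file.
 */
static void _dump_path_from_root(SG_context * pCtx,
                                 SG_repo * pRepo,
                                 SG_uint64 dagnum,
                                 const char * psz_csid)
{
    SG_varray * pva = NULL;
    SG_string * pstr = NULL;

    SG_ERR_CHECK(  SG_repo__dag__find_direct_path_from_root(pCtx, pRepo, dagnum, psz_csid, &pva)  );
    SG_ERR_CHECK(  SG_string__alloc(pCtx, &pstr)  );
    SG_ERR_CHECK(  SG_varray__to_json(pCtx, pva, pstr)  );
    SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR, "%s\n", SG_string__sz(pstr))  );

fail:
    SG_STRING_NULLFREE(pCtx, pstr);
    SG_VARRAY_NULLFREE(pCtx, pva);
}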
void SG_server__pull_request_fragball(SG_context* pCtx,
									  SG_repo* pRepo,
									  SG_vhash* pvhRequest,
									  const SG_pathname* pFragballDirPathname,
									  char** ppszFragballName,
									  SG_vhash** ppvhStatus)
{
	SG_pathname* pFragballPathname = NULL;
	SG_uint32* paDagNums = NULL;
    SG_rbtree* prbDagnodes = NULL;
	SG_string* pstrFragballName = NULL;
	char* pszRevFullHid = NULL;
	SG_rbtree_iterator* pit = NULL;
	SG_uint32* repoDagnums = NULL;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pFragballDirPathname);
	SG_NULLARGCHECK_RETURN(ppvhStatus);

#if TRACE_SERVER
	SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, pvhRequest, "pull fragball request")  );
#endif

	SG_ERR_CHECK(  SG_fragball__create(pCtx, pFragballDirPathname, &pFragballPathname)  );

	if (!pvhRequest)
	{
		// Add leaves from every dag to the fragball.
		SG_uint32 count_dagnums;
		SG_uint32 i;
		SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_dagnums, &paDagNums)  );

		for (i=0; i<count_dagnums; i++)
		{
			SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, paDagNums[i], &prbDagnodes)  );
			SG_ERR_CHECK(  SG_fragball__append__dagnodes(pCtx, pFragballPathname, pRepo, paDagNums[i], prbDagnodes)  );
			SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
		}

		SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
		SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
	}
	else
	{
		// Build the requested fragball.
		SG_bool found;

		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE, &found)  );
		if (found)
		{
			// Full clone requested.
			SG_ERR_CHECK(  SG_repo__fetch_repo__fragball(pCtx, pRepo, pFragballDirPathname, ppszFragballName) );
		}
		else
		{
			// Not a full clone.

			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &found)  );
			if (found)
			{
				// Dagnodes were requested.

				SG_uint32 generations = 0;
				SG_vhash* pvhDags;
				SG_uint32 count_requested_dagnums;
				SG_uint32 count_repo_dagnums = 0;
				SG_uint32 i;
				const char* pszDagNum = NULL;
				const SG_variant* pvRequestedNodes = NULL;
				SG_vhash* pvhRequestedNodes = NULL;
				const char* pszHidRequestedDagnode = NULL;

				// Were additional generations requested?
				SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__GENERATIONS, &found)  );
				if (found)
					SG_ERR_CHECK(  SG_vhash__get__uint32(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__GENERATIONS, &generations)  );

				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags)  );
				SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhDags, &count_requested_dagnums)  );
				if (count_requested_dagnums)
					SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_repo_dagnums, &repoDagnums)  );

				// For each requested dag, get the requested nodes.
				for (i=0; i<count_requested_dagnums; i++)
				{
					SG_uint32 iMissingNodeCount;
					SG_uint32 iDagnum;
					SG_uint32 j;
					SG_bool isValidDagnum = SG_FALSE;
					SG_bool bSpecificNodesRequested = SG_FALSE;

					// Get the dag's missing node vhash.
					SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhDags, i, &pszDagNum, &pvRequestedNodes)  );
					SG_ERR_CHECK(  SG_dagnum__from_sz__decimal(pCtx, pszDagNum, &iDagnum)  );

					// Verify that requested dagnum exists
					for (j = 0; j < count_repo_dagnums; j++)
					{
						if (repoDagnums[j] == iDagnum)
						{
							isValidDagnum = SG_TRUE;
							break;
						}
					}
					if (!isValidDagnum)
					{
						char buf[SG_DAGNUM__BUF_MAX__NAME];
						SG_ERR_CHECK(  SG_dagnum__to_name(pCtx, iDagnum, buf, sizeof(buf))  );
						SG_ERR_THROW2(SG_ERR_NO_SUCH_DAG, (pCtx, "%s", buf));
					}

					if (pvRequestedNodes)
					{
						SG_ERR_CHECK(  SG_variant__get__vhash(pCtx, pvRequestedNodes, &pvhRequestedNodes)  );

						// Get each node listed for the dag
						SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhRequestedNodes, &iMissingNodeCount)  );
						if (iMissingNodeCount > 0)
						{
							SG_uint32 j;
							const SG_variant* pvVal;

							bSpecificNodesRequested = SG_TRUE;

							SG_ERR_CHECK(  SG_RBTREE__ALLOC__PARAMS(pCtx, &prbDagnodes, iMissingNodeCount, NULL)  );
							for (j=0; j<iMissingNodeCount; j++)
							{
								SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhRequestedNodes, j, &pszHidRequestedDagnode, &pvVal)  );

								if (pvVal)
								{
									const char* pszVal;
									SG_ERR_CHECK(  SG_variant__get__sz(pCtx, pvVal, &pszVal)  );
									if (pszVal)
									{
										if (0 == strcmp(pszVal, SG_SYNC_REQUEST_VALUE_HID_PREFIX))
										{
											SG_ERR_CHECK(  SG_repo__hidlookup__dagnode(pCtx, pRepo, iDagnum, pszHidRequestedDagnode, &pszRevFullHid)  );
											pszHidRequestedDagnode = pszRevFullHid;
										}
										else if (0 == strcmp(pszVal, SG_SYNC_REQUEST_VALUE_TAG))
										{
											SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, pszHidRequestedDagnode, &pszRevFullHid)  );
											if (!pszRevFullHid)
												SG_ERR_THROW(SG_ERR_TAG_NOT_FOUND);
											pszHidRequestedDagnode = pszRevFullHid;
										}
										else
											SG_ERR_THROW(SG_ERR_PULL_INVALID_FRAGBALL_REQUEST);
									}
								}
								
								SG_ERR_CHECK(  SG_rbtree__update(pCtx, prbDagnodes, pszHidRequestedDagnode)  );
								// Get additional dagnode generations, if requested.
								SG_ERR_CHECK(  SG_sync__add_n_generations(pCtx, pRepo, pszHidRequestedDagnode, prbDagnodes, generations)  );
								SG_NULLFREE(pCtx, pszRevFullHid);
							}
						}
					}

					if (!bSpecificNodesRequested)
					{
						// When no specific nodes are in the request, add all leaves.
						SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagnum, &prbDagnodes)  );

						// Get additional dagnode generations, if requested.
						if (generations)
						{
							SG_bool found;
							const char* hid;
							
							SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prbDagnodes, &found, &hid, NULL)  );
							while (found)
							{
								SG_ERR_CHECK(  SG_sync__add_n_generations(pCtx, pRepo, hid, prbDagnodes, generations)  );
								SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &found, &hid, NULL)  );
							}
						}
					}

					if (prbDagnodes) // can be null when leaves of an empty dag are requested
					{
						SG_ERR_CHECK(  SG_fragball__append__dagnodes(pCtx, pFragballPathname, pRepo, iDagnum, prbDagnodes)  );
						SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
					}

				} // dagnum loop
			} // if "dags" exists

			/* Add requested blobs to the fragball */
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &found)  );
			if (found)
			{
				// Blobs were requested.
				SG_vhash* pvhBlobs;
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &pvhBlobs)  );
				SG_ERR_CHECK(  SG_sync__add_blobs_to_fragball(pCtx, pRepo, pFragballPathname, pvhBlobs)  );
			}

			SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
		}
	}
	
	/* fallthru */
fail:
	// If we had an error, delete the half-baked fragball.
	if (pFragballPathname && SG_context__has_err(pCtx))
		SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pFragballPathname)  );

	SG_PATHNAME_NULLFREE(pCtx, pFragballPathname);
	SG_NULLFREE(pCtx, paDagNums);
	SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
	SG_STRING_NULLFREE(pCtx, pstrFragballName);
	SG_NULLFREE(pCtx, pszRevFullHid);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_NULLFREE(pCtx, repoDagnums);
}
void SG_cmd_util__get_username_for_repo(
	SG_context *pCtx,
	const char *szRepoName,
	char **ppUsername
	)
{
	SG_string * pUsername = NULL;
	SG_repo * pRepo = NULL;
	char * psz_username = NULL;
	SG_curl * pCurl = NULL;
	SG_string * pUri = NULL;
	SG_string * pResponse = NULL;
	SG_int32 responseStatusCode = 0;
	SG_vhash * pRepoInfo = NULL;
	char * psz_userid = NULL;
	SG_varray * pUsers = NULL;

	SG_NULLARGCHECK_RETURN(ppUsername);

	if(!szRepoName)
	{
		// Look up username based on 'whoami' of repo associated with cwd.

		SG_ERR_IGNORE(  SG_cmd_util__get_repo_from_cwd(pCtx, &pRepo, NULL)  );
		if(pRepo)
			SG_ERR_IGNORE(  SG_user__get_username_for_repo(pCtx, pRepo, &psz_username)  );
		SG_REPO_NULLFREE(pCtx, pRepo);
	}
	else if(SG_sz__starts_with(szRepoName, "http://") || SG_sz__starts_with(szRepoName, "https://"))
	{
		// Look up username based on 'whoami' of admin id of remote repo.

		SG_ERR_CHECK(  SG_curl__alloc(pCtx, &pCurl)  );

		SG_ERR_CHECK(  SG_STRING__ALLOC__SZ(pCtx, &pUri, szRepoName)  );
		SG_ERR_CHECK(  SG_string__append__sz(pCtx, pUri, ".json")  );

		SG_ERR_CHECK(  SG_curl__reset(pCtx, pCurl)  );
		SG_ERR_CHECK(  SG_curl__setopt__sz(pCtx, pCurl, CURLOPT_URL, SG_string__sz(pUri))  );

		SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pResponse)  );
		SG_ERR_CHECK(  SG_curl__set__write_string(pCtx, pCurl, pResponse)  );

		SG_ERR_CHECK(  SG_curl__perform(pCtx, pCurl)  );
		SG_ERR_CHECK(  SG_curl__getinfo__int32(pCtx, pCurl, CURLINFO_RESPONSE_CODE, &responseStatusCode)  );

		if(responseStatusCode==200)
		{
			const char * szAdminId = NULL;
			SG_ERR_CHECK(  SG_VHASH__ALLOC__FROM_JSON__STRING(pCtx, &pRepoInfo, pResponse)  );
			SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pRepoInfo, SG_SYNC_REPO_INFO_KEY__ADMIN_ID, &szAdminId)  );

			SG_ERR_CHECK(  SG_string__clear(pCtx, pUri)  );
			SG_ERR_CHECK(  SG_string__append__format(pCtx, pUri, "/admin/%s/whoami/userid", szAdminId)  );
			SG_ERR_IGNORE(  SG_localsettings__get__sz(pCtx, SG_string__sz(pUri), NULL, &psz_userid, NULL)  );

			if(psz_userid)
			{
				// We now have the userid. Look up the username.

				SG_ERR_CHECK(  SG_string__clear(pCtx, pUri)  );
				SG_ERR_CHECK(  SG_string__append__format(pCtx, pUri, "%s/users.json", szRepoName)  );

				SG_ERR_CHECK(  SG_curl__reset(pCtx, pCurl)  );
				SG_ERR_CHECK(  SG_curl__setopt__sz(pCtx, pCurl, CURLOPT_URL, SG_string__sz(pUri))  );

				SG_ERR_CHECK(  SG_string__clear(pCtx, pResponse)  );
				SG_ERR_CHECK(  SG_curl__set__write_string(pCtx, pCurl, pResponse)  );

				SG_ERR_CHECK(  SG_curl__perform(pCtx, pCurl)  );
				SG_ERR_CHECK(  SG_curl__getinfo__int32(pCtx, pCurl, CURLINFO_RESPONSE_CODE, &responseStatusCode)  );

				if(responseStatusCode==200)
				{
					SG_uint32 i, nUsers;
					SG_ERR_CHECK(  SG_VARRAY__ALLOC__FROM_JSON__STRING(pCtx, &pUsers, pResponse)  );
					SG_ERR_CHECK(  SG_varray__count(pCtx, pUsers, &nUsers)  );
					for(i=0; i<nUsers; ++i)
					{
						SG_vhash * pUser = NULL;
						const char * psz_recid = NULL;
						SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pUsers, i, &pUser)  );
						SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pUser, "recid", &psz_recid)  );
						if(!strcmp(psz_recid, psz_userid))
						{
							const char * psz_name = NULL;
							SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pUser, "name", &psz_name)  );
							SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_name, &psz_username)  );
							break;
						}
					}
					SG_VARRAY_NULLFREE(pCtx, pUsers);
				}
				
				SG_NULLFREE(pCtx, psz_userid);
			}

			SG_VHASH_NULLFREE(pCtx, pRepoInfo);
		}

		SG_STRING_NULLFREE(pCtx, pResponse);
		SG_STRING_NULLFREE(pCtx, pUri);
		SG_CURL_NULLFREE(pCtx, pCurl);
	}
	else
	{
		// Look up username based on 'whoami' of repo provided.

		SG_ERR_CHECK(  SG_REPO__OPEN_REPO_INSTANCE(pCtx, szRepoName, &pRepo)  );
		SG_ERR_IGNORE(  SG_user__get_username_for_repo(pCtx, pRepo, &psz_username)  );
		SG_REPO_NULLFREE(pCtx, pRepo);
	}

	*ppUsername = psz_username;

	return;
fail:
	SG_STRING_NULLFREE(pCtx, pUsername);
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_NULLFREE(pCtx, psz_username);
	SG_CURL_NULLFREE(pCtx, pCurl);
	SG_STRING_NULLFREE(pCtx, pUri);
	SG_STRING_NULLFREE(pCtx, pResponse);
	SG_VHASH_NULLFREE(pCtx, pRepoInfo);
	SG_NULLFREE(pCtx, psz_userid);
	SG_VARRAY_NULLFREE(pCtx, pUsers);
}
void SG_sync_remote__request_fragball(
	SG_context* pCtx,
	SG_repo* pRepo,
	const SG_pathname* pFragballDirPathname,
	SG_vhash* pvhRequest,
	char** ppszFragballName)
{
	SG_pathname* pFragballPathname = NULL;
	SG_uint64* paDagNums = NULL;
	SG_string* pstrFragballName = NULL;
	SG_rbtree* prbDagnodes = NULL;
	SG_rbtree_iterator* pit = NULL;
	SG_rev_spec* pRevSpec = NULL;
	SG_stringarray* psaFullHids = NULL;
	SG_rbtree* prbDagnums = NULL;
	SG_dagfrag* pFrag = NULL;
	char* pszRepoId = NULL;
	char* pszAdminId = NULL;
    SG_fragball_writer* pfb = NULL;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pFragballDirPathname);

    {
        char buf_filename[SG_TID_MAX_BUFFER_LENGTH];
        SG_ERR_CHECK(  SG_tid__generate(pCtx, buf_filename, sizeof(buf_filename))  );
        SG_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pFragballPathname, pFragballDirPathname, buf_filename)  );
    }

	if (!pvhRequest)
	{
		// Add leaves from every dag to the fragball.
		SG_uint32 count_dagnums;
		SG_uint32 i;

        SG_ERR_CHECK(  SG_fragball_writer__alloc(pCtx, pRepo, pFragballPathname, SG_TRUE, 2, &pfb)  );
		SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_dagnums, &paDagNums)  );

		for (i=0; i<count_dagnums; i++)
		{
			SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, paDagNums[i], &prbDagnodes)  );
			SG_ERR_CHECK(  SG_fragball__write__dagnodes(pCtx, pfb, paDagNums[i], prbDagnodes)  );
			SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
		}

		SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
		SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
        SG_ERR_CHECK(  SG_fragball_writer__close(pCtx, pfb)  );
	}
	else
	{
		// Specific dags/nodes were requested. Build that fragball.
		SG_bool found;

#if TRACE_SYNC_REMOTE && 0
		SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, pvhRequest, "fragball request")  );
#endif

		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE, &found)  );
		if (found)
		{
            // SG_SYNC_STATUS_KEY__CLONE_REQUEST is currently ignored
            SG_ERR_CHECK(  SG_repo__fetch_repo__fragball(pCtx, pRepo, 3, pFragballDirPathname, ppszFragballName) );
		}
		else
		{
			// Not a full clone.

            SG_ERR_CHECK(  SG_fragball_writer__alloc(pCtx, pRepo, pFragballPathname, SG_TRUE, 2, &pfb)  );
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__SINCE, &found)  );
			if (found)
			{
                SG_vhash* pvh_since = NULL;

                SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__SINCE, &pvh_since)  );

                SG_ERR_CHECK(  _do_since(pCtx, pRepo, pvh_since, pfb)  );
            }

			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &found)  );
			if (found)
			{
				// Specific Dagnodes were requested. Add just those nodes to our "start from" rbtree.

				SG_vhash* pvhDags;
				SG_uint32 count_requested_dagnums;
				SG_uint32 i;
				const SG_variant* pvRevSpecs = NULL;
				SG_vhash* pvhRevSpec = NULL;

				// For each requested dag, get rev spec request.
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags)  );
				SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhDags, &count_requested_dagnums)  );
				if (count_requested_dagnums)
					SG_ERR_CHECK(  SG_repo__list_dags__rbtree(pCtx, pRepo, &prbDagnums)  );
				for (i=0; i<count_requested_dagnums; i++)
				{
					SG_bool isValidDagnum = SG_FALSE;
					SG_bool bSpecificNodesRequested = SG_FALSE;
					const char* pszRefDagNum = NULL;
					SG_uint64 iDagnum;

					// Get the dag's missing node vhash.
					SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhDags, i, &pszRefDagNum, &pvRevSpecs)  );

					// Verify that requested dagnum exists
					SG_ERR_CHECK(  SG_rbtree__find(pCtx, prbDagnums, pszRefDagNum, &isValidDagnum, NULL)  );
					if (!isValidDagnum)
                        continue;

					SG_ERR_CHECK(  SG_dagnum__from_sz__hex(pCtx, pszRefDagNum, &iDagnum)  );
					
					if (pvRevSpecs && pvRevSpecs->type != SG_VARIANT_TYPE_NULL)
					{
						SG_uint32 countRevSpecs = 0;
						
						SG_ERR_CHECK(  SG_variant__get__vhash(pCtx, pvRevSpecs, &pvhRevSpec)  );
						SG_ERR_CHECK(  SG_rev_spec__from_vash(pCtx, pvhRevSpec, &pRevSpec)  );

						// Process the rev spec for each dag
						SG_ERR_CHECK(  SG_rev_spec__count(pCtx, pRevSpec, &countRevSpecs)  );
						if (countRevSpecs > 0)
						{
							bSpecificNodesRequested = SG_TRUE;

							SG_ERR_CHECK(  SG_rev_spec__get_all__repo(pCtx, pRepo, pRevSpec, SG_TRUE, &psaFullHids, NULL)  );
							SG_ERR_CHECK(  SG_stringarray__to_rbtree_keys(pCtx, psaFullHids, &prbDagnodes)  );
							SG_STRINGARRAY_NULLFREE(pCtx, psaFullHids);
						}
						SG_REV_SPEC_NULLFREE(pCtx, pRevSpec);
					}

					if (!bSpecificNodesRequested)
					{
						// When no specific nodes are in the request, add all leaves.
						SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagnum, &prbDagnodes)  );
					}

					if (prbDagnodes) // can be null when leaves of an empty dag are requested
					{
						// Get the leaves of the other repo, which we need to connect to.
						SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__LEAVES, &found)  );
						if (found)
						{
							SG_vhash* pvhRefAllLeaves;
							SG_vhash* pvhRefDagLeaves;
							SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__LEAVES, &pvhRefAllLeaves)  );
							SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, pszRefDagNum, &found)  );
							{
								SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRefAllLeaves, pszRefDagNum, &pvhRefDagLeaves)  );
								SG_ERR_CHECK(  SG_sync__build_best_guess_dagfrag(pCtx, pRepo, iDagnum, 
									prbDagnodes, pvhRefDagLeaves, &pFrag)  );
							}
						}
						else
						{
							// The other repo's leaves weren't provided: add just the requested nodes, make no attempt to connect.
							SG_ERR_CHECK(  SG_repo__get_repo_id(pCtx, pRepo, &pszRepoId)  );
							SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepo, &pszAdminId)  );
							SG_ERR_CHECK(  SG_dagfrag__alloc(pCtx, &pFrag, pszRepoId, pszAdminId, iDagnum)  );
							SG_ERR_CHECK(  SG_dagfrag__load_from_repo__simple(pCtx, pFrag, pRepo, prbDagnodes)  );
							SG_NULLFREE(pCtx, pszRepoId);
							SG_NULLFREE(pCtx, pszAdminId);
						}

						SG_ERR_CHECK(  SG_fragball__write__frag(pCtx, pfb, pFrag)  );
						
						SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
						SG_DAGFRAG_NULLFREE(pCtx, pFrag);
					}

				} // dagnum loop
			} // if "dags" exists

			/* Add requested blobs to the fragball */
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &found)  );
			if (found)
			{
				// Blobs were requested.
				SG_vhash* pvhBlobs;
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &pvhBlobs)  );
				SG_ERR_CHECK(  SG_sync__add_blobs_to_fragball(pCtx, pfb, pvhBlobs)  );
			}

			SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
		}
        SG_ERR_CHECK(  SG_fragball_writer__close(pCtx, pfb)  );
	}

	/* fallthru */
fail:
	// If we had an error, delete the half-baked fragball.
	if (pFragballPathname && SG_context__has_err(pCtx))
    {
		SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pFragballPathname)  );
    }

	SG_PATHNAME_NULLFREE(pCtx, pFragballPathname);
	SG_NULLFREE(pCtx, paDagNums);
	SG_STRING_NULLFREE(pCtx, pstrFragballName);
	SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_RBTREE_NULLFREE(pCtx, prbDagnums);
	SG_REV_SPEC_NULLFREE(pCtx, pRevSpec);
	SG_STRINGARRAY_NULLFREE(pCtx, psaFullHids);
	SG_DAGFRAG_NULLFREE(pCtx, pFrag);
	SG_NULLFREE(pCtx, pszRepoId);
	SG_NULLFREE(pCtx, pszAdminId);
    SG_FRAGBALL_WRITER_NULLFREE(pCtx, pfb);
}
void SG_repo__db__calc_delta(
        SG_context * pCtx,
        SG_repo* pRepo,
        SG_uint64 dagnum,
        const char* psz_csid_from,
        const char* psz_csid_to,
        SG_uint32 flags,
        SG_vhash** ppvh_add,
        SG_vhash** ppvh_remove
        )
{
    SG_dagnode* pdn_from = NULL;
    SG_dagnode* pdn_to = NULL;
    SG_int32 gen_from = -1;
    SG_int32 gen_to = -1;
    SG_varray* pva_direct_backward_path = NULL;
    SG_varray* pva_direct_forward_path = NULL;
    SG_vhash* pvh_add = NULL;
    SG_vhash* pvh_remove = NULL;
    SG_rbtree* prb_temp = NULL;
    SG_daglca* plca = NULL;
    char* psz_csid_ancestor = NULL;

    SG_NULLARGCHECK_RETURN(psz_csid_from);
    SG_NULLARGCHECK_RETURN(psz_csid_to);
    SG_NULLARGCHECK_RETURN(pRepo);
    SG_NULLARGCHECK_RETURN(ppvh_add);
    SG_NULLARGCHECK_RETURN(ppvh_remove);

    SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_csid_from, &pdn_from)  );
    SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pdn_from, &gen_from)  );
    SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, dagnum, psz_csid_to, &pdn_to)  );
    SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pdn_to, &gen_to)  );

    if (gen_from > gen_to)
    {
        SG_ERR_CHECK(  SG_repo__dag__find_direct_backward_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    psz_csid_from,
                    psz_csid_to,
                    &pva_direct_backward_path
                    )  );
        if (pva_direct_backward_path)
        {
            SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_add)  );
            SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_remove)  );
            SG_ERR_CHECK(  SG_db__make_delta_from_path(
                        pCtx,
                        pRepo,
                        dagnum,
                        pva_direct_backward_path,
                        flags,
                        pvh_add,
                        pvh_remove
                        )  );
        }
    }
    else if (gen_from < gen_to)
    {
        SG_ERR_CHECK(  SG_repo__dag__find_direct_backward_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    psz_csid_to,
                    psz_csid_from,
                    &pva_direct_forward_path
                    )  );
        if (pva_direct_forward_path)
        {
            SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_add)  );
            SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_remove)  );
            SG_ERR_CHECK(  SG_db__make_delta_from_path(
                        pCtx,
                        pRepo,
                        dagnum,
                        pva_direct_forward_path,
                        flags,
                        pvh_remove,
                        pvh_add
                        )  );
        }
    }

    if (!pvh_add && !pvh_remove)
    {
        SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb_temp)  );
        SG_ERR_CHECK(  SG_rbtree__add(pCtx,prb_temp,psz_csid_from)  );
        SG_ERR_CHECK(  SG_rbtree__add(pCtx,prb_temp,psz_csid_to)  );
        SG_ERR_CHECK(  SG_repo__get_dag_lca(pCtx,pRepo,dagnum,prb_temp,&plca)  );
        {
            const char* psz_hid = NULL;
            SG_daglca_node_type node_type = 0;
            SG_int32 gen = -1;

            SG_ERR_CHECK(  SG_daglca__iterator__first(pCtx,
                                                      NULL,
                                                      plca,
                                                      SG_FALSE,
                                                      &psz_hid,
                                                      &node_type,
                                                      &gen,
                                                      NULL)  );
            SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_hid, &psz_csid_ancestor)  );
        }

        SG_ERR_CHECK(  SG_repo__dag__find_direct_backward_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    psz_csid_from,
                    psz_csid_ancestor,
                    &pva_direct_backward_path
                    )  );
        SG_ERR_CHECK(  SG_repo__dag__find_direct_backward_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    psz_csid_to,
                    psz_csid_ancestor,
                    &pva_direct_forward_path
                    )  );
        SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_add)  );
        SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_remove)  );
        SG_ERR_CHECK(  SG_db__make_delta_from_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    pva_direct_backward_path,
                    flags,
                    pvh_add,
                    pvh_remove
                    )  );
        SG_ERR_CHECK(  SG_db__make_delta_from_path(
                    pCtx,
                    pRepo,
                    dagnum,
                    pva_direct_forward_path,
                    flags,
                    pvh_remove,
                    pvh_add
                    )  );
    }

    *ppvh_add = pvh_add;
    pvh_add = NULL;

    *ppvh_remove = pvh_remove;
    pvh_remove = NULL;

fail:
    SG_NULLFREE(pCtx, psz_csid_ancestor);
    SG_RBTREE_NULLFREE(pCtx, prb_temp);
    SG_DAGLCA_NULLFREE(pCtx, plca);
    SG_VHASH_NULLFREE(pCtx, pvh_add);
    SG_VHASH_NULLFREE(pCtx, pvh_remove);
    SG_VARRAY_NULLFREE(pCtx, pva_direct_backward_path);
    SG_VARRAY_NULLFREE(pCtx, pva_direct_forward_path);
    SG_DAGNODE_NULLFREE(pCtx, pdn_from);
    SG_DAGNODE_NULLFREE(pCtx, pdn_to);
}
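/**
 * Hedged caller sketch: compute the db delta between two changesets and
 * dump the resulting vhashes.  Passing 0 for flags is an assumption here;
 * real callers may use specific flag values.
 */
static void _show_db_delta(SG_context * pCtx,
                           SG_repo * pRepo,
                           SG_uint64 dagnum,
                           const char * psz_csid_from,
                           const char * psz_csid_to)
{
    SG_vhash * pvh_add = NULL;
    SG_vhash * pvh_remove = NULL;

    SG_ERR_CHECK(  SG_repo__db__calc_delta(pCtx, pRepo, dagnum,
                                           psz_csid_from, psz_csid_to,
                                           0 /* flags: assumed default */,
                                           &pvh_add, &pvh_remove)  );
    SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, pvh_add, "added")  );
    SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, pvh_remove, "removed")  );

fail:
    SG_VHASH_NULLFREE(pCtx, pvh_add);
    SG_VHASH_NULLFREE(pCtx, pvh_remove);
}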
void SG_workingdir__create_and_get(
	SG_context* pCtx,
	const char* pszDescriptorName,
	const SG_pathname* pPathDirPutTopLevelDirInHere,
	SG_bool bCreateDrawer,
    const char* psz_spec_hid_cs_baseline
	)
{
	SG_repo* pRepo = NULL;
	SG_rbtree* pIdsetLeaves = NULL;
	SG_uint32 count_leaves = 0;
	SG_changeset* pcs = NULL;
	const char* pszidUserSuperRoot = NULL;
	SG_bool b = SG_FALSE;
    char* psz_hid_cs_baseline = NULL;
	SG_pendingtree * pPendingTree = NULL;
	SG_vhash * pvhTimestamps = NULL;

	/*
	 * Fetch the descriptor by its given name and use it to connect to
	 * the repo.
	 */
	SG_ERR_CHECK(  SG_repo__open_repo_instance(pCtx, pszDescriptorName, &pRepo)  );


	if (psz_spec_hid_cs_baseline)
	{
		SG_ERR_CHECK(  SG_strdup(pCtx, psz_spec_hid_cs_baseline, &psz_hid_cs_baseline)  );
	}
	else
    {
        const char* psz_hid = NULL;
        /*
         * If you do not specify a hid to be the baseline, then this routine
         * currently only works if there is exactly one leaf in the repo.
         */
        SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo,SG_DAGNUM__VERSION_CONTROL,&pIdsetLeaves)  );
        SG_ERR_CHECK(  SG_rbtree__count(pCtx, pIdsetLeaves, &count_leaves)  );

		if (count_leaves != 1)
			SG_ERR_THROW(  SG_ERR_MULTIPLE_HEADS_FROM_DAGNODE  );

        SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, pIdsetLeaves, &b, &psz_hid, NULL)  );

        SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_hid, &psz_hid_cs_baseline)  );
    }

	/*
	 * Load the desired changeset from the repo so we can look up the
	 * id of its user root directory
	 */
	SG_ERR_CHECK(  SG_changeset__load_from_repo(pCtx, pRepo, psz_hid_cs_baseline, &pcs)  );
	SG_ERR_CHECK(  SG_changeset__get_root(pCtx, pcs, &pszidUserSuperRoot)  );

	if (bCreateDrawer)
	{
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhTimestamps)  );

		// Retrieve everything into the WD and capture the timestamps on the files that we create.
		SG_ERR_CHECK(  sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere, pszidUserSuperRoot, pvhTimestamps)  );

		// this creates "repo.json" with the repo-descriptor.
		SG_ERR_CHECK(  SG_workingdir__set_mapping(pCtx, pPathDirPutTopLevelDirInHere, pszDescriptorName, NULL)  );

		// this creates an empty "wd.json" file (which doesn't know anything).
		SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, pPathDirPutTopLevelDirInHere, SG_TRUE, &pPendingTree)  );

		// force set the initial parents to the current changeset.
		SG_ERR_CHECK(  SG_pendingtree__set_single_wd_parent(pCtx, pPendingTree, psz_hid_cs_baseline)  );

		// force initialize the timestamp cache to the list that we just built; this should
		// be the only timestamps in the cache since we just populated the WD.
		SG_ERR_CHECK(  SG_pendingtree__set_wd_timestamp_cache(pCtx, pPendingTree, &pvhTimestamps)  );	// this steals our vhash

		SG_ERR_CHECK(  SG_pendingtree__save(pCtx, pPendingTree)  );
	}
	else
	{
		// Retrieve everything into the WD but do not create .sgdrawer or record timestamps.
		// This is more like an EXPORT operation.
		SG_ERR_CHECK(  sg_workingdir__do_get_dir__top(pCtx, pRepo, pPathDirPutTopLevelDirInHere, pszidUserSuperRoot, NULL)  );
	}


fail:
	SG_VHASH_NULLFREE(pCtx, pvhTimestamps);
    SG_NULLFREE(pCtx, psz_hid_cs_baseline);
	SG_CHANGESET_NULLFREE(pCtx, pcs);
	SG_RBTREE_NULLFREE(pCtx, pIdsetLeaves);
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_PENDINGTREE_NULLFREE(pCtx, pPendingTree);
}
void sg_wc_liveview_item__alloc__add_special(SG_context * pCtx,
											 sg_wc_liveview_item ** ppLVI,
											 SG_wc_tx * pWcTx,
											 SG_uint64 uiAliasGid,
											 SG_uint64 uiAliasGidParent,
											 const char * pszEntryname,
											 SG_treenode_entry_type tneType,
											 const char * pszHidMerge,
											 SG_int64 attrbits,
											 SG_wc_status_flags statusFlagsAddSpecialReason)
{
	sg_wc_liveview_item * pLVI = NULL;
	SG_bool bFoundIssue = SG_FALSE;

	SG_ERR_CHECK(  SG_alloc1(pCtx, pLVI)  );

	// caller needs to set the backptr if appropriate
	// if/when it adds this LVI to the LVD's vector.
	pLVI->pLiveViewDir = NULL;

	pLVI->uiAliasGid = uiAliasGid;

	SG_ERR_CHECK(  SG_STRING__ALLOC__SZ(pCtx,
										&pLVI->pStringEntryname,
										pszEntryname)  );

	// During the QUEUE phase where we are doing this
	// special-add, we have not yet actually created
	// the item on disk.  So we should indicate that
	// scandir/readdir didn't know anything about it.
	//
	// TODO 2012/01/31 During the APPLY phase, we need to
	// TODO            fix-up this field.  Because all of
	// TODO            the __get_original_ and __get_current_
	// TODO            routines below assume that this field
	// TODO            is set.  That is, if you want to do
	// TODO            additional operations (like status)
	// TODO            after a merge, for example.
	// TODO
	// TODO 2012/04/12 Think about using pLVI->queuedOverwrite
	// TODO            fields for this.
	// NOTE
	// NOTE 2012/05/16 Setting this field to NULL caused a problem
	// NOTE            in _deal_with_moved_out_list() and
	// NOTE            sg_wc_liveview_item__alter_structure__move_rename()
	// NOTE            during UPDATE when an ADD-SPECIAL item was initially
	// NOTE            PARKED because of a transient collision (because the
	// NOTE            final UNPARK step uses move/rename to do the work).

	pLVI->pPrescanRow = NULL;

	// because a liveview_item must start as an
	// exact clone of a scanrow, there cannot be
	// any in-tx changes yet for it.
	SG_ERR_CHECK(  SG_WC_DB__PC_ROW__ALLOC(pCtx, &pLVI->pPcRow_PC)  );

	if (statusFlagsAddSpecialReason & SG_WC_STATUS_FLAGS__S__MERGE_CREATED)
		pLVI->pPcRow_PC->flags_net = SG_WC_DB__PC_ROW__FLAGS_NET__ADD_SPECIAL_M;
	else if (statusFlagsAddSpecialReason & SG_WC_STATUS_FLAGS__S__UPDATE_CREATED)
		pLVI->pPcRow_PC->flags_net = SG_WC_DB__PC_ROW__FLAGS_NET__ADD_SPECIAL_U;
	else
		SG_ERR_THROW2(  SG_ERR_INVALIDARG,
						(pCtx, "Invalid statusFlagsAddSpecialReason for '%s'", pszEntryname)  );

	pLVI->pPcRow_PC->p_s->uiAliasGid = uiAliasGid;
	pLVI->pPcRow_PC->p_s->uiAliasGidParent = uiAliasGidParent;
	SG_ERR_CHECK(  SG_STRDUP(pCtx, pszEntryname, &pLVI->pPcRow_PC->p_s->pszEntryname)  );
	pLVI->pPcRow_PC->p_s->tneType = tneType;
	if (pszHidMerge)
		SG_ERR_CHECK(  SG_STRDUP(pCtx, pszHidMerge, &pLVI->pPcRow_PC->pszHidMerge)  );

	pLVI->tneType = tneType;
	pLVI->scan_flags_Live = SG_WC_PRESCAN_FLAGS__CONTROLLED_ACTIVE_POSTSCAN;

	if (statusFlagsAddSpecialReason & SG_WC_STATUS_FLAGS__A__SPARSE)
	{
		pLVI->pPcRow_PC->flags_net |= SG_WC_DB__PC_ROW__FLAGS_NET__SPARSE;
		SG_ERR_CHECK(  sg_wc_db__state_dynamic__alloc(pCtx, &pLVI->pPcRow_PC->p_d_sparse)  );
		pLVI->pPcRow_PC->p_d_sparse->attrbits = attrbits;
		if (tneType != SG_TREENODEENTRY_TYPE_DIRECTORY)
		{
			SG_ASSERT( pszHidMerge && *pszHidMerge );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, pszHidMerge, &pLVI->pPcRow_PC->p_d_sparse->pszHid)  );
		}
		pLVI->scan_flags_Live = SG_WC_PRESCAN_FLAGS__CONTROLLED_ACTIVE_SPARSE;	// not |=
	}

	pLVI->pPcRow_PC->ref_attrbits = attrbits;

	SG_ERR_CHECK(  sg_wc_db__issue__get_issue(pCtx, pWcTx->pDb,
											  pLVI->uiAliasGid,
											  &bFoundIssue,
											  &pLVI->statusFlags_x_xr_xu,
											  &pLVI->pvhIssue,
											  &pLVI->pvhSavedResolutions)  );

	*ppLVI = pLVI;
	return;

fail:
	SG_WC_LIVEVIEW_ITEM__NULLFREE(pCtx, pLVI);
}
/**
 * Get the current content HID and optionally the size.
 * (Note that the current HID is not usually defined for a directory.
 * And therefore the content size of a directory is not usually
 * defined either.)
 *
 */
void sg_wc_liveview_item__get_current_content_hid(SG_context * pCtx,
												  sg_wc_liveview_item * pLVI,
												  SG_wc_tx * pWcTx,
												  SG_bool bNoTSC,
												  char ** ppszHidContent,
												  SG_uint64 * pSize)
{
	if (pLVI->queuedOverwrites.pvhContent)
	{
		// We have a QUEUED operation on this item that changed the
		// contents.  Get the 'current' value from the journal.

		const char * psz = NULL;
		
		SG_ERR_CHECK_RETURN(  SG_vhash__check__sz(pCtx, pLVI->queuedOverwrites.pvhContent, "hid", &psz)  );
		if (psz)
		{
			// last overwrite-type operation used an HID.
#if TRACE_WC_LIE
			SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
									   "GetCurrentContentHid: using journal %s for: %s\n",
									   psz, SG_string__sz(pLVI->pStringEntryname))  );
#endif
			if (pSize)
				SG_ERR_CHECK_RETURN(  _fetch_size_of_blob(pCtx, pWcTx, psz, pSize)  );

			SG_ERR_CHECK_RETURN(  SG_strdup(pCtx, psz, ppszHidContent)  );
			return;
		}

		SG_ERR_CHECK_RETURN(  SG_vhash__check__sz(pCtx, pLVI->queuedOverwrites.pvhContent, "file", &psz)  );
		if (psz)
		{
			// last overwrite-type operation used a TEMP file.

			SG_ERR_CHECK_RETURN(  sg_wc_compute_file_hid__sz(pCtx, pWcTx, psz, ppszHidContent, pSize)  );
			return;
		}

		SG_ERR_CHECK_RETURN(  SG_vhash__check__sz(pCtx, pLVI->queuedOverwrites.pvhContent, "target", &psz)  );
		if (psz)
		{
			// last overwrite-type operation gave us a SYMLINK-TARGET.
			// it is no problem to compute this, i'm just being
			// lazy since i'm not sure we need this.  
			SG_ERR_THROW2_RETURN(  SG_ERR_NOTIMPLEMENTED,
								   (pCtx,
									"GetCurrentContentHid: using journal: TODO compute HID of symlink target '%s' for: %s",
									psz, SG_string__sz(pLVI->pStringEntryname))  );
			// TODO also return size
		}

		SG_ERR_THROW2_RETURN(  SG_ERR_NOTIMPLEMENTED,
							   (pCtx,
								"GetCurrentContentHid: required field missing from vhash for: %s",
								SG_string__sz(pLVI->pStringEntryname))  );
	}

	SG_ASSERT_RELEASE_RETURN( (pLVI->pPrescanRow) );

	if (SG_WC_PRESCAN_FLAGS__IS_CONTROLLED_SPARSE(pLVI->scan_flags_Live))
	{
		if (pLVI->pPcRow_PC)
		{
			SG_ASSERT_RELEASE_RETURN( (pLVI->pPcRow_PC->p_d_sparse) );
			SG_ERR_CHECK_RETURN(  SG_STRDUP(pCtx,
											pLVI->pPcRow_PC->p_d_sparse->pszHid,
											ppszHidContent)  );
			if (pSize)
				SG_ERR_CHECK_RETURN(  _fetch_size_of_blob(pCtx, pWcTx,
														  pLVI->pPcRow_PC->p_d_sparse->pszHid,
														  pSize)  );
		}
		else if (pLVI->pPrescanRow->pPcRow_Ref)
		{
			SG_ASSERT_RELEASE_RETURN( (pLVI->pPrescanRow->pPcRow_Ref->p_d_sparse) );
			SG_ERR_CHECK_RETURN(  SG_STRDUP(pCtx,
											pLVI->pPrescanRow->pPcRow_Ref->p_d_sparse->pszHid,
											ppszHidContent)  );
			if (pSize)
				SG_ERR_CHECK_RETURN(  _fetch_size_of_blob(pCtx, pWcTx,
														  pLVI->pPrescanRow->pPcRow_Ref->p_d_sparse->pszHid,
														  pSize)  );
		}
		else
		{
			// With the addition of {sparse_hid,sparse_attrbits} to tbl_PC,
			// we should not get here.
			SG_ERR_THROW2_RETURN(  SG_ERR_NOTIMPLEMENTED,
								   (pCtx, "GetCurrentHid: unhandled case when sparse for '%s'.",
									SG_string__sz(pLVI->pStringEntryname))  );
		}
	}
	else if (pLVI->pPrescanRow->pRD)
	{
#if TRACE_WC_TSC
		if (!bNoTSC)
			SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
									   "GetCurrentContentHid: looking up '%s'\n",
									   SG_string__sz(pLVI->pStringEntryname))  );
#endif
		SG_ERR_CHECK_RETURN(  sg_wc_readdir__row__get_content_hid__owned(pCtx, pWcTx,
																		 pLVI->pPrescanRow->pRD,
																		 bNoTSC,
																		 ppszHidContent,
																		 pSize)  );
	}
	else if (pLVI->pPrescanRow->pTneRow)
	{
		SG_ERR_CHECK_RETURN(  SG_STRDUP(pCtx, pLVI->pPrescanRow->pTneRow->p_d->pszHid, ppszHidContent)  );
		if (pSize)
			SG_ERR_CHECK_RETURN(  _fetch_size_of_blob(pCtx, pWcTx, pLVI->pPrescanRow->pTneRow->p_d->pszHid, pSize)  );
	}
	else
	{
		// perhaps an ADD-SPECIAL + DELETE
		// or an ADDED+LOST in an UPDATE ?
		SG_ERR_THROW2_RETURN(  SG_ERR_NOTIMPLEMENTED,
							   (pCtx, "GetCurrentHid: unhandled case for '%s'.",
								SG_string__sz(pLVI->pStringEntryname))  );
	}
}
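/**
 * Hypothetical caller sketch: fetch the current content HID and size of a
 * live item and log them.  SG_FALSE for bNoTSC means the timestamp cache
 * may be consulted, as in the function above.
 */
static void _log_current_content_hid(SG_context * pCtx,
									 SG_wc_tx * pWcTx,
									 sg_wc_liveview_item * pLVI)
{
	char * pszHid = NULL;
	SG_uint64 size = 0;

	SG_ERR_CHECK(  sg_wc_liveview_item__get_current_content_hid(pCtx, pLVI, pWcTx, SG_FALSE,
																&pszHid, &size)  );
	SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR, "%s: content hid %s\n",
							   SG_string__sz(pLVI->pStringEntryname), pszHid)  );

fail:
	SG_NULLFREE(pCtx, pszHid);
}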
static void _merge__compute_target_hid(SG_context * pCtx,
									   SG_mrg * pMrg)
{
	const SG_rev_spec * pRevSpec = ((pMrg->pMergeArgs) ? pMrg->pMergeArgs->pRevSpec : NULL);
	SG_stringarray * psaHids = NULL;
	SG_stringarray * psaMissingHids = NULL;
	SG_rev_spec * pRevSpec_Allocated = NULL;
	SG_bool bRequestedAttachedBranch = SG_FALSE;
	SG_stringarray * psaBranchesRequested = NULL;
	const char * pszBranchNameRequested = NULL;
	SG_uint32 nrMatched = 0;
	SG_uint32 nrMatchedExcludingParent = 0;

	if (pRevSpec)
	{
		SG_uint32 uTotal = 0u;
		SG_uint32 uBranches = 0u;

		SG_ERR_CHECK(  SG_rev_spec__count(pCtx, pRevSpec, &uTotal)  );
		SG_ERR_CHECK(  SG_rev_spec__count_branches(pCtx, pRevSpec, &uBranches)  );
		if (uTotal == 0u)
		{
			// if the rev spec is empty, just pretend it doesn't exist
			pRevSpec = NULL;
		}
		else if (uTotal > 1u)
		{
			// we can only handle a single specification
			SG_ERR_THROW2(SG_ERR_INVALIDARG, (pCtx, "Merge can accept at most one revision/tag/branch specifier."));
		}
		else if (uTotal == 1u && uBranches == 1u)
		{
			SG_ERR_CHECK(  SG_rev_spec__branches(pCtx, (/*const*/ SG_rev_spec *)pRevSpec, &psaBranchesRequested)  );
			SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaBranchesRequested, 0, &pszBranchNameRequested)  );

			if (pMrg->pszBranchName_Starting)
				bRequestedAttachedBranch = (strcmp(pszBranchNameRequested, pMrg->pszBranchName_Starting) == 0);
		}
	}

	if (!pRevSpec)
	{
        if (!pMrg->pszBranchName_Starting)
            SG_ERR_THROW(  SG_ERR_NOT_TIED  );

		SG_ERR_CHECK(  SG_REV_SPEC__ALLOC(pCtx, &pRevSpec_Allocated)  );
		SG_ERR_CHECK(  SG_rev_spec__add_branch(pCtx, pRevSpec_Allocated, pMrg->pszBranchName_Starting)  );
		pRevSpec = pRevSpec_Allocated;
		pszBranchNameRequested = pMrg->pszBranchName_Starting;
		bRequestedAttachedBranch = SG_TRUE;
	}

	// Lookup the given (or synthesized) --rev/--tag/--branch
	// and see how many csets it refers to.  Disregard/filter-out
	// any that are not present in the local repo.

	SG_ERR_CHECK(  SG_rev_spec__get_all__repo(pCtx, pMrg->pWcTx->pDb->pRepo, pRevSpec, SG_TRUE,
											  &psaHids, &psaMissingHids)  );
	SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaHids, &nrMatched)  );
	if (nrMatched == 0)
	{
		SG_uint32 nrMissing = 0;
		SG_ASSERT_RELEASE_FAIL(  (psaMissingHids != NULL)  );
		SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaMissingHids, &nrMissing)  );
		if (nrMissing == 1)
		{
			const char * psz_0;
			SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaMissingHids, 0, &psz_0)  );
			SG_ERR_THROW2(  SG_ERR_BRANCH_HEAD_CHANGESET_NOT_PRESENT,
							(pCtx, "Branch '%s' refers to changeset '%s'. Consider pulling.",
							 pszBranchNameRequested, psz_0)  );
		}
		else
		{
			SG_ERR_THROW2(  SG_ERR_BRANCH_HEAD_CHANGESET_NOT_PRESENT,
							(pCtx, "Branch '%s' refers to %d changesets that are not present. Consider pulling.",
							 pszBranchNameRequested, nrMissing)  );
		}
	}
	else if (nrMatched == 1)
	{
		// We found a single unique match for our request.
		// We ***DO NOT*** disqualify the current baseline
		// in this case.  We let routines like do_cmd_merge_preview()
		// report that.

		const char * psz_0;
		SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaHids, 0, &psz_0)  );
		SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_0, &pMrg->pszHidTarget)  );
	}
	else
	{
		// We can only get here if pRevSpec contained a "--branch ..."
		// reference (because the "--rev" lookup throws when given a
		// non-unique prefix and "--tag" can only be bound to a single
		// cset).
		//
		// If they referenced the attached branch (and the baseline is
		// pointing at a head), we'll get our baseline in the result set,
		// so get rid of it.
		SG_ERR_CHECK(  SG_stringarray__remove_all(pCtx, psaHids, pMrg->pszHid_StartingBaseline, NULL)  );
		SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaHids, &nrMatchedExcludingParent)  );

		if (nrMatchedExcludingParent == 1)
		{
			// parent may or may not be a head of this branch, but
			// we found a single head or single other head.
			const char * psz_0;
			SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaHids, 0, &psz_0)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, psz_0, &pMrg->pszHidTarget)  );
		}
		else if (nrMatchedExcludingParent < nrMatched)
		{
			// There were at least 3 heads of this branch and the baseline
			// is one of them.  Throwing a generic 'needs merge' message is
			// not helpful.
			SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
							(pCtx, "Branch '%s' has %d heads (excluding the baseline). Consider merging one of the other heads using --rev/--tag.",
							 pszBranchNameRequested, nrMatchedExcludingParent)  );
		}
		else //if (nrMatchedExcludingParent == nrMatched)
		{
			// The requested branch has multiple heads and the current
			// baseline is NOT one of them.  The current baseline MAY OR MAY NOT
			// be in that branch.  (And independently, we may or may not be
			// attached to that branch.)
			//
			// See how the heads are related to the current baseline.
			const char * pszDescendant0 = NULL;
			const char * pszAncestor0 = NULL;
			SG_uint32 nrDescendants = 0;
			SG_uint32 nrAncestors = 0;
			SG_uint32 k;
			for (k=0; k<nrMatched; k++)
			{
				const char * psz_k;
				SG_dagquery_relationship dqRel;
				SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaHids, k, &psz_k)  );
				SG_ERR_CHECK(  SG_dagquery__how_are_dagnodes_related(pCtx, pMrg->pWcTx->pDb->pRepo,
																	 SG_DAGNUM__VERSION_CONTROL,
																	 psz_k, pMrg->pszHid_StartingBaseline,
																	 SG_FALSE, SG_FALSE, &dqRel)  );
				if (dqRel == SG_DAGQUERY_RELATIONSHIP__DESCENDANT)
				{
					pszDescendant0 = psz_k;
					nrDescendants++; // target[k] is descendant of baseline
				}
				else if (dqRel == SG_DAGQUERY_RELATIONSHIP__ANCESTOR)
				{
					pszAncestor0 = psz_k;
					nrAncestors++;	// target[k] is ancestor of baseline
				}
			}
			SG_ASSERT(  ((nrDescendants == 0) || (nrAncestors == 0))  );
			if (nrDescendants == 1)
			{
				if (bRequestedAttachedBranch)			// The current baseline is attached to the same branch, just not a head.
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Only changeset '%s' is a descendant of the current baseline. Consider updating to it and then merging the branch.",
									 pszBranchNameRequested, nrMatched, pszDescendant0)  );
				else if (pMrg->pszBranchName_Starting)	// currently attached to a different branch
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Only changeset '%s' is a descendant of the current baseline. Consider updating to it. You are attached to branch '%s'.",
									 pszBranchNameRequested, nrMatched, pszDescendant0, pMrg->pszBranchName_Starting)  );
				else									// currently detached
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Only changeset '%s' is a descendant of the current baseline. Consider updating to it. You are not attached to a branch.",
									 pszBranchNameRequested, nrMatched, pszDescendant0)  );
			}
			else if (nrDescendants > 1)					// nrDescendants may or may not be equal to nrMatched since there may be peers too.
			{
				if (bRequestedAttachedBranch)			// The current baseline is attached to the same branch, just not a head.
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. %d are descendants of the current baseline. Consider updating to one of them and then merging the branch.",
									 pszBranchNameRequested, nrMatched, nrDescendants)  );
				else if (pMrg->pszBranchName_Starting)	// currently attached to a different branch
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. %d are descendants of the current baseline. Consider updating to one of them. You are attached to branch '%s'.",
									 pszBranchNameRequested, nrMatched, nrDescendants, pMrg->pszBranchName_Starting)  );
				else									// currently detached
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. %d are descendants of the current baseline. Consider updating to one of them. You are not attached to a branch.",
									 pszBranchNameRequested, nrMatched, nrDescendants)  );
			}
			else if (nrAncestors == 1)
			{
				if (bRequestedAttachedBranch)			// The current baseline is attached to the same branch, but the head pointer is not pointing at us.
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Changeset '%s' is an ancestor of the current baseline. Consider moving that head forward and then merging the branch.",
									 pszBranchNameRequested, nrMatched, pszAncestor0)  );
				else if (pMrg->pszBranchName_Starting)	// currently attached to a different branch
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Changeset '%s' is an ancestor of the current baseline. Consider moving that head forward. You are attached to branch '%s'.",
									 pszBranchNameRequested, nrMatched, pszAncestor0, pMrg->pszBranchName_Starting)  );
				else									// currently detached
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. Changeset '%s' is an ancestor of the current baseline. Consider moving that head forward. You are not attached to a branch.",
									 pszBranchNameRequested, nrMatched, pszAncestor0)  );
			}
			else if (nrAncestors > 1)					// nrAncestors may or may not be equal to nrMatched since there may be peers too.
			{
				SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
								(pCtx, "Branch '%s' has %d heads. All of them are ancestors of the current baseline. Consider moving one of the heads forward and removing the others.",
								 pszBranchNameRequested, nrMatched)  );
			}
			else										// All of the heads are peers of the current baseline.
			{
				if (bRequestedAttachedBranch)			// The current baseline is attached to the same branch, but the head pointer is not pointing at us.
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. All are peers of the current baseline. Consider merging one of the other heads using --rev/--tag.",
									 pszBranchNameRequested, nrMatched)  );
				else if (pMrg->pszBranchName_Starting)	// currently attached to a different branch
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. All are peers of the current baseline. Consider merging one of the other heads using --rev/--tag. You are attached to branch '%s'.",
									 pszBranchNameRequested, nrMatched, pMrg->pszBranchName_Starting)  );
				else									// currently detached
					SG_ERR_THROW2(  SG_ERR_BRANCH_NEEDS_MERGE,
									(pCtx, "Branch '%s' has %d heads. All are peers of the current baseline. Consider merging one of the other heads using --rev/--tag. You are not attached to a branch.",
									 pszBranchNameRequested, nrMatched)  );
			}
		}
	}

fail:
	SG_STRINGARRAY_NULLFREE(pCtx, psaBranchesRequested);
	SG_STRINGARRAY_NULLFREE(pCtx, psaHids);
	SG_STRINGARRAY_NULLFREE(pCtx, psaMissingHids);
	SG_REV_SPEC_NULLFREE(pCtx, pRevSpec_Allocated);
}
char *  sg_treediff_cache::GetLastResult(SG_context * pCtx)
{
	char * pszNewCopy = NULL;
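	// Hand back a private copy of the cached result; the caller owns it and
	// must free it.  If nothing has been cached yet, or the copy fails, NULL
	// is returned (and any error is left on pCtx).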
	SG_STRDUP(pCtx, m_pPreviousResult, &pszNewCopy);
	return pszNewCopy;
}
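
// A minimal usage sketch (hypothetical helper, not part of the original
// source): "pCache" is assumed to be a valid sg_treediff_cache.  The string
// returned by GetLastResult() is a private copy that the caller must free.
static void example__print_last_result(SG_context * pCtx, sg_treediff_cache * pCache)
{
	char * pszCached = pCache->GetLastResult(pCtx);
	if (pszCached != NULL)
	{
		SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "%s\n", pszCached)  );
		SG_NULLFREE(pCtx, pszCached);
	}
}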
void do_cmd_merge_preview(SG_context * pCtx, SG_option_state * pOptSt)
{
	SG_repo * pRepo = NULL;
	
	SG_uint32 countRevSpecs = 0;
	SG_stringarray * psaRevSpecs = NULL;
	const char * const * ppszRevSpecs = NULL;
	
	SG_stringarray * psaNewChangesets = NULL;
	const char * const * ppszNewChangesets = NULL;
	SG_uint32 countNewChangesets = 0;
	
	char * pszHidBaseline = NULL;
	char * pszHidMergeTarget = NULL;
	SG_dagquery_relationship relationship;
	
	SG_vhash * pvhPileOfCleanBranches = NULL;
	SG_uint32 i = 0;
	
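	// Determine the two endpoints of the preview:
	//  - with --repo, both BASELINE-REVSPEC and OTHER-REVSPEC must be given;
	//  - otherwise use the working copy's repo and, when fewer than two
	//    revspecs are given, take the baseline from the working copy's
	//    parents and let the merge code compute the target.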
	countRevSpecs = 0;
	if (pOptSt->pRevSpec)
	{
		SG_ERR_CHECK(  SG_rev_spec__count(pCtx, pOptSt->pRevSpec, &countRevSpecs)  );
		if(countRevSpecs>2)
			SG_ERR_THROW(SG_ERR_USAGE);
	}
	
	if(pOptSt->psz_repo!=NULL)
	{
		if(countRevSpecs==2)
		{
			SG_ERR_CHECK(  SG_REPO__OPEN_REPO_INSTANCE(pCtx, pOptSt->psz_repo, &pRepo)  );
			SG_ERR_CHECK(  SG_rev_spec__get_all__repo(pCtx, pRepo, pOptSt->pRevSpec, SG_FALSE, &psaRevSpecs, NULL)  );
			SG_ERR_CHECK(  SG_stringarray__sz_array(pCtx, psaRevSpecs, &ppszRevSpecs)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[0], &pszHidBaseline)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[1], &pszHidMergeTarget)  );
			SG_STRINGARRAY_NULLFREE(pCtx, psaRevSpecs);
		}
		else
		{
			SG_ERR_THROW2(SG_ERR_USAGE, (pCtx, "When using the --repo option, you must provide both the BASELINE-REVSPEC and the OTHER-REVSPEC."));
		}
	}
	else
	{
		SG_ERR_CHECK(  SG_cmd_util__get_repo_from_cwd(pCtx, &pRepo, NULL)  );
		if(countRevSpecs==2)
		{
			SG_ERR_CHECK(  SG_rev_spec__get_all__repo(pCtx, pRepo, pOptSt->pRevSpec, SG_FALSE, &psaRevSpecs, NULL)  );
			SG_ERR_CHECK(  SG_stringarray__sz_array(pCtx, psaRevSpecs, &ppszRevSpecs)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[0], &pszHidBaseline)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[1], &pszHidMergeTarget)  );
			SG_STRINGARRAY_NULLFREE(pCtx, psaRevSpecs);
		}
		else
		{
			SG_uint32 countBaselines = 0;
			SG_ERR_CHECK(  SG_wc__get_wc_parents__stringarray(pCtx, NULL, &psaRevSpecs)  );
			SG_ERR_CHECK(  SG_stringarray__sz_array_and_count(pCtx, psaRevSpecs, &ppszRevSpecs, &countBaselines)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[0], &pszHidBaseline)  );
			if(countBaselines==2)
			{
				SG_ERR_CHECK(  SG_STRDUP(pCtx, ppszRevSpecs[1], &pszHidMergeTarget)  );
			}
			else
			{
				SG_wc_merge_args merge_args;
				merge_args.pRevSpec = pOptSt->pRevSpec;
				merge_args.bNoAutoMergeFiles = SG_TRUE;	// doesn't matter
				merge_args.bComplainIfBaselineNotLeaf = SG_FALSE;	// doesn't matter
				SG_ERR_CHECK(  SG_wc__merge__compute_preview_target(pCtx, NULL, &merge_args, &pszHidMergeTarget)  );
			}
			SG_STRINGARRAY_NULLFREE(pCtx, psaRevSpecs);
		}
	}
	
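	// Classify how the target relates to the baseline: an ancestor (or the
	// same cset) means there is nothing to merge; a descendant means a
	// fast-forward; anything else is a true merge.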
	SG_ERR_CHECK(  SG_dagquery__how_are_dagnodes_related(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL,
		pszHidMergeTarget, pszHidBaseline,
		SG_FALSE, SG_FALSE,
		&relationship)  );
	if(relationship==SG_DAGQUERY_RELATIONSHIP__ANCESTOR || relationship==SG_DAGQUERY_RELATIONSHIP__SAME)
	{
		SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDOUT, "The baseline already includes the merge target. No merge is needed.\n")  );
	}
	else
	{
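		// List every changeset the merge would bring in: everything reachable
		// from the target that is not already in the baseline's ancestry.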
		SG_ERR_CHECK(  SG_dagquery__find_new_since_common(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, pszHidBaseline, pszHidMergeTarget, &psaNewChangesets)  );
		SG_ERR_CHECK(  SG_stringarray__sz_array_and_count(pCtx, psaNewChangesets, &ppszNewChangesets, &countNewChangesets)  );
		
		SG_ERR_CHECK(  SG_vc_branches__cleanup(pCtx, pRepo, &pvhPileOfCleanBranches)  );
		for(i=0; i<countNewChangesets; ++i)
		{
			SG_ERR_CHECK(  SG_cmd_util__dump_log(pCtx, SG_CS_STDOUT, pRepo, ppszNewChangesets[i], pvhPileOfCleanBranches, SG_TRUE, SG_FALSE)  );
		}
		
		if(relationship==SG_DAGQUERY_RELATIONSHIP__DESCENDANT)
		{
			SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDOUT, "\nFast-Forward Merge to '%s' brings in %i changeset%s.\n", pszHidMergeTarget, countNewChangesets, ((countNewChangesets==1)?"":"s"))  );
		}
		else
		{
			SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDOUT, "\nMerge with '%s' brings in %i changeset%s.\n", pszHidMergeTarget, countNewChangesets, ((countNewChangesets==1)?"":"s"))  );
		}
	}

	SG_VHASH_NULLFREE(pCtx, pvhPileOfCleanBranches);
	SG_STRINGARRAY_NULLFREE(pCtx, psaNewChangesets);
	SG_NULLFREE(pCtx, pszHidBaseline);
	SG_NULLFREE(pCtx, pszHidMergeTarget);
	SG_REPO_NULLFREE(pCtx, pRepo);

	return;
fail:
	SG_REPO_NULLFREE(pCtx, pRepo);
	SG_STRINGARRAY_NULLFREE(pCtx, psaNewChangesets);
	SG_STRINGARRAY_NULLFREE(pCtx, psaRevSpecs);
	SG_NULLFREE(pCtx, pszHidBaseline);
	SG_NULLFREE(pCtx, pszHidMergeTarget);
	SG_VHASH_NULLFREE(pCtx, pvhPileOfCleanBranches);
}