/**
 * Return a pathname set to the location of the "tmp"
 * directory for this working directory.
 *
 * We create something like:
 *
 *         <root>/.sgdrawer/tmp/
 *
 * Note that the main "tmp" directory is long-lived and
 * may be used by multiple tasks at the same time.  So
 * for significant tasks, create a private sub-directory
 * within it.
 *
 */
void sg_wc_db__path__get_temp_dir(SG_context * pCtx,
								  const sg_wc_db * pDb,
								  SG_pathname ** ppPathTempDir)
{
	SG_pathname * pPath = NULL;
	
	SG_ERR_CHECK(  SG_workingdir__get_drawer_path(pCtx,
												  pDb->pPathWorkingDirectoryTop,
												  &pPath)  );
	SG_ERR_CHECK(  SG_pathname__append__from_sz(pCtx, pPath, "tmp")  );
	SG_ERR_CHECK(  SG_pathname__add_final_slash(pCtx, pPath)  );

	// the "tmp" dir could get deleted by routine housekeeping,
	// so re-create it if someone even thinks about using temp-based
	// pathnames.  i'm going to skip the usual "if (!exists) mkdir"
	// stuff and just try to create it and ignore the error.

	SG_fsobj__mkdir__pathname(pCtx, pPath);
	if (SG_CONTEXT__HAS_ERR(pCtx))
	{
		if (!SG_context__err_equals(pCtx, SG_ERR_DIR_ALREADY_EXISTS))
			SG_ERR_RETHROW;
		SG_context__err_reset(pCtx);
	}

	*ppPathTempDir = pPath;
	return;

fail:
	SG_PATHNAME_NULLFREE(pCtx, pPath);
}
/**
 * Compute an MSTATUS, optionally falling back to a regular STATUS
 * when the working copy is not in a merge.
 * You own the returned pvaStatus.
 *
 */
void SG_wc_tx__mstatus(SG_context * pCtx,
					   SG_wc_tx * pWcTx,
					   SG_bool bNoIgnores,
					   SG_bool bNoFallback,
					   SG_bool bNoSort,
					   SG_varray ** ppvaStatus,
					   SG_vhash ** ppvhLegend)
{
	_sg_wc_tx__mstatus(pCtx, pWcTx, bNoIgnores, bNoSort, ppvaStatus, ppvhLegend);
	if (SG_CONTEXT__HAS_ERR(pCtx) == SG_FALSE)
		return;
	
	if ((SG_context__err_equals(pCtx, SG_ERR_NOT_IN_A_MERGE) == SG_FALSE)
		|| bNoFallback)
		SG_ERR_RETHROW;

	SG_context__err_reset(pCtx);
	SG_ERR_CHECK(  SG_wc_tx__status(pCtx, pWcTx,
									NULL, SG_INT32_MAX,	// no filtering
									SG_FALSE,			// bListUnchanged
									bNoIgnores,
									SG_FALSE,			// bNoTSC
									SG_FALSE,			// bListSparse
									SG_FALSE,			// bListReserved
									bNoSort,
									ppvaStatus,
									ppvhLegend)  );
fail:
	return;
}
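/*
 * A minimal caller-side sketch of the fallback contract above,
 * within an already-open TX.  One assumption: the SG_VARRAY_NULLFREE
 * cleanup macro, named here by analogy with the SG_VHASH_NULLFREE
 * macro used throughout this code.
 */
static void example__print_mstatus(SG_context * pCtx, SG_wc_tx * pWcTx)
{
	SG_varray * pvaStatus = NULL;	// we own this on success (see doc comment above)
	SG_vhash * pvhLegend = NULL;

	SG_ERR_CHECK(  SG_wc_tx__mstatus(pCtx, pWcTx,
									 SG_FALSE,	// bNoIgnores
									 SG_FALSE,	// bNoFallback: allow the plain STATUS fallback
									 SG_FALSE,	// bNoSort
									 &pvaStatus,
									 &pvhLegend)  );

	// ... consume pvaStatus / pvhLegend here ...

fail:
	SG_VARRAY_NULLFREE(pCtx, pvaStatus);	// assumed macro (see note above)
	SG_VHASH_NULLFREE(pCtx, pvhLegend);
}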
static void _my_do_cmd_update(SG_context * pCtx,
							  SG_option_state * pOptSt,
							  SG_pendingtree * pPendingTree,
							  const char * pszTargetChangeset)
{
	SG_pendingtree_action_log_enum eActionLog;

	if (pOptSt->bTest)
	{
		eActionLog = SG_PT_ACTION__LOG_IT;
	}
	else
	{
		eActionLog = SG_PT_ACTION__DO_IT;
		if (pOptSt->bVerbose)
			eActionLog |= SG_PT_ACTION__LOG_IT;
	}

	SG_ERR_CHECK(  SG_pendingtree__update_baseline(pCtx, pPendingTree, pszTargetChangeset,
												   pOptSt->bForce,
												   eActionLog)  );

	if (eActionLog & SG_PT_ACTION__LOG_IT)
		SG_ERR_IGNORE(  sg_report_action_log(pCtx,pPendingTree)  );

fail:
	if (SG_context__err_equals(pCtx,SG_ERR_PORTABILITY_WARNINGS))
		SG_ERR_IGNORE(  sg_report_portability_warnings(pCtx,pPendingTree)  );
}
void SG_workingdir__generate_and_create_temp_dir_for_purpose(SG_context * pCtx,
															 const SG_pathname * pPathWorkingDirectoryTop,
															 const char * pszPurpose,
															 SG_pathname ** ppPathTempDir)
{
	SG_pathname * pPathTempRoot = NULL;
	SG_pathname * pPath = NULL;
	SG_string * pString = NULL;
	SG_int64 iTimeUTC;
	SG_time tmLocal;
	SG_uint32 kAttempt = 0;

	SG_NONEMPTYCHECK_RETURN(pszPurpose);
	SG_NULLARGCHECK_RETURN(ppPathTempDir);

	// get path to "<wd-top>/.sgtemp".
	SG_ERR_CHECK(  SG_workingdir__get_temp_path(pCtx,pPathWorkingDirectoryTop,&pPathTempRoot)  );

	SG_ERR_CHECK(  SG_time__get_milliseconds_since_1970_utc(pCtx,&iTimeUTC)  );
	SG_ERR_CHECK(  SG_time__decode__local(pCtx,iTimeUTC,&tmLocal)  );

	SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx,&pString)  );

	while (1)
	{
		// build path "<wd-top>/.sgtemp/<purpose>_20091201_0".
		// where <purpose> is something like "revert" or "merge".

		SG_ERR_CHECK(  SG_string__sprintf(pCtx,pString,"%s_%04d%02d%02d_%d",
										  pszPurpose,
										  tmLocal.year,tmLocal.month,tmLocal.mday,
										  kAttempt++)  );
		SG_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx,&pPath,pPathTempRoot,SG_string__sz(pString))  );

		// try to create a NEW temp directory.  if this path already exists on disk,
		// loop and try again.  if we hit a hard error, just give up.

		SG_fsobj__mkdir_recursive__pathname(pCtx,pPath);
		if (SG_context__has_err(pCtx) == SG_FALSE)
			goto success;

		if (SG_context__err_equals(pCtx,SG_ERR_DIR_ALREADY_EXISTS) == SG_FALSE)
			SG_ERR_RETHROW;

		SG_context__err_reset(pCtx);
		SG_PATHNAME_NULLFREE(pCtx,pPath);
	}

success:
	*ppPathTempDir = pPath;

	SG_STRING_NULLFREE(pCtx, pString);
	SG_PATHNAME_NULLFREE(pCtx, pPathTempRoot);
	return;

fail:
	SG_STRING_NULLFREE(pCtx, pString);
	SG_PATHNAME_NULLFREE(pCtx, pPathTempRoot);
	SG_PATHNAME_NULLFREE(pCtx, pPath);
}
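/*
 * A minimal usage sketch for the routine above: create a private,
 * "merge"-scoped temp directory under the working directory and
 * free the pathname when done.  All identifiers are from this page.
 */
static void example__use_private_temp_dir(SG_context * pCtx,
										  const SG_pathname * pPathWorkingDirectoryTop)
{
	SG_pathname * pPathTempDir = NULL;

	SG_ERR_CHECK(  SG_workingdir__generate_and_create_temp_dir_for_purpose(pCtx,
																		   pPathWorkingDirectoryTop,
																		   "merge",
																		   &pPathTempDir)  );

	// ... write scratch files under pPathTempDir ...

fail:
	SG_PATHNAME_NULLFREE(pCtx, pPathTempDir);
}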
void SG_sync__compare_repo_blobs(SG_context* pCtx,
								 SG_repo* pRepo1,
								 SG_repo* pRepo2,
								 SG_bool* pbIdentical)
{
	const SG_uint32 chunk_size = 1000;
	SG_vhash* pvh = NULL;
	const char* pszBlob1 = NULL;
	SG_uint32 i, j;
	SG_uint32 count_observed = 0;
	SG_uint32 count_returned;

	SG_NULLARGCHECK_RETURN(pRepo1);
	SG_NULLARGCHECK_RETURN(pRepo2);
	SG_NULLARGCHECK_RETURN(pbIdentical);

	*pbIdentical = SG_TRUE;		// assume identical until a missing blob or count mismatch proves otherwise

	for(i = 0; SG_TRUE; i++)
	{
		// Ian TODO: other encodings
		SG_ERR_CHECK(  SG_repo__list_blobs(pCtx, pRepo1, SG_BLOBENCODING__ZLIB, SG_FALSE, SG_FALSE, chunk_size, chunk_size * i, &pvh)  );

		for (j = 0; j < chunk_size; j++)
		{
			SG_bool b = SG_TRUE;

			SG_vhash__get_nth_pair(pCtx, pvh, j, &pszBlob1, NULL);
			if (SG_context__err_equals(pCtx, SG_ERR_ARGUMENT_OUT_OF_RANGE))
			{
				SG_context__err_reset(pCtx);
				break;
			}
			SG_ERR_CHECK_CURRENT;

			SG_ERR_CHECK(  SG_repo__does_blob_exist(pCtx, pRepo2, pszBlob1, &b)  );
			if (!b)
			{
				*pbIdentical = SG_FALSE;
				break;
			}
			count_observed++;
		}
		if (!j)
			break;

		SG_VHASH_NULLFREE(pCtx, pvh);
	}

	SG_ERR_CHECK(  SG_repo__get_blob_stats(pCtx, pRepo2, NULL, NULL, &count_returned, NULL, NULL, NULL, NULL, NULL, NULL, NULL)  );
	if (count_returned != count_observed)
		*pbIdentical = SG_FALSE;

	// fall through
fail:
	SG_VHASH_NULLFREE(pCtx, pvh);
}
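/*
 * A hedged sketch of driving the comparison above: open two repo
 * instances, compare their blobs, and report.  The two descriptor
 * names are placeholders; the SG_ calls appear elsewhere on this page.
 */
static void example__compare_two_repos(SG_context * pCtx)
{
	SG_repo * pRepo1 = NULL;
	SG_repo * pRepo2 = NULL;
	SG_bool bIdentical = SG_FALSE;

	SG_ERR_CHECK(  SG_REPO__OPEN_REPO_INSTANCE(pCtx, "repo_original", &pRepo1)  );	// placeholder name
	SG_ERR_CHECK(  SG_REPO__OPEN_REPO_INSTANCE(pCtx, "repo_clone", &pRepo2)  );		// placeholder name

	SG_ERR_CHECK(  SG_sync__compare_repo_blobs(pCtx, pRepo1, pRepo2, &bIdentical)  );
	SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDOUT,
							  bIdentical ? "blobs identical\n" : "blobs differ\n")  );

fail:
	SG_REPO_NULLFREE(pCtx, pRepo1);
	SG_REPO_NULLFREE(pCtx, pRepo2);
}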
/**
 * Convert an absolute path to a wd-top-relative repo-path string.
 * 
 * This is just pathname/string parsing; we DO NOT confirm
 * that the path exists.  We only confirm that the result
 * is properly contained within the working directory.
 *
 */
void sg_wc_db__path__absolute_to_repopath(SG_context * pCtx,
										  const sg_wc_db * pDb,
										  const SG_pathname * pPathItem,
										  SG_string ** ppStringRepoPath)
{
	SG_string * pString = NULL;
	const char * psz;

	SG_workingdir__wdpath_to_repopath(pCtx,
									  pDb->pPathWorkingDirectoryTop,
									  pPathItem,
									  SG_FALSE,
									  &pString);
	if (SG_CONTEXT__HAS_ERR(pCtx))
	{
		if (SG_context__err_equals(pCtx, SG_ERR_CANNOT_MAKE_RELATIVE_PATH))
			goto throw_not_in_working_copy;
		if (SG_context__err_equals(pCtx, SG_ERR_ITEM_NOT_UNDER_VERSION_CONTROL))
			goto throw_not_in_working_copy;
		if (SG_context__err_equals(pCtx, SG_ERR_PATH_NOT_IN_WORKING_COPY))
			goto throw_not_in_working_copy;
		SG_ERR_RETHROW;
	}
	
	psz = SG_string__sz(pString);
	if ((psz[0] == '.') && (psz[1] == '.') && ((psz[2] == '/') || (psz[2] == 0)))
		goto throw_not_in_working_copy;

	*ppStringRepoPath = pString;
	return;

throw_not_in_working_copy:
	SG_context__err_reset(pCtx);
	SG_ERR_THROW2(  SG_ERR_PATH_NOT_IN_WORKING_COPY,
					(pCtx, "The path '%s' is not inside the working copy rooted at '%s'.",
					 SG_pathname__sz(pPathItem),
					 SG_pathname__sz(pDb->pPathWorkingDirectoryTop))  );

fail:
	SG_STRING_NULLFREE(pCtx, pString);
}
void sg_sync_client__http__heartbeat(
    SG_context* pCtx,
    SG_sync_client* pSyncClient,
    SG_vhash** ppvh)
{
    char* pszUrl = NULL;
    SG_vhash* pvh = NULL;
    SG_string* pstr = NULL;

    SG_NULLARGCHECK_RETURN(pSyncClient);
    SG_NULLARGCHECK_RETURN(ppvh);

    SG_ERR_CHECK(  _get_sync_url(pCtx, pSyncClient->psz_remote_repo_spec,
                                 "/heartbeat" JSON_URL_SUFFIX, NULL, NULL, &pszUrl)  );

    SG_ERR_CHECK(  do_url(
                       pCtx,
                       pszUrl,
                       "GET",
                       NULL,
                       pSyncClient->psz_username,
                       pSyncClient->psz_password,
                       &pstr,
                       NULL,
                       SG_TRUE
                   )  );

    SG_vhash__alloc__from_json__sz(pCtx, ppvh, SG_string__sz(pstr));
    if(SG_context__err_equals(pCtx, SG_ERR_JSON_WRONG_TOP_TYPE) || SG_context__err_equals(pCtx, SG_ERR_JSONPARSER_SYNTAX))
        SG_ERR_RESET_THROW2(SG_ERR_NOTAREPOSITORY, (pCtx, "%s", pSyncClient->psz_remote_repo_spec));
    else
        SG_ERR_CHECK_CURRENT;

    /* fall through */

fail:
    SG_NULLFREE(pCtx, pszUrl);
    SG_VHASH_NULLFREE(pCtx, pvh);
    SG_STRING_NULLFREE(pCtx, pstr);
}
int u0020_utf8pathnames__create_file(SG_context * pCtx, const SG_pathname * pPathnameTmpDir, _tableitem * pti)
{
	// create a file in the given tmp dir using the given filename.

	SG_pathname * pPathnameNewFile = NULL;
	char * pBufUtf8 = NULL;
	SG_uint32 lenUtf8;
	SG_file * pFile = NULL;
	int iResult;
	SG_bool bTest;

	// convert the utf32 string into utf8.

	VERIFY_ERR_CHECK_DISCARD(  SG_utf8__from_utf32(pCtx, pti->pa32,&pBufUtf8,&lenUtf8)  );	// we have to free pBufUtf8

	// verify that the computed utf8 string matches what we thought it should be.
	// (this hopefully guards against the conversion layer playing NFC/NFD tricks.)

	iResult = SG_utf8__compare(pBufUtf8,(char *)pti->pa8);
	VERIFYP_COND("u0020_utf8pathnames",(iResult==0),("Compare failed [%s][%s]",pBufUtf8,pti->pa8));

	// create full pathname to the file to create.

	VERIFY_ERR_CHECK_DISCARD(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx,
															   &pPathnameNewFile,
															   pPathnameTmpDir,pBufUtf8)  );

	// create the file and close it.
	// on Linux when our locale is set to something other than UTF-8, we may
	// get an ICU(10) == U_INVALID_CHAR_FOUND error because the test data is
	// not necessarily friendly to any one locale and there are some NFD
	// cases too.   we map ICU(10) to SG_ERR_UNMAPPABLE_UNICODE_CHAR

	SG_file__open__pathname(pCtx,pPathnameNewFile,SG_FILE_WRONLY|SG_FILE_CREATE_NEW,0755,&pFile);
#if defined(LINUX)
	bTest = (  (!SG_context__has_err(pCtx))  ||  (SG_context__err_equals(pCtx,SG_ERR_UNMAPPABLE_UNICODE_CHAR))  );
#else
	bTest = (  (!SG_context__has_err(pCtx))  );
#endif
	SG_context__err_reset(pCtx);

	VERIFYP_COND("u0020_utf8pathnames",bTest,
			 ("Error Creating file [%s]",SG_pathname__sz(pPathnameNewFile)));

	VERIFY_ERR_CHECK_DISCARD(  SG_file__close(pCtx, &pFile)  );

	SG_PATHNAME_NULLFREE(pCtx, pPathnameNewFile);
	SG_NULLFREE(pCtx, pBufUtf8);

	return 1;
}
void u0038_test_version(SG_context * pCtx)
{
	/* This test pokes around in closet internals in ways normal closet callers shouldn't. */

	SG_string* pstrEnv = NULL;
	SG_uint32 len;
	SG_pathname* pPathCloset = NULL;
	SG_pathname* pPathClosetVersion = NULL;
	SG_pathname* pPathClosetVersionBackup = NULL;
	SG_file* pFile = NULL;
	SG_vhash* pvh = NULL;
	
	/* Deliberately making this break for closet version 3 -- current is version 2. */
	SG_byte buf[3]; 
	
	VERIFY_ERR_CHECK(  SG_environment__get__str(pCtx, "SGCLOSET", &pstrEnv, &len)  );
	if (len)
	{
		VERIFY_ERR_CHECK(  SG_PATHNAME__ALLOC__SZ(pCtx, &pPathCloset, SG_string__sz(pstrEnv))  );
	}
	else
	{
		VERIFY_ERR_CHECK(  SG_PATHNAME__ALLOC__USER_APPDATA_DIRECTORY(pCtx, &pPathCloset)  );
		VERIFY_ERR_CHECK(  SG_pathname__append__from_sz(pCtx, pPathCloset, ".sgcloset")  );
	}

	VERIFY_ERR_CHECK(  SG_pathname__alloc__pathname_sz(pCtx, &pPathClosetVersion, pPathCloset, "version")  );
	VERIFY_ERR_CHECK(  SG_pathname__alloc__pathname_sz(pCtx, &pPathClosetVersionBackup, pPathCloset, "version.bak")  );

	VERIFY_ERR_CHECK(  SG_fsobj__move__pathname_pathname(pCtx, pPathClosetVersion, pPathClosetVersionBackup)  );
	
	VERIFY_ERR_CHECK(  SG_file__open__pathname(pCtx, pPathClosetVersion, SG_FILE_OPEN_OR_CREATE|SG_FILE_WRONLY|SG_FILE_TRUNC, 0644, &pFile)  );
	VERIFY_ERR_CHECK(  SG_file__write(pCtx, pFile, sizeof(buf), buf, NULL)  ); 
	VERIFY_ERR_CHECK(  SG_file__close(pCtx, &pFile)  );

	SG_closet__descriptors__list(pCtx, &pvh);
	VERIFY_COND("", SG_context__err_equals(pCtx, SG_ERR_UNSUPPORTED_CLOSET_VERSION));
	SG_ERR_DISCARD;

	VERIFY_ERR_CHECK(  SG_fsobj__remove__pathname(pCtx, pPathClosetVersion)  );
	VERIFY_ERR_CHECK(  SG_fsobj__move__pathname_pathname(pCtx, pPathClosetVersionBackup, pPathClosetVersion)  );

	/* Common cleanup */
fail:
	SG_STRING_NULLFREE(pCtx, pstrEnv);
	SG_PATHNAME_NULLFREE(pCtx, pPathCloset);
	SG_PATHNAME_NULLFREE(pCtx, pPathClosetVersion);
	SG_PATHNAME_NULLFREE(pCtx, pPathClosetVersionBackup);
	SG_FILE_NULLCLOSE(pCtx, pFile);
	SG_VHASH_NULLFREE(pCtx, pvh);
}
void SG_cmd_util__dump_log(
	SG_context * pCtx, 
	SG_console_stream cs,
	SG_repo* pRepo, 
	const char* psz_hid_cs, 
	SG_vhash* pvhCleanPileOfBranches, 
	SG_bool bShowOnlyOpenBranchNames,
	SG_bool bShowFullComments)
{
	SG_history_result* pHistResult = NULL;
	SG_stringarray * psaHids = NULL;
	
	SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psaHids, 1)  );
	SG_ERR_CHECK(  SG_stringarray__add(pCtx, psaHids, psz_hid_cs)  );
	SG_history__get_revision_details(pCtx, pRepo, psaHids, NULL, &pHistResult);

    if (SG_context__err_equals(pCtx, SG_ERR_NOT_FOUND))
    {
		/* There's a branch that references a changeset that doesn't exist. Show what we can. */

		SG_vhash* pvhRefClosedBranches = NULL;
		SG_vhash* pvhRefBranchValues = NULL;

		SG_context__err_reset(pCtx);

		if (pvhCleanPileOfBranches)
		{
			SG_bool bHas = SG_FALSE;
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhCleanPileOfBranches, "closed", &bHas)  );
			if (bHas)
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhCleanPileOfBranches, "closed", &pvhRefClosedBranches)  );

			SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhCleanPileOfBranches, "values", &pvhRefBranchValues)  );
		}

		SG_ERR_CHECK(  SG_console(pCtx, cs, "\n\t%8s:  %s\n", "revision", psz_hid_cs)  );
		SG_ERR_CHECK(  _dump_branch_name(pCtx, cs, psz_hid_cs, bShowOnlyOpenBranchNames, 
			pvhRefBranchValues, pvhRefClosedBranches)  );
		SG_ERR_CHECK(  SG_console(pCtx, cs, "\t%8s   %s\n", "", "(not present in repository)")  );
    }
    else
    {
		SG_ERR_CHECK_CURRENT;
        SG_ERR_CHECK(  SG_cmd_util__dump_history_results(pCtx, cs, pHistResult, pvhCleanPileOfBranches, bShowOnlyOpenBranchNames, bShowFullComments, SG_FALSE)  );
    }

fail:
	SG_HISTORY_RESULT_NULLFREE(pCtx, pHistResult);
	SG_STRINGARRAY_NULLFREE(pCtx, psaHids);
}
void sg_treediff_cache::GetStatusForPath(SG_context * pCtx, SG_bool bIsRetry, const SG_pathname * pPathName, SG_wc_status_flags * pStatusFlags, SG_bool * pbDirChanged)
{
	wxCriticalSectionLocker lock(m_wc_transaction_lock);
	SG_wc_status_flags statusFlags = SG_WC_STATUS_FLAGS__U__FOUND;
	SG_string * psgstr_statusFlagsDetails = NULL;
	if (m_pwc_tx == NULL)
	{
		//Start a read-only transaction.
		SG_ERR_CHECK(  SG_WC_TX__ALLOC__BEGIN(pCtx, &m_pwc_tx, ((pPathName)), SG_TRUE)  );
	}

	SG_ERR_CHECK(  SG_wc_tx__get_item_status_flags(pCtx, m_pwc_tx, SG_pathname__sz(pPathName), SG_FALSE, SG_FALSE, &statusFlags, NULL)  );
	if (pbDirChanged != NULL && (statusFlags & SG_WC_STATUS_FLAGS__T__DIRECTORY))
	{
		//It's a directory, so we need to check to see if there are modifications under it.
		SG_wc_status_flags dirStatusFlags = SG_WC_STATUS_FLAGS__U__FOUND;
		SG_ERR_CHECK(  SG_wc_tx__get_item_dirstatus_flags(pCtx, m_pwc_tx, SG_pathname__sz(pPathName), &dirStatusFlags, NULL, pbDirChanged)   );
	}
#if DEBUG
	SG_ERR_CHECK(  SG_wc_debug__status__dump_flags(pCtx, statusFlags, "status flags", &psgstr_statusFlagsDetails)  );
	SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "%s", SG_string__sz(psgstr_statusFlagsDetails))  );
#endif
	//Don't recurse if this is our second try. 
	if ((bIsRetry == SG_FALSE) 
		&& ((statusFlags & SG_WC_STATUS_FLAGS__T__BOGUS) == SG_WC_STATUS_FLAGS__T__BOGUS) )
	{
		//This is an item that is new in the directory since we started
		//our transaction.  Close the transaction and start a new one.
		SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Got a bogus status.  Restarting transaction")  );
		clearCache(pCtx);
		GetStatusForPath(pCtx, SG_TRUE, pPathName, &statusFlags, pbDirChanged);
	}
	
	SG_ERR_CHECK(  SG_time__get_milliseconds_since_1970_utc(pCtx, &m_timeLastRequest)  );
	*pStatusFlags = statusFlags;
fail:
	SG_STRING_NULLFREE(pCtx, psgstr_statusFlagsDetails);
	//Log and reset any errors on the context.
	//Any error condition should report the status as
	//Found and still send a message back to the client.
	if (SG_context__has_err(pCtx) == SG_TRUE)
	{
		if (!SG_context__err_equals(pCtx, SG_ERR_WC_DB_BUSY))
		{
			SG_log__report_error__current_error(pCtx);
		}
		SG_context__err_reset(pCtx);
	}
}
void _read_template_file(
	SG_context *pCtx,
	const char *templateFn,
	SG_string **pContent,	/**< we allocate, we free on error, else caller owns it */
	const _request_headers *pRequestHeaders,
	_replacer_cb replacer)
{
	SG_pathname *tpath = NULL;
	SG_file *pFile = NULL;
	SG_uint32 got = 0;
	char	tbuf[1024];

    //todo: make this thread-safe:
    if(_sg_uridispatch__templatePath==NULL)
        SG_ERR_CHECK(  _sgui_set_templatePath(pCtx)  );

	SG_ERR_CHECK(  SG_PATHNAME__ALLOC__COPY(pCtx, &tpath, _sg_uridispatch__templatePath)  );
	SG_ERR_CHECK(  SG_pathname__append__from_sz(pCtx, tpath, templateFn)  );

	SG_ERR_CHECK(  SG_file__open__pathname(pCtx, tpath, SG_FILE_RDONLY|SG_FILE_OPEN_EXISTING, 0644, &pFile)  );
	SG_PATHNAME_NULLFREE(pCtx, tpath);

	SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, pContent)  );

	do
	{
		SG_file__read(pCtx, pFile, sizeof(tbuf), (SG_byte *)tbuf, &got);
        if (SG_context__err_equals(pCtx, SG_ERR_EOF))
        {
			SG_context__err_reset(pCtx);
            break;
        }
		SG_ERR_CHECK_CURRENT;

		SG_ERR_CHECK(  SG_string__append__buf_len(pCtx, *pContent, (const SG_byte *)tbuf, got) );
	} while (got > 0);

	SG_ERR_CHECK(  SG_file__close(pCtx, &pFile)  );

	SG_ERR_CHECK(  _templatize(pCtx, *pContent, pRequestHeaders, replacer)  );

	return;

fail:
	SG_STRING_NULLFREE(pCtx, *pContent);
	SG_FILE_NULLCLOSE(pCtx, pFile);
	SG_PATHNAME_NULLFREE(pCtx, tpath);
}
void SG_dbrecord__save_to_repo(SG_context* pCtx, SG_dbrecord * pRecord, SG_repo * pRepo, SG_repo_tx_handle* pRepoTx, SG_uint64* iBlobFullLength)
{
	SG_string * pString = NULL;
	char* pszHidComputed = NULL;
    SG_uint32 iLength = 0;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pRecord);

	// serialize the dbrecord into JSON string.

	SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pString)  );
	SG_ERR_CHECK(  SG_dbrecord__to_json(pCtx, pRecord,pString)  );

#if 0
    printf("%s\n", SG_string__sz(pString)); // TODO remove this
#endif

	// remember the length of the blob
    iLength = SG_string__length_in_bytes(pString);
	*iBlobFullLength = (SG_uint64) iLength;

	// create a blob in the repository using the JSON string.  this computes the HID and returns it.

	SG_repo__store_blob_from_memory(pCtx,
		pRepo,pRepoTx,
        NULL, // TODO pass the record id into here
        SG_FALSE,
		(const SG_byte *)SG_string__sz(pString),
		iLength,
		&pszHidComputed);
	if (!SG_context__has_err(pCtx) || SG_context__err_equals(pCtx, SG_ERR_BLOBFILEALREADYEXISTS))
	{
		// freeze the dbrecord memory-object and effectively make it read-only
		// from this point forward.  we give up our ownership of the HID.

		SG_ASSERT(  pszHidComputed  );
		_sg_dbrecord__freeze(pCtx, pRecord, pszHidComputed);
		pszHidComputed = NULL;

		SG_STRING_NULLFREE(pCtx, pString);
		return;  // return _OK or __BLOBFILEALREADYEXISTS
	}

fail:
	SG_STRING_NULLFREE(pCtx, pString);
	SG_NULLFREE(pCtx, pszHidComputed);
}
void SG_curl__throw_on_non200(SG_context* pCtx, SG_curl* pCurl)
{
	SG_int32 httpResponseCode = 0;
	SG_vhash* pvhErr = NULL;
	_sg_curl* p = (_sg_curl*)pCurl;

	SG_NULLARGCHECK_RETURN(pCurl);

	SG_ERR_CHECK(  SG_curl__getinfo__int32(pCtx, pCurl, CURLINFO_RESPONSE_CODE, &httpResponseCode)  );
	if (httpResponseCode != 200)
	{
		if ((httpResponseCode == 500 || httpResponseCode == 410) && p->pstrErr)
		{
			SG_bool bHas = SG_FALSE;
			const char* szMsg = NULL;
			
			SG_VHASH__ALLOC__FROM_JSON__SZ(pCtx, &pvhErr, SG_string__sz(p->pstrErr));
			if (SG_context__err_equals(pCtx, SG_ERR_JSONPARSER_SYNTAX))
			{
				// The server didn't return a JSON-formatted response.
				if (httpResponseCode == 500)
					SG_ERR_RESET_THROW2(SG_ERR_EXTENDED_HTTP_500, (pCtx, "%s", SG_string__sz(p->pstrErr)));
				else
					SG_ERR_THROW2(SG_ERR_SERVER_HTTP_ERROR, (pCtx, "%d", httpResponseCode));
			}
			SG_ERR_CHECK_CURRENT;

			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhErr, "msg", &bHas)  );
			if (bHas)
				SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhErr, "msg", &szMsg)  );
			if (szMsg)
				SG_ERR_THROW2(SG_ERR_EXTENDED_HTTP_500, (pCtx, "%s", szMsg));
			else
				SG_ERR_THROW2(SG_ERR_EXTENDED_HTTP_500, (pCtx, "%s", SG_string__sz(p->pstrErr)));
		}
		else if (httpResponseCode == 401)
		{
			SG_ERR_THROW(SG_ERR_AUTHORIZATION_REQUIRED);
		}
		else
			SG_ERR_THROW2(SG_ERR_SERVER_HTTP_ERROR, (pCtx, "%d", httpResponseCode));
	}

	/* common cleanup */
fail:
	SG_VHASH_NULLFREE(pCtx, pvhErr);
}
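/*
 * A hedged sketch of the intended call site for the routine above:
 * after performing the HTTP request (helper omitted here), check the
 * response and let SG_curl__throw_on_non200 map non-200 codes to the
 * errors documented in its body.
 */
static void example__check_http_response(SG_context * pCtx, SG_curl * pCurl)
{
	// ... perform the request on pCurl here ...

	// throws SG_ERR_AUTHORIZATION_REQUIRED for 401, SG_ERR_EXTENDED_HTTP_500
	// for a 500/410 with a message body, or SG_ERR_SERVER_HTTP_ERROR otherwise.
	SG_ERR_CHECK_RETURN(  SG_curl__throw_on_non200(pCtx, pCurl)  );
}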
void SG_jscontextpool__init(SG_context * pCtx, const char * szApplicationRoot)
{
	if(gpJSContextPoolGlobalState != NULL)
		return;

	SG_ERR_CHECK_RETURN(  SG_alloc1(pCtx, gpJSContextPoolGlobalState)  );

	SG_localsettings__get__collapsed_vhash(pCtx, "server", NULL, &gpJSContextPoolGlobalState->pServerConfig);
	if(SG_context__has_err(pCtx))
	{
		SG_log__report_error__current_error(pCtx);
		SG_context__err_reset(pCtx);
	}
	if(gpJSContextPoolGlobalState->pServerConfig==NULL)
	{
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &gpJSContextPoolGlobalState->pServerConfig)  );
	}

	SG_ERR_CHECK(  _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "enable_diagnostics", NULL)  );
	SG_ERR_CHECK(  _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "readonly", NULL)  );
	SG_ERR_CHECK(  _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "remote_ajax_libs", NULL)  );
	SG_ERR_CHECK(  _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "ssjs_mutable", &gpJSContextPoolGlobalState->ssjsMutable)  );

	if(szApplicationRoot==NULL)
		szApplicationRoot="";
	SG_ERR_CHECK(  SG_vhash__update__string__sz(pCtx, gpJSContextPoolGlobalState->pServerConfig, "application_root", szApplicationRoot)  );

	// Start up SpiderMonkey.
	SG_jscore__new_runtime(pCtx, SG_jsglue__context_callback, NULL, SG_FALSE, NULL);

	//If jscore is already initialized, just move on; rethrow any other error.
	if (SG_context__err_equals(pCtx, SG_ERR_ALREADY_INITIALIZED))
		SG_context__err_reset(pCtx);
	else
		SG_ERR_CHECK_CURRENT;

	SG_ERR_CHECK(  SG_mutex__init(pCtx, &gpJSContextPoolGlobalState->lock)  );

	return;
fail:
	SG_VHASH_NULLFREE(pCtx, gpJSContextPoolGlobalState->pServerConfig);
	SG_NULLFREE(pCtx, gpJSContextPoolGlobalState);
}
void SG_cmd_util__set_password(SG_context * pCtx,
							   const char * pszSrcRepoSpec,
							   SG_string * pStringUsername,
							   SG_string * pStringPassword)
{
	if (SG_password__supported())
	{
		SG_password__set(pCtx, pszSrcRepoSpec, pStringUsername, pStringPassword);
		if (!SG_CONTEXT__HAS_ERR(pCtx))
			return;
		if (!SG_context__err_equals(pCtx, SG_ERR_NOTIMPLEMENTED))
			SG_ERR_RETHROW;
		SG_context__err_reset(pCtx);
	}

	SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDERR,
							  "Could not remember your password in the keyring.\n")  );

fail:
	return;
}
void SG_repo__pack__zlib(SG_context* pCtx, SG_repo * pRepo)
{
    SG_vhash* pvh = NULL;
    SG_uint32 count = 0;
    SG_uint32 i = 0;
	SG_repo_tx_handle* pTx;

    SG_ERR_CHECK(  SG_repo__list_blobs(pCtx, pRepo, SG_BLOBENCODING__FULL, SG_TRUE, SG_TRUE, 500, 0, &pvh)  );
    SG_ERR_CHECK(  SG_vhash__count(pCtx, pvh, &count)  );
    SG_ERR_CHECK(  SG_repo__begin_tx(pCtx, pRepo, &pTx)  );
    for (i=0; i<count; i++)
    {
        const char* psz_hid = NULL;
        const SG_variant* pv = NULL;
        SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvh, i, &psz_hid, &pv)  );
        SG_repo__change_blob_encoding(pCtx, pRepo, pTx,  psz_hid, SG_BLOBENCODING__ZLIB, NULL, NULL, NULL, NULL, NULL);

		if (SG_context__has_err(pCtx))
        {
            if (!SG_context__err_equals(pCtx,SG_ERR_REPO_BUSY))
            {
                SG_ERR_RETHROW;
            }
            else
            {
                SG_context__err_reset(pCtx);
            }
        }
    }
    SG_ERR_CHECK(  SG_repo__commit_tx(pCtx, pRepo, &pTx)  );
    SG_VHASH_NULLFREE(pCtx, pvh);

    return;

fail:
    SG_VHASH_NULLFREE(pCtx, pvh);
}
void SG_sync__closet_user_dags(
	SG_context* pCtx, 
	SG_repo* pRepoSrcNotMine, 
	const char* pszRefHidLeafSrc,
	SG_varray** ppvaSyncedUserList)
{
	char* pszSrcAdminId = NULL;
	char* pszHidLeafSrc = NULL;
	char* pszHidLeafDest = NULL;
	SG_vhash* pvhDescriptors = NULL;
	SG_repo* pRepoDest = NULL;
	SG_repo* pRepoSrcMine = NULL;
	char* pszDestAdminId = NULL;

	/* Using disallowed characters to ensure no collision with an actual repo name.
	 * Note that this isn't actually stored anywhere--we just use it as a key in the
	 * vhash below where the /real/ repos have descriptor names. */
	const char* pszRefUserMasterFakeName = "\\/USER_MASTER\\/"; 

	/* The repo routines do a null arg check of pRepoSrcNotMine.
	   The other args are optional. */

	if (!pszRefHidLeafSrc)
	{
		SG_ERR_CHECK(  SG_zing__get_leaf(pCtx, pRepoSrcNotMine, NULL, SG_DAGNUM__USERS, &pszHidLeafSrc)  );
		pszRefHidLeafSrc = pszHidLeafSrc;
	}

	/* Add all repositories in "normal" status to the list we'll iterate over. */
	SG_ERR_CHECK(  SG_closet__descriptors__list(pCtx, &pvhDescriptors)  );

	/* If it exists, add the user master repo to the list. */
	{
		SG_bool bExists = SG_FALSE;
		SG_ERR_CHECK(  SG_repo__user_master__exists(pCtx, &bExists)  );
		if (bExists)
			SG_ERR_CHECK(  SG_vhash__add__null(pCtx, pvhDescriptors, pszRefUserMasterFakeName)  );
	}

	/* Iterate over the repositories, syncing the user database. */
	{
		SG_int32 i = 0;
		SG_uint32 numDescriptors = 0;

		SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhDescriptors, &numDescriptors)  );
		for(i = 0; i < (SG_int32)numDescriptors; i++)
		{
			const char* pszRefNameDest = NULL;
			SG_bool bAdminIdsMatch = SG_TRUE;
			const SG_variant* pvRefDest = NULL;

			/* Note that the source repo will be in this loop, too, but we don't need to
			 * check for it with another strcmp because the leaf hid comparison below will
			 * effectively skip it. So we do one extra leaf fetch and comparison, total,
			 * rather than an extra strcmp for every repo in the closet. */

			SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhDescriptors, i, &pszRefNameDest, &pvRefDest)  );

			if (SG_VARIANT_TYPE_NULL == pvRefDest->type)
				SG_ERR_CHECK(  SG_REPO__USER_MASTER__OPEN(pCtx, &pRepoDest)  );
			else
				SG_ERR_CHECK(  SG_REPO__OPEN_REPO_INSTANCE(pCtx, pszRefNameDest, &pRepoDest)  );

			SG_ERR_CHECK(  SG_zing__get_leaf(pCtx, pRepoDest, NULL, SG_DAGNUM__USERS, &pszHidLeafDest)  );

			if (strcmp(pszRefHidLeafSrc, pszHidLeafDest))
			{
				/* Pull from source to dest. 
				 * Pull is generally faster than push, so we're using it on purpose. */
				SG_pull__admin__local(pCtx, pRepoDest, pRepoSrcNotMine, NULL);
				if (SG_context__has_err(pCtx))
				{
					/* If there's an admin id mismatch, don't die. Log a warning and move on. */
					if (SG_context__err_equals(pCtx, SG_ERR_ADMIN_ID_MISMATCH))
					{
						const char* pszRefNameSrc = NULL;

						SG_ERR_DISCARD;

						SG_ERR_CHECK(  SG_repo__get_descriptor_name(pCtx, pRepoSrcNotMine, &pszRefNameSrc)  );
						if (!pszRefNameSrc)
							pszRefNameSrc = pszRefUserMasterFakeName;
						SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepoSrcNotMine, &pszSrcAdminId)  );

						SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepoDest, &pszDestAdminId)  );

						SG_ERR_CHECK(  SG_log__report_warning(pCtx, 
							"admin-id mismatch when syncing users: source repo %s has %s, dest repo %s has %s",
							pszRefNameSrc, pszSrcAdminId, pszRefNameDest, pszDestAdminId)  );

						bAdminIdsMatch = SG_FALSE;

						SG_NULLFREE(pCtx, pszDestAdminId);
						SG_NULLFREE(pCtx, pszSrcAdminId);
					}
					else
						SG_ERR_RETHROW;
				}

				if (bAdminIdsMatch)
				{
					SG_NULLFREE(pCtx, pszHidLeafDest);
					SG_ERR_CHECK(  SG_zing__get_leaf(pCtx, pRepoDest, NULL, SG_DAGNUM__USERS, &pszHidLeafDest)  );

					if (strcmp(pszRefHidLeafSrc, pszHidLeafDest))
					{
						/* The pull from source to dest resulted in a new leaf. 
						 * Use the new leaf and restart the loop. */
						SG_NULLFREE(pCtx, pszHidLeafSrc);
						pszRefHidLeafSrc = pszHidLeafSrc = pszHidLeafDest;
						pszHidLeafDest = NULL;

						SG_REPO_NULLFREE(pCtx, pRepoSrcMine);
						pRepoSrcNotMine = pRepoSrcMine = pRepoDest;
						pRepoDest = NULL;

						i = -1; /* start again at the first descriptor */
					}

				}
			}

			SG_NULLFREE(pCtx, pszHidLeafDest);
			SG_REPO_NULLFREE(pCtx, pRepoDest);
		}
	}

	if (ppvaSyncedUserList)
		SG_ERR_CHECK(  SG_user__list_all(pCtx, pRepoSrcNotMine, ppvaSyncedUserList)  );

	/* fall through */
fail:
	SG_NULLFREE(pCtx, pszSrcAdminId);
	SG_NULLFREE(pCtx, pszHidLeafSrc);
	SG_NULLFREE(pCtx, pszHidLeafDest);
	SG_VHASH_NULLFREE(pCtx, pvhDescriptors);
	SG_REPO_NULLFREE(pCtx, pRepoDest);
	SG_REPO_NULLFREE(pCtx, pRepoSrcMine);
	SG_NULLFREE(pCtx, pszDestAdminId);
}
void SG_tag__remove(SG_context * pCtx, SG_repo * pRepo, const char* pszRev, SG_bool bRev, SG_uint32 count_args, const char** paszArgs)
{
    SG_audit q;
	char* psz_hid_given = NULL;
	char* psz_hid_assoc_with_tag = NULL;
	SG_uint32 count_valid_tags = 0;
	const char** paszValidArgs = NULL;
	SG_uint32 i = 0;

    if (0 == count_args)
		return;

    SG_ERR_CHECK(  SG_audit__init(pCtx,&q,pRepo,SG_AUDIT__WHEN__NOW,SG_AUDIT__WHO__FROM_SETTINGS)  );

	if (pszRev)
	{
		if (bRev)
			SG_repo__hidlookup__dagnode(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, pszRev, &psz_hid_given);
		else
			SG_vc_tags__lookup__tag(pCtx, pRepo, pszRev, &psz_hid_given);

		if (SG_context__has_err(pCtx))
		{
			if (SG_context__err_equals(pCtx, SG_ERR_AMBIGUOUS_ID_PREFIX))
			{
				SG_context__push_level(pCtx);
				SG_console(pCtx, SG_CS_STDERR, "The revision or tag could not be found:  %s\n", pszRev);
				SG_context__pop_level(pCtx);
				SG_ERR_RETHROW;
			}
			else
				SG_ERR_RETHROW;
		}
	}

	// SG_vc_tags__remove will throw on the first non-existent tag and stop.
	// we don't want to issue errors, just warnings, and keep going,
	// so weed out all the invalid tags here before calling SG_vc_tags__remove.
	SG_ERR_CHECK(  SG_alloc(pCtx, count_args, sizeof(const char*), &paszValidArgs)  );
	for (i =0; i < count_args; i++)
	{
		SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, paszArgs[i], &psz_hid_assoc_with_tag)  );
		if (psz_hid_assoc_with_tag) // tag exists
		{
			if (psz_hid_given)
			{
				if (strcmp(psz_hid_given, psz_hid_assoc_with_tag) == 0) // tag is assoc with given changeset
					paszValidArgs[count_valid_tags++] = paszArgs[i];
				else // tag not assoc with given changeset, no error, but warn
					SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "Tag not associated with given revision:  %s\n", paszArgs[i])  );
			}
			else
				paszValidArgs[count_valid_tags++] = paszArgs[i];
		}
		else
		{
			SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "Tag not found:  %s\n", paszArgs[i])  );
		}
		SG_NULLFREE(pCtx, psz_hid_assoc_with_tag);
		psz_hid_assoc_with_tag = NULL;
	}

	SG_ERR_CHECK(  SG_vc_tags__remove(pCtx, pRepo, &q, count_valid_tags, paszValidArgs)  );


fail:
	SG_NULLFREE(pCtx, paszValidArgs);
	SG_NULLFREE(pCtx, psz_hid_given);
	SG_NULLFREE(pCtx, psz_hid_assoc_with_tag);

}
void sg_vv2__history__repo2(
	SG_context * pCtx,
	SG_repo * pRepo, 
	const SG_stringarray * psaArgs,  // if present these must be full repo-paths
	const SG_rev_spec* pRevSpec,
	const SG_rev_spec* pRevSpec_single_revisions,
	const char* pszUser,
	const char* pszStamp,
	SG_uint32 nResultLimit,
	SG_bool bHideObjectMerges,
	SG_int64 nFromDate,
	SG_int64 nToDate,
	SG_bool bListAll,
	SG_bool bReassembleDag,
	SG_bool* pbHasResult,
	SG_history_result ** ppResult,
	SG_history_token ** ppHistoryToken)
{
	SG_stringarray * pStringArrayChangesets_starting = NULL;
	SG_stringarray * pStringArrayChangesetsMissing = NULL;
	SG_stringarray * pStringArrayChangesets_single_revisions = NULL;
	SG_bool bRecommendDagWalk = SG_FALSE;
	SG_bool bLeaves = SG_FALSE;
	const char * pszCurrentDagnodeID = NULL;
	SG_stringarray * pStringArrayGIDs = NULL;
	SG_uint32 i = 0, nChangesetCount = 0;
	SG_uint32 count_args = 0;

	//Determine the starting changeset IDs.  pRevSpec and bLeaves control this.
	//We do this step here so that repo paths can be looked up before we call into history__core.
	SG_ERR_CHECK( sg_vv2__history__get_starting_changesets(pCtx, pRepo, pRevSpec,
														   &pStringArrayChangesets_starting,
														   &pStringArrayChangesetsMissing,
														   &bRecommendDagWalk,
														   &bLeaves) );
	if (pStringArrayChangesetsMissing)
	{
		// See K2177, K1322, W0836, W8132.  We requested specific starting
		// points and ran into some csets that were referenced (by --tag
		// or --branch) that are not present in the local repo.  Try to
		// silently ignore them.
		SG_uint32 nrFound = 0;
		SG_ERR_CHECK(  SG_stringarray__count(pCtx, pStringArrayChangesets_starting, &nrFound)  );
		if (nrFound > 0)
		{
			// Yes there were missing csets, but we still found some
			// of the referenced ones.  Just ignore the missing ones.
			// This should behave just like we had the older tag/branch
			// dag prior to the push -r on the vc dag.
		}
		else
		{
			const char * psz_0;
			// TODO 2012/10/19 Do we want a different message if the number of missing is > 1 ?
			SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, pStringArrayChangesetsMissing, 0, &psz_0)  );
			SG_ERR_THROW2(  SG_ERR_CHANGESET_BLOB_NOT_FOUND,
							(pCtx, "%s", psz_0)  );
		}
	}

	if (bListAll)
	{
		// See W8493.  If they gave us a --list-all along with a --rev or --tag, they
		// want to force us to show the full history rather than just the info for the
		// named cset.
		bRecommendDagWalk = SG_TRUE;
	}

	if (pRevSpec_single_revisions)
	{
		// We DO NOT pass a psaMissingHids here because we want
		// it to throw if the user names a missing cset.
		SG_ERR_CHECK(  SG_rev_spec__get_all__repo__dedup(pCtx, pRepo, pRevSpec_single_revisions, SG_TRUE,
														 &pStringArrayChangesets_single_revisions, NULL)  );
	}

	// TODO 2012/07/02 Is the following loop really what we want?
	// TODO            As written, it will look at each changeset
	// TODO            in the list and lookup each item's GID with
	// TODO            it.  If cset[k] does not have *ALL* of the
	// TODO            items, we discard the array of GIDs already
	// TODO            discovered and then try cset[k+1].
	// TODO
	// TODO            This seems wasteful and likely to fail in
	// TODO            the presence of lots of adds and deletes.
	// TODO
	// TODO            Seems like it would be better to start the
	// TODO            result list outside of the loop and remove
	// TODO            items from the search list as we find them
	// TODO            as we iterate over the csets.  This would
	// TODO            let us stop as soon as we have them all and
	// TODO            not require us to repeat the expensive mapping
	// TODO            of repo-path to gid.
	// TODO
	// TODO            Then again, I think the caller has limited us
	// TODO            to only having *1* item in the set of files/folders,
	// TODO            so this might not actually matter.

	if (psaArgs)
		SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaArgs, &count_args)  );
	if (count_args > 0)
	{
		SG_ERR_CHECK(  SG_stringarray__count(pCtx, pStringArrayChangesets_starting, &nChangesetCount)  );
		//Look up the GIDs for all of the arguments.
		//Try every changeset, until we get one that has the GID in question.
		for (i = 0; i < nChangesetCount; i++)
		{
			SG_ERR_CHECK( SG_stringarray__get_nth(pCtx, pStringArrayChangesets_starting, i, &pszCurrentDagnodeID) );

			//This loop matters if you have --leaves or if there are multiple parents.
			//Since they specified a changeset, we need to use the full repo path @/blah/blah to look up the objects.
			sg_vv2__history__lookup_gids_by_repopaths(pCtx, pRepo, pszCurrentDagnodeID, psaArgs,
													  &pStringArrayGIDs);
			if (SG_CONTEXT__HAS_ERR(pCtx))
			{
				if (i == (nChangesetCount - 1) || ! SG_context__err_equals(pCtx, SG_ERR_NOT_FOUND) )
				{
					SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayGIDs);
					SG_ERR_RETHROW;
				}
				else
				{
					SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayGIDs);
					SG_context__err_reset(pCtx);
				}
			}
			else
				break;
		}
	}

	//Call history core with the GIDs
	SG_ERR_CHECK( SG_history__run(pCtx, pRepo, pStringArrayGIDs,
					pStringArrayChangesets_starting, pStringArrayChangesets_single_revisions,
					pszUser, pszStamp, nResultLimit, bLeaves, bHideObjectMerges,
					nFromDate, nToDate, bRecommendDagWalk, bReassembleDag, pbHasResult, ppResult, ppHistoryToken) );

fail:
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayGIDs);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesets_starting);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesetsMissing);
	SG_STRINGARRAY_NULLFREE(pCtx, pStringArrayChangesets_single_revisions);
}
void sg_sync_client__http__push_begin(
    SG_context* pCtx,
    SG_sync_client * pSyncClient,
    SG_pathname** pFragballDirPathname,
    SG_sync_client_push_handle** ppPush)
{
    char* pszUrl = NULL;
    sg_sync_client_http_push_handle* pPush = NULL;
    SG_vhash* pvhResponse = NULL;
    SG_pathname* pPathUserTemp = NULL;
    SG_pathname* pPathFragballDir = NULL;
    char bufTid[SG_TID_MAX_BUFFER_LENGTH];
    SG_string* pstr = NULL;

    SG_NULLARGCHECK_RETURN(pSyncClient);
    SG_NULLARGCHECK_RETURN(pFragballDirPathname);
    SG_NULLARGCHECK_RETURN(ppPush);

    // Get the URL we're going to post to
    SG_ERR_CHECK(  _get_sync_url(pCtx, pSyncClient->psz_remote_repo_spec, SYNC_URL_SUFFIX JSON_URL_SUFFIX, NULL, NULL, &pszUrl)  );

    SG_ERR_CHECK(  do_url(pCtx, pszUrl, "POST", NULL, pSyncClient->psz_username, pSyncClient->psz_password, &pstr, NULL, SG_TRUE)  );
    SG_ERR_CHECK(  SG_vhash__alloc__from_json__sz(pCtx, &pvhResponse, SG_string__sz(pstr))  );
    SG_STRING_NULLFREE(pCtx, pstr);

    // Alloc a push handle.  Stuff the push ID we received into it.
    {
        const char* pszRef = NULL;
        SG_ERR_CHECK(  SG_alloc(pCtx, 1, sizeof(sg_sync_client_http_push_handle), &pPush)  );
        SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhResponse, PUSH_ID_KEY, &pszRef)  );
        SG_ERR_CHECK(  SG_strdup(pCtx, pszRef, &pPush->pszPushId)  );
    }

    // Create a temporary local directory for stashing fragballs before shipping them over the network.
    SG_ERR_CHECK(  SG_PATHNAME__ALLOC__USER_TEMP_DIRECTORY(pCtx, &pPathUserTemp)  );
    SG_ERR_CHECK(  SG_tid__generate(pCtx, bufTid, SG_TID_MAX_BUFFER_LENGTH)  );
    SG_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathFragballDir, pPathUserTemp, bufTid)  );
    SG_ERR_CHECK(  SG_fsobj__mkdir__pathname(pCtx, pPathFragballDir)  );

    // Tell caller where to stash fragballs for this push.
    SG_RETURN_AND_NULL(pPathFragballDir, pFragballDirPathname);

    // Return the new push handle.
    *ppPush = (SG_sync_client_push_handle*)pPush;
    pPush = NULL;

    /* fall through */
fail:
    SG_STRING_NULLFREE(pCtx, pstr);
    if(SG_context__err_equals(pCtx, SG_ERR_SERVER_HTTP_ERROR))
    {
        const char * szInfo = NULL;
        if(SG_IS_OK(SG_context__err_get_description(pCtx, &szInfo)) && strcmp(szInfo, "405")==0)
            SG_ERR_RESET_THROW(SG_ERR_SERVER_DOESNT_ACCEPT_PUSHES);
    }
    _NULLFREE_PUSH_HANDLE(pCtx, pPush);
    SG_NULLFREE(pCtx, pszUrl);
    SG_PATHNAME_NULLFREE(pCtx, pPathUserTemp);
    SG_PATHNAME_NULLFREE(pCtx, pPathFragballDir);
    SG_VHASH_NULLFREE(pCtx, pvhResponse);
}
void SG_dagfrag__load_from_repo__one(SG_context * pCtx,
									 SG_dagfrag * pFrag,
									 SG_repo* pRepo,
									 const char * szHidStart,
									 SG_int32 nGenerations)
{
	// load a fragment of the dag starting with the given dagnode
	// for nGenerations of parents.
	//
	// we add this portion of the graph to whatever we already
	// have in our fragment.  this may either augment it (giving us
	// a larger connected piece) or it may be an independent
	// subset.
	//
	// if nGenerations <= 0, load everything from this starting point
	// back to the NULL/root.
	//
	// generationStart is the generation of the starting dagnode.
	//
	// the starting dagnode *MAY* be in the final start-fringe.
	// normally, it will be.  but if we are called multiple times
	// (and have more than one start point), it may be the case
	// that this node is a parent of one of the other start points.
	//
	// we compute generationEnd as the generation that we will NOT
	// include in the fragment; nodes of that generation will be in
	// the end-fringe.  that is, we include [start...end) like most
	// C++ iterators.
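	//
	// for example: starting at E_20 in the diagram at the bottom
	// of this function (generationStart == 20) with nGenerations == 3,
	// generationEnd == 17; generations 20, 19, and 18 become members
	// and the generation-17 parents land in the end-fringe.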

	_my_data * pMyDataCached = NULL;
	SG_dagnode * pDagnodeAllocated = NULL;
	SG_dagnode * pDagnodeStart;
	SG_int32 generationStart, generationEnd;
	SG_bool bPresent = SG_FALSE;
    SG_rbtree* prb_WorkQueue = NULL;

	SG_NULLARGCHECK_RETURN(pFrag);
	SG_NONEMPTYCHECK_RETURN(szHidStart);

	// if we are extending the fragment, delete the generation-sorted
	// member cache copy.  (see __foreach_member()).  it's either that
	// or update it in parallel as we change the real CACHE and that
	// doesn't seem worth the bother.

	SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
	pFrag->m_pRB_GenerationSortedMemberCache = NULL;

    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb_WorkQueue)  );

	// fetch the starting dagnode and compute the generation bounds.
	// first, see if the cache already has info for this dagnode.
	// if not, fetch it from the source and then add it to the cache.

	SG_ERR_CHECK(  _cache__lookup(pCtx, pFrag,szHidStart,&pMyDataCached,&bPresent)  );
	if (!bPresent)
	{
		if (!pRepo)
			SG_ERR_THROW(  SG_ERR_INVALID_WHILE_FROZEN  );

        SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, szHidStart, &pDagnodeAllocated)  );

		pDagnodeStart = pDagnodeAllocated;
	}
	else
	{
		pDagnodeStart = pMyDataCached->m_pDagnode;
	}

	SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pDagnodeStart,&generationStart)  );
	SG_ASSERT_RELEASE_FAIL2(  (generationStart > 0),
					  (pCtx,"Invalid generation value [%d] for dagnode [%s]",
					   generationStart,szHidStart)  );
	if ((nGenerations <= 0)  ||  (generationStart <= nGenerations))
		generationEnd = 0;
	else
		generationEnd = generationStart - nGenerations;

	if (!bPresent)
	{
		// this dagnode was not already present in the cache.
		// add it to the cache directly and set the state.
		// we don't need to go thru the work queue for it.
		//
		// then add all of its parents to the work queue.

		SG_ERR_CHECK(  _cache__add__dagnode(pCtx,
											pFrag,
											generationStart,
											pDagnodeAllocated,SG_DFS_START_MEMBER,
											&pMyDataCached)  );
		pDagnodeAllocated = NULL;

		SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
	}
	else
	{
		// the node was already present in the cache, so we have already
		// walked at least part of the graph around it.

		switch (pMyDataCached->m_state)
		{
		default:
		//case SG_DFS_UNKNOWN:
			SG_ASSERT_RELEASE_FAIL2(  (0),
							  (pCtx,"Invalid state [%d] in DAGFRAG Cache for [%s]",
							   pMyDataCached->m_state,szHidStart)  );

		case SG_DFS_INTERIOR_MEMBER:				// already in fragment
		case SG_DFS_START_MEMBER:	// already in fragment, duplicated leaf?
			if (generationEnd < pMyDataCached->m_genDagnode)
			{
				// they've expanded the bounds of the fragment since we
				// last visited this dagnode.  keep this dagnode in the
				// fragment and revisit the ancestors in case any were
				// put in the end-fringe that should now be included.
				//
				// we leave the state as INTERIOR_MEMBER or START_MEMBER
				// because a duplicate start point should remain a
				// start point.

				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
			}
			else
			{
				// the current end-generation requested is >= the previous
				// end-generation, so we've completely explored this dagnode
				// already.  that is, a complete walk from this node for nGenerations
				// would not reveal any new information.
			}
			break;

		case SG_DFS_END_FRINGE:
			{
				// they want to start at a dagnode that we put in the
				// end-fringe.  this can happen if they need to expand
				// the bounds of the fragment to include older ancestors.
				//
				// we do not mark this as a start node because someone
				// else already has it as a parent.

				pMyDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
			}
			break;
		}
	}

	// we optionally put the parents of the current node into the work queue.
	//
	// service the work queue until it is empty.  this allows us to walk the graph without
	// recursion.  that is, as we decide what to do with a node, we add the parents
	// to the queue.  we then iterate thru the work queue until we have dealt with
	// everything -- that is, until all parents have been properly placed.
	//
	// we cannot use a standard iterator to drive this loop because we
	// modify the queue.

	while (1)
	{
		_process_work_queue_item(pCtx, pFrag,prb_WorkQueue,generationEnd,pRepo);
		if (!SG_context__has_err(pCtx))
			break;							// we processed everything in the queue and are done

		if (!SG_context__err_equals(pCtx,SG_ERR_RESTART_FOREACH))
			SG_ERR_RETHROW;

		SG_context__err_reset(pCtx);		// queue changed, restart iteration
	}

	SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);

	/*
	** we have loaded a piece of the dag (starting with the given start node
	** and tracing all parent edges back n generations).  we leave with everything
	** in our progress queues so that other start nodes can be added to the
	** fragment.  this allows the processing of subsequent start nodes to
	** override some of the decisions that we made.  for example:
	**
	**           Q_15
	**             |
	**             |
	**           Z_16
	**           /  \
	**          /    \
	**      Y_17      A_17
	**          \    /   \
	**           \  /     \
	**           B_18     C_18
	**             |
	**             |
	**           D_19
	**             |
	**             |
	**           E_20
	**
	** if we started with the leaf E_20 and requested 3 generations, we would have:
	**     start_set := { E }
	**     include_set := { B, D, E }
	**     end_set := { Y, A }
	**
	** after a subsequent call with the leaf C_18 and 3 generations, we would have:
	**     start_set := { C, E }
	**     include_set := { Z, A, B, C, D, E }
	**     end_set := { Q, Y }
	**
	*/

	return;

fail:
	SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);
	SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
}
void sg_sync_client__http__get_repo_info(
    SG_context* pCtx,
    SG_sync_client* pSyncClient,
    SG_bool bIncludeBranchInfo,
    SG_bool b_include_areas,
    SG_vhash** ppvh)
{
    char* pszUrl = NULL;
    SG_vhash* pvh = NULL;
    SG_uint32 iProtocolVersion = 0;
    SG_string* pstr = NULL;
    const char* psz_qs = NULL;

    SG_NULLARGCHECK_RETURN(pSyncClient);

    if (bIncludeBranchInfo && b_include_areas)
    {
        psz_qs = "details&areas";
    }
    else if (bIncludeBranchInfo && !b_include_areas)
    {
        psz_qs = "details";
    }
    else if (!bIncludeBranchInfo && b_include_areas)
    {
        psz_qs = "areas";
    }
    else
    {
        psz_qs = NULL;
    }

    SG_ERR_CHECK(  _get_sync_url(pCtx, pSyncClient->psz_remote_repo_spec,
                                 SYNC_URL_SUFFIX JSON_URL_SUFFIX, NULL, psz_qs, &pszUrl)  );

    SG_ERR_CHECK(  do_url(
                       pCtx,
                       pszUrl,
                       "GET",
                       NULL,
                       pSyncClient->psz_username,
                       pSyncClient->psz_password,
                       &pstr,
                       NULL,
                       SG_TRUE
                   )  );

    SG_ERR_CHECK(  SG_vhash__alloc__from_json__sz(pCtx, &pvh, SG_string__sz(pstr))  );
    if(SG_context__err_equals(pCtx, SG_ERR_JSON_WRONG_TOP_TYPE) || SG_context__err_equals(pCtx, SG_ERR_JSONPARSER_SYNTAX))
    {
        SG_ERR_RESET_THROW2(SG_ERR_NOTAREPOSITORY, (pCtx, "%s", pSyncClient->psz_remote_repo_spec));
    }
    else
    {
        SG_ERR_CHECK_CURRENT;
    }

    SG_ERR_CHECK(  SG_vhash__get__uint32(pCtx, pvh, SG_SYNC_REPO_INFO_KEY__PROTOCOL_VERSION, &iProtocolVersion)  );
    if (iProtocolVersion != 1)
    {
        SG_ERR_THROW2(SG_ERR_UNKNOWN_SYNC_PROTOCOL_VERSION, (pCtx, "%u", iProtocolVersion));
    }

    *ppvh = pvh;
    pvh = NULL;

    /* fall through */

fail:
    SG_NULLFREE(pCtx, pszUrl);
    SG_VHASH_NULLFREE(pCtx, pvh);
    SG_STRING_NULLFREE(pCtx, pstr);
}
/**
 * Request to UNLOCK on one or more files.
 *
 * WARNING: This routine deviates from the model of most
 * WARNING: of the SG_wc__ level-8 and/or SG_wc_tx level-7
 * WARNING: API routines because we cannot just "queue" an
 * WARNING: unlock like we do a RENAME with everything
 * WARNING: contained within the pWcTx; we actually have
 * WARNING: to update the locks dag (which is outside of
 * WARNING: the scope of the WC TX).
 * WARNING:
 * WARNING: So we only have a level-8 API
 * WARNING: so that we can completely control/bound the TX.
 *
 * We also deviate in that we don't take a --test
 * or --verbose option, which means we don't have a
 * JOURNAL to mess with.
 *
 */
void SG_wc__unlock(SG_context * pCtx,
                   const SG_pathname* pPathWc,
                   const SG_stringarray * psaInputs,
                   SG_bool bForce,
                   const char * psz_username,
                   const char * psz_password,
                   const char * psz_repo_upstream)
{
    SG_wc_tx * pWcTx = NULL;
    SG_audit q;
    SG_uint32 nrInputs = 0;
    SG_uint32 k;
    char * psz_tied_branch_name = NULL;
    char * psz_repo_upstream_allocated = NULL;
    SG_vhash * pvh_gids = NULL;
    const char * pszRepoDescriptorName = NULL;	// we do not own this

    if (psaInputs)
        SG_ERR_CHECK(  SG_stringarray__count(pCtx, psaInputs, &nrInputs)  );
    if (nrInputs == 0)
        SG_ERR_THROW2(  SG_ERR_INVALIDARG,
                        (pCtx, "Nothing to unlock")  );

    // psz_username is optional (assume no auth required)
    // psz_password is optional (assume no auth required)
    // psz_server is optional (assume default server)

    // Begin a WC TX so that we get all of the good stuff
    // (like mapping the CWD into a REPO handle and mapping
    // the inputs into GIDs).
    //
    // At this point I don't believe that setting a lock
    // will actually make any changes in WC.DB, so I'm
    // making it a READ-ONLY TX.
    //
    // My assumption is that the lock actually gets
    // written to the Locks DAG and shared with the server.
    // But I still want a TX handle for all of the other stuff.

    SG_ERR_CHECK(  SG_WC_TX__ALLOC__BEGIN(pCtx, &pWcTx, pPathWc, SG_FALSE)  );

    // We need the repo descriptor name later for the push/pull
    // and to optionally look up the default destination for
    // this repo.  The pRepo stores this *IFF* it was properly
    // opened (using a name).

    SG_ERR_CHECK(  SG_repo__get_descriptor_name(pCtx, pWcTx->pDb->pRepo,
                   &pszRepoDescriptorName)  );
    SG_ASSERT_RELEASE_FAIL2(  (pszRepoDescriptorName && *pszRepoDescriptorName),
                              (pCtx, "SG_wc__unlock: Could not get repo descriptor name.")  );

    // now we need to know what branch we are tied to.
    // if we're not tied, fail
    SG_ERR_CHECK(  SG_wc_tx__branch__get(pCtx, pWcTx, &psz_tied_branch_name)  );
    if (!psz_tied_branch_name)
        SG_ERR_THROW(  SG_ERR_NOT_TIED  );

    SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvh_gids)  );
    for (k=0; k<nrInputs; k++)
    {
        const char * pszInput_k;

        SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, psaInputs, k, &pszInput_k)  );
        SG_ERR_CHECK(  _map_input(pCtx, pWcTx, pvh_gids, pszInput_k)  );
    }

    if (!psz_repo_upstream)
    {
        SG_localsettings__descriptor__get__sz(pCtx, pszRepoDescriptorName,
                                              "paths/default",
                                              &psz_repo_upstream_allocated);
        if (SG_context__err_equals(pCtx, SG_ERR_NOT_FOUND))
            SG_ERR_REPLACE_ANY_RETHROW(  SG_ERR_NO_SERVER_SPECIFIED  );
        else
            SG_ERR_CHECK_CURRENT;

        psz_repo_upstream = psz_repo_upstream_allocated;
    }

    SG_ERR_CHECK(  SG_audit__init(pCtx, &q, pWcTx->pDb->pRepo,
                                  SG_AUDIT__WHEN__NOW,
                                  SG_AUDIT__WHO__FROM_SETTINGS)  );

    // OK, we have all the pieces.  Time to call the unlock code
    SG_ERR_CHECK(  SG_vc_locks__unlock(
                       pCtx,
                       pszRepoDescriptorName,
                       psz_repo_upstream,
                       psz_username,
                       psz_password,
                       psz_tied_branch_name,
                       pvh_gids,
                       bForce,
                       &q
                   )  );

    // Fall through and let the normal fail code discard/cancel
    // the read-only WC TX.  This will not affect the Locks DAG
    // nor the server.

fail:
    SG_ERR_IGNORE(  SG_wc_tx__cancel(pCtx, pWcTx)  );
    SG_WC_TX__NULLFREE(pCtx, pWcTx);
    SG_NULLFREE(pCtx, psz_tied_branch_name);
    SG_NULLFREE(pCtx, psz_repo_upstream_allocated);
    SG_VHASH_NULLFREE(pCtx, pvh_gids);
}
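/*
 * A hedged usage sketch for the level-8 routine above: unlock one
 * file using the default upstream and stored credentials.  Assumptions:
 * the file path is a placeholder, and passing NULL for pPathWc means
 * "use the working copy containing the CWD" (suggested by the TX
 * comments above).  The SG_ calls appear elsewhere on this page.
 */
static void example__unlock_one_file(SG_context * pCtx)
{
    SG_stringarray * psaInputs = NULL;

    SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psaInputs, 1)  );
    SG_ERR_CHECK(  SG_stringarray__add(pCtx, psaInputs, "docs/readme.txt")  );   // placeholder path

    SG_ERR_CHECK(  SG_wc__unlock(pCtx,
                                 NULL,        // pPathWc (assumed: working copy of CWD)
                                 psaInputs,
                                 SG_FALSE,    // bForce
                                 NULL,        // psz_username (no auth required)
                                 NULL,        // psz_password
                                 NULL)  );    // psz_repo_upstream (use "paths/default")

fail:
    SG_STRINGARRAY_NULLFREE(pCtx, psaInputs);
}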
void SG_workingdir__find_mapping(
	SG_context* pCtx,
	const SG_pathname* pPathLocalDirectory,
	SG_pathname** ppPathMappedLocalDirectory, /**< Return the actual local directory that contains the mapping */
	SG_string** ppstrNameRepoInstanceDescriptor, /**< Return the name of the repo instance descriptor */
	char** ppszidGidAnchorDirectory /**< Return the GID of the repo directory */
	)
{
	SG_pathname* curpath = NULL;
	SG_string* result_pstrDescriptorName = NULL;
	char* result_pszidGid = NULL;
	SG_pathname* result_mappedLocalDirectory = NULL;
	SG_vhash* pvhMapping = NULL;
	SG_pathname* pDrawerPath = NULL;
	SG_pathname* pMappingFilePath = NULL;
	SG_vhash* pvh = NULL;

	SG_NULLARGCHECK_RETURN(pPathLocalDirectory);

	SG_ERR_CHECK(  SG_PATHNAME__ALLOC__COPY(pCtx, &curpath, pPathLocalDirectory)  );

	/* it's a directory, so it should have a final slash */
	SG_ERR_CHECK(  SG_pathname__add_final_slash(pCtx, curpath)  );

	while (SG_TRUE)
	{
		SG_ERR_CHECK(  SG_workingdir__get_drawer_path(pCtx, curpath, &pDrawerPath)  );

		SG_fsobj__verify_directory_exists_on_disk__pathname(pCtx, pDrawerPath);
		if (!SG_context__has_err(pCtx))
		{
			const char* pszDescriptorName = NULL;
			const char* pszGid = NULL;

			SG_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pMappingFilePath, pDrawerPath, "repo.json")  );
			SG_PATHNAME_NULLFREE(pCtx, pDrawerPath);

			SG_ERR_CHECK(  SG_vfile__slurp(pCtx, pMappingFilePath, &pvh)  );

			SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvh, "mapping", &pvhMapping)  );

			SG_PATHNAME_NULLFREE(pCtx, pMappingFilePath);

			SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhMapping, "descriptor", &pszDescriptorName)  );
			SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhMapping, "anchor", &pszGid)  );

			SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &result_pstrDescriptorName)  );
			SG_ERR_CHECK(  SG_string__set__sz(pCtx, result_pstrDescriptorName, pszDescriptorName)  );

			if (pszGid)
			{
				SG_ERR_CHECK(  SG_gid__alloc_clone(pCtx, pszGid, &result_pszidGid)  );
			}
			else
			{
				result_pszidGid = NULL;
			}

			SG_VHASH_NULLFREE(pCtx, pvh);

			result_mappedLocalDirectory = curpath;
			curpath = NULL;

			break;
		}
		else
			SG_context__err_reset(pCtx);

		SG_PATHNAME_NULLFREE(pCtx, pDrawerPath);

		SG_pathname__remove_last(pCtx, curpath);

		if (SG_context__err_equals(pCtx, SG_ERR_CANNOTTRIMROOTDIRECTORY))
		{
			SG_context__err_reset(pCtx);
			break;
		}
		else
		{
			SG_ERR_CHECK_CURRENT;
		}
	}

	if (result_mappedLocalDirectory)
	{
        if (ppPathMappedLocalDirectory)
        {
		    *ppPathMappedLocalDirectory = result_mappedLocalDirectory;
        }
        else
        {
	        SG_PATHNAME_NULLFREE(pCtx, result_mappedLocalDirectory);
        }
        if (ppstrNameRepoInstanceDescriptor)
        {
            *ppstrNameRepoInstanceDescriptor = result_pstrDescriptorName;
        }
        else
        {
            SG_STRING_NULLFREE(pCtx, result_pstrDescriptorName);
        }
        if (ppszidGidAnchorDirectory)
        {
            *ppszidGidAnchorDirectory = result_pszidGid;
        }
        else
        {
            SG_NULLFREE(pCtx, result_pszidGid);
        }

		return;
	}
	else
	{
		SG_PATHNAME_NULLFREE(pCtx, curpath);

		SG_ERR_THROW_RETURN(SG_ERR_NOT_FOUND);
	}

fail:
	SG_VHASH_NULLFREE(pCtx, pvh);
	SG_PATHNAME_NULLFREE(pCtx, pDrawerPath);
	SG_PATHNAME_NULLFREE(pCtx, pMappingFilePath);
	SG_PATHNAME_NULLFREE(pCtx, result_mappedLocalDirectory);
	SG_PATHNAME_NULLFREE(pCtx, curpath);
}
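/*
 * A hedged usage sketch for the routine above: walk up from a
 * directory to the enclosing working copy and report its descriptor
 * name.  The starting path is a placeholder; the SG_ calls appear
 * elsewhere on this page.
 */
static void example__find_enclosing_working_copy(SG_context * pCtx)
{
	SG_pathname * pPath = NULL;
	SG_pathname * pPathMapped = NULL;
	SG_string * pstrDescriptorName = NULL;
	char * pszGid = NULL;

	SG_ERR_CHECK(  SG_PATHNAME__ALLOC__SZ(pCtx, &pPath, "/home/user/wc/src")  );	// placeholder

	// throws SG_ERR_NOT_FOUND if no ancestor directory contains a drawer.
	SG_ERR_CHECK(  SG_workingdir__find_mapping(pCtx, pPath,
											   &pPathMapped,
											   &pstrDescriptorName,
											   &pszGid)  );

	SG_ERR_CHECK(  SG_console(pCtx, SG_CS_STDOUT, "working copy %s uses repo '%s'\n",
							  SG_pathname__sz(pPathMapped),
							  SG_string__sz(pstrDescriptorName))  );

fail:
	SG_PATHNAME_NULLFREE(pCtx, pPath);
	SG_PATHNAME_NULLFREE(pCtx, pPathMapped);
	SG_STRING_NULLFREE(pCtx, pstrDescriptorName);
	SG_NULLFREE(pCtx, pszGid);
}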