void u0026_jsonparser__test_jsonparser_vhash_2(SG_context * pCtx)
{
	SG_string* pstr1 = NULL;
	SG_string* pstr2 = NULL;
	SG_vhash* pvh = NULL;

	VERIFY_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pstr1)  );
	VERIFY_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pstr2)  );

	VERIFY_ERR_CHECK(  u0026_jsonparser__create_2(pCtx, pstr1)  );

	SG_VHASH__ALLOC__FROM_JSON__SZ(pCtx, &pvh, SG_string__sz(pstr1));
	VERIFY_COND("from_json", !SG_context__has_err(pCtx));
	VERIFY_COND("from_json", pvh);

	SG_context__err_reset(pCtx);
	SG_vhash__to_json(pCtx, pvh, pstr2);
	VERIFY_COND("from_json", !SG_context__has_err(pCtx));

	// TODO do some checks

fail:
	SG_STRING_NULLFREE(pCtx, pstr1);
	SG_STRING_NULLFREE(pCtx, pstr2);
	SG_VHASH_NULLFREE(pCtx, pvh);
}
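The TODO above could be satisfied with a simple round-trip smoke check. A minimal sketch, assuming only the macros already used in this test (the "round_trip" label is illustrative):

	SG_vhash* pvh2 = NULL;

	// Reparse the JSON emitted by SG_vhash__to_json; a clean parse that
	// yields a non-NULL vhash is a basic round-trip check.
	SG_VHASH__ALLOC__FROM_JSON__SZ(pCtx, &pvh2, SG_string__sz(pstr2));
	VERIFY_COND("round_trip", !SG_context__has_err(pCtx));
	VERIFY_COND("round_trip", pvh2);
	SG_context__err_reset(pCtx);
	SG_VHASH_NULLFREE(pCtx, pvh2);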
static int _my_seek__file(void *instream, curl_off_t offset, int origin)
{
	_sg_curl* pMe = (_sg_curl*)instream;

	if (origin == SEEK_SET)
	{
		SG_context__push_level(pMe->pCtx);
		SG_log__report_verbose(pMe->pCtx, "SG_curl handling SEEK_SET.");
		SG_context__pop_level(pMe->pCtx);

		SG_file__seek(pMe->pCtx, pMe->readState.pFile, (SG_uint64)offset);
		if(SG_context__has_err(pMe->pCtx))
			return CURL_SEEKFUNC_FAIL;

		pMe->readState.pos = (SG_uint64)offset;
		pMe->readState.finished = SG_FALSE;
		return CURL_SEEKFUNC_OK;
	}
	else if (origin == SEEK_CUR)
	{
		SG_uint64 new_pos = (SG_uint64)((SG_int64)pMe->readState.pos+(SG_int64)offset);

		SG_context__push_level(pMe->pCtx);
		SG_log__report_verbose(pMe->pCtx, "SG_curl handling SEEK_CUR.");
		SG_context__pop_level(pMe->pCtx);

		SG_file__seek(pMe->pCtx, pMe->readState.pFile, new_pos);
		if(SG_context__has_err(pMe->pCtx))
			return CURL_SEEKFUNC_FAIL;

		pMe->readState.pos = new_pos;
		pMe->readState.finished = SG_FALSE;
		return CURL_SEEKFUNC_OK;
	}
	else if (origin == SEEK_END)
	{
		SG_uint64 new_pos = (SG_uint64)((SG_int64)pMe->readState.len+(SG_int64)offset);

		SG_context__push_level(pMe->pCtx);
		SG_log__report_verbose(pMe->pCtx, "SG_curl handling SEEK_END.");
		SG_context__pop_level(pMe->pCtx);

		SG_file__seek(pMe->pCtx, pMe->readState.pFile, new_pos);
		if(SG_context__has_err(pMe->pCtx))
			return CURL_SEEKFUNC_FAIL;

		pMe->readState.pos = new_pos;
		pMe->readState.finished = SG_FALSE;
		return CURL_SEEKFUNC_OK;
	}
	else
	{
		return CURL_SEEKFUNC_CANTSEEK;
	}
}
Example #3
int u0020_utf8pathnames__create_file(SG_context * pCtx, const SG_pathname * pPathnameTmpDir, _tableitem * pti)
{
	// create a file in the given tmp dir using the given filename.

	SG_pathname * pPathnameNewFile = NULL;
	char * pBufUtf8 = NULL;
	SG_uint32 lenUtf8;
	SG_file * pFile = NULL;
	int iResult;
	SG_bool bTest;

	// convert the utf32 string into utf8.

	VERIFY_ERR_CHECK_DISCARD(  SG_utf8__from_utf32(pCtx, pti->pa32,&pBufUtf8,&lenUtf8)  );	// we have to free pBufUtf8

	// verify that the computed utf8 string matches what we thought it should be.
	// (this hopefully guards against the conversion layer playing NFC/NFD tricks.)

	iResult = SG_utf8__compare(pBufUtf8,(char *)pti->pa8);
	VERIFYP_COND("u0020_utf8pathnames",(iResult==0),("Compare failed [%s][%s]",pBufUtf8,pti->pa8));

	// create full pathname to the file to create.

	VERIFY_ERR_CHECK_DISCARD(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx,
															   &pPathnameNewFile,
															   pPathnameTmpDir,pBufUtf8)  );

	// create the file and close it.
	// on Linux when our locale is set to something other than UTF-8, we may
	// get an ICU(10) == U_INVALID_CHAR_FOUND error because the test data is
	// not necessarily friendly to any one locale and there are some NFD
	// cases too.   we map ICU(10) to SG_ERR_UNMAPPABLE_UNICODE_CHAR

	SG_file__open__pathname(pCtx,pPathnameNewFile,SG_FILE_WRONLY|SG_FILE_CREATE_NEW,0755,&pFile);
#if defined(LINUX)
	bTest = (  (!SG_context__has_err(pCtx))  ||  (SG_context__err_equals(pCtx,SG_ERR_UNMAPPABLE_UNICODE_CHAR))  );
#else
	bTest = (  (!SG_context__has_err(pCtx))  );
#endif
	SG_context__err_reset(pCtx);

	VERIFYP_COND("u0020_utf8pathnames",bTest,
			 ("Error Creating file [%s]",SG_pathname__sz(pPathnameNewFile)));

	VERIFY_ERR_CHECK_DISCARD(  SG_file__close(pCtx, &pFile)  );

	SG_PATHNAME_NULLFREE(pCtx, pPathnameNewFile);
	SG_NULLFREE(pCtx, pBufUtf8);

	return 1;
}
static curlioerr _my_ioctl__file(CURL *handle, int cmd, void *clientp)
{
	_sg_curl* pMe = (_sg_curl*)clientp;

	SG_UNUSED(handle);

	if (cmd == CURLIOCMD_NOP)
	{
		SG_context__push_level(pMe->pCtx);
		SG_log__report_verbose(pMe->pCtx, "SG_curl handling CURLIOCMD_NOP.");
		SG_context__pop_level(pMe->pCtx);

		return CURLIOE_OK;
	}
	else if (cmd == CURLIOCMD_RESTARTREAD)
	{
		SG_context__push_level(pMe->pCtx);
		SG_log__report_verbose(pMe->pCtx, "SG_curl handling CURLIOCMD_RESTARTREAD.");
		SG_context__pop_level(pMe->pCtx);

		SG_file__seek(pMe->pCtx, pMe->readState.pFile, 0);
		if(SG_context__has_err(pMe->pCtx))
			return CURLIOE_FAILRESTART;

		pMe->readState.pos = 0;
		pMe->readState.finished = SG_FALSE;
		return CURLIOE_OK;
	}
	else
	{
		return CURLIOE_UNKNOWNCMD;
	}
}
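For context, seek and ioctl callbacks like the two above are registered on a libcurl easy handle roughly as follows. A minimal sketch, assuming a CURL* handle named pCurl and the _sg_curl instance pMe (neither shown in these excerpts):

	// Let curl rewind/reposition the upload stream via our callbacks.
	curl_easy_setopt(pCurl, CURLOPT_SEEKFUNCTION, _my_seek__file);
	curl_easy_setopt(pCurl, CURLOPT_SEEKDATA, pMe);
	curl_easy_setopt(pCurl, CURLOPT_IOCTLFUNCTION, _my_ioctl__file);
	curl_easy_setopt(pCurl, CURLOPT_IOCTLDATA, pMe);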
Example #5
void SG_workingdir__generate_and_create_temp_dir_for_purpose(SG_context * pCtx,
															 const SG_pathname * pPathWorkingDirectoryTop,
															 const char * pszPurpose,
															 SG_pathname ** ppPathTempDir)
{
	SG_pathname * pPathTempRoot = NULL;
	SG_pathname * pPath = NULL;
	SG_string * pString = NULL;
	SG_int64 iTimeUTC;
	SG_time tmLocal;
	SG_uint32 kAttempt = 0;

	SG_NONEMPTYCHECK_RETURN(pszPurpose);
	SG_NULLARGCHECK_RETURN(ppPathTempDir);

	// get path to "<wd-top>/.sgtemp".
	SG_ERR_CHECK(  SG_workingdir__get_temp_path(pCtx,pPathWorkingDirectoryTop,&pPathTempRoot)  );

	SG_ERR_CHECK(  SG_time__get_milliseconds_since_1970_utc(pCtx,&iTimeUTC)  );
	SG_ERR_CHECK(  SG_time__decode__local(pCtx,iTimeUTC,&tmLocal)  );

	SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx,&pString)  );

	while (1)
	{
		// build path "<wd-top>/.sgtemp/<purpose>_20091201_0".
		// where <purpose> is something like "revert" or "merge".

		SG_ERR_CHECK(  SG_string__sprintf(pCtx,pString,"%s_%04d%02d%02d_%d",
										  pszPurpose,
										  tmLocal.year,tmLocal.month,tmLocal.mday,
										  kAttempt++)  );
		SG_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx,&pPath,pPathTempRoot,SG_string__sz(pString))  );

		// try to create a NEW temp directory.  if this path already exists on disk,
		// loop and try again.  if we get a hard error, just give up.

		SG_fsobj__mkdir_recursive__pathname(pCtx,pPath);
		if (SG_context__has_err(pCtx) == SG_FALSE)
			goto success;

		if (SG_context__err_equals(pCtx,SG_ERR_DIR_ALREADY_EXISTS) == SG_FALSE)
			SG_ERR_RETHROW;

		SG_context__err_reset(pCtx);
		SG_PATHNAME_NULLFREE(pCtx,pPath);
	}

success:
	*ppPathTempDir = pPath;

	SG_STRING_NULLFREE(pCtx, pString);
	SG_PATHNAME_NULLFREE(pCtx, pPathTempRoot);
	return;

fail:
	SG_STRING_NULLFREE(pCtx, pString);
	SG_PATHNAME_NULLFREE(pCtx, pPathTempRoot);
	SG_PATHNAME_NULLFREE(pCtx, pPath);
}

static JSBool
GetLine(SG_context *pCtx, JSContext *cx, char *bufp, SG_uint32 bufremaining, FILE *file, const char *prompt) {
#ifndef EDITLINE
    SG_UNUSED(cx);
#endif

#ifdef EDITLINE
    /*
     * Use readline only if file is stdin, because there's no way to specify
     * another handle.  Are other filehandles interactive?
     */
    if (file == stdin) {
        char *linep = readline(prompt);
        if (!linep)
            return JS_FALSE;
        if (linep[0] != '\0')
            add_history(linep);
        SG_strcpy(pCtx, bufp, bufremaining, linep);
        if(SG_context__has_err(pCtx))
        {
            SG_context__err_reset(pCtx);
            JS_free(cx, linep); // don't leak the readline buffer on this error path
            return JS_FALSE;
        }
        JS_free(cx, linep);
        bufp += strlen(bufp);
        *bufp++ = '\n';
        *bufp = '\0';
    } else
#endif
    {
        char line[256];
        fprintf(gOutFile, "%s", prompt);
        fflush(gOutFile);
        if (!fgets(line, sizeof line, file))
            return JS_FALSE;
        SG_strcpy(pCtx, bufp, bufremaining, line);
        if(SG_context__has_err(pCtx))
        {
            SG_context__err_reset(pCtx);
            return JS_FALSE;
        }
    }
    return JS_TRUE;
}
void u0026_jsonparser__verify(SG_context * pCtx, SG_string* pstr)
{
	const char *psz = SG_string__sz(pstr);
	SG_jsonparser* jc = NULL;
	struct u0026_ctx ctx;

	ctx.key[0] = 0;

	SG_ERR_CHECK(  SG_jsonparser__alloc(pCtx, &jc, u0026_jcb, &ctx)  );

	SG_ERR_CHECK(  SG_jsonparser__chars(pCtx, jc, psz, (SG_uint32) strlen(psz))  );
	if (!SG_context__has_err(pCtx))
	{
		SG_jsonparser__done(pCtx, jc);
		VERIFY_COND("JSON_checker_done", !SG_context__has_err(pCtx));
	}

fail:
	SG_JSONPARSER_NULLFREE(pCtx, jc);
}
static void _remote_clone_allowed(SG_context* pCtx)
{
	char* pszSetting = NULL;
	SG_bool bAllowed = SG_TRUE;

	SG_localsettings__get__sz(pCtx, SG_LOCALSETTING__SERVER_CLONE_ALLOWED, NULL, &pszSetting, NULL);
	if(!SG_context__has_err(pCtx) && pszSetting != NULL)
		bAllowed = (strcmp(pszSetting, "true")==0);
	if(SG_context__has_err(pCtx))
	{
		SG_log__report_error__current_error(pCtx);
		SG_context__err_reset(pCtx);
	}

	if (!bAllowed)
		SG_ERR_THROW(SG_ERR_SERVER_DISALLOWED_REPO_CREATE_OR_DELETE);

	/* fall through */
fail:
	SG_NULLFREE(pCtx, pszSetting);
}
void SG_jscontext__acquire(SG_context * pCtx, SG_jscontext ** ppJs)
{
	SG_ASSERT(pCtx!=NULL);
	SG_NULLARGCHECK_RETURN(ppJs);

	if(gpJSContextPoolGlobalState->ssjsMutable)
	{
		_sg_jscontext__create(pCtx, ppJs);
		return;
	}

	SG_ERR_CHECK_RETURN(  SG_mutex__lock(pCtx, &gpJSContextPoolGlobalState->lock)  );

	if(gpJSContextPoolGlobalState->pFirstAvailableContext!=NULL)
	{
		SG_jscontext * pJs = gpJSContextPoolGlobalState->pFirstAvailableContext;

		gpJSContextPoolGlobalState->pFirstAvailableContext = pJs->pNextAvailableContext;
		pJs->pNextAvailableContext = NULL;

		++gpJSContextPoolGlobalState->numContextsCheckedOut;

		SG_ERR_CHECK_RETURN(  SG_mutex__unlock(pCtx, &gpJSContextPoolGlobalState->lock)  );

		SG_httprequestprofiler__start(SG_HTTPREQUESTPROFILER_CATEGORY__JSREQUEST_TOGGLING);
		(void)JS_SetContextThread(pJs->cx);
		JS_BeginRequest(pJs->cx);
		pJs->isInARequest = SG_TRUE;
		SG_httprequestprofiler__stop();

		JS_SetContextPrivate(pJs->cx, pCtx);

		*ppJs = pJs;
	}
	else
	{
		++gpJSContextPoolGlobalState->numContextsCheckedOut;
		SG_ERR_CHECK_RETURN(  SG_mutex__unlock(pCtx, &gpJSContextPoolGlobalState->lock)  );

		_sg_jscontext__create(pCtx, ppJs);

		if(SG_context__has_err(pCtx) || *ppJs==NULL)
		{
			/* Use the version of the mutex routines that doesn't touch pCtx,
			because we're already in an error state. */
			SG_mutex__lock__bare(&gpJSContextPoolGlobalState->lock);
			--gpJSContextPoolGlobalState->numContextsCheckedOut;
			SG_mutex__unlock__bare(&gpJSContextPoolGlobalState->lock);
		}
	}
}
static JSBool
Load(JSContext *cx, uintN argc, jsval *vp)
{
    SG_context *pCtx = NULL;
    JSObject *thisobj = JS_THIS_OBJECT(cx, vp);
    jsval *argv = JS_ARGV(cx, vp);
    uintN i;
    if (!thisobj)
        return JS_FALSE;

	SG_context__alloc(&pCtx);
	if (pCtx==NULL)
		return JS_FALSE;

    for (i = 0; i < argc; i++) {
        JSString *str = JS_ValueToString(cx, argv[i]);
        char *filename = NULL;
    	uint32 oldopts;
    	JSObject *scriptObj;
        if (!str) {
        	SG_CONTEXT_NULLFREE(pCtx);
            return JS_FALSE;
        }
        argv[i] = STRING_TO_JSVAL(str);
        sg_jsglue__jsstring_to_sz(pCtx, cx, str, &filename);
        if (SG_context__has_err(pCtx)) {
        	SG_CONTEXT_NULLFREE(pCtx);
            return JS_FALSE;
        }
        errno = 0;
        oldopts = JS_GetOptions(cx);
        JS_SetOptions(cx, oldopts | JSOPTION_COMPILE_N_GO | JSOPTION_NO_SCRIPT_RVAL);
        scriptObj = JS_CompileFile(cx, thisobj, filename);
        SG_NULLFREE(pCtx, filename);
        JS_SetOptions(cx, oldopts);
        if (!scriptObj) {
        	SG_CONTEXT_NULLFREE(pCtx);
            return JS_FALSE;
        }

        if (!compileOnly && !JS_ExecuteScript(cx, thisobj, scriptObj, NULL)) {
        	SG_CONTEXT_NULLFREE(pCtx);
            return JS_FALSE;
        }
    }

	SG_CONTEXT_NULLFREE(pCtx);
    return JS_TRUE;
}
void sg_treediff_cache::GetStatusForPath(SG_context * pCtx, SG_bool bIsRetry, const SG_pathname * pPathName, SG_wc_status_flags * pStatusFlags, SG_bool * pbDirChanged)
{
	wxCriticalSectionLocker lock(m_wc_transaction_lock);
	SG_wc_status_flags statusFlags = SG_WC_STATUS_FLAGS__U__FOUND;
	SG_string * psgstr_statusFlagsDetails = NULL;
	if (m_pwc_tx == NULL)
	{
		//Start a read-only transaction.
		SG_ERR_CHECK(  SG_WC_TX__ALLOC__BEGIN(pCtx, &m_pwc_tx, ((pPathName)), SG_TRUE)  );
	}

	SG_ERR_CHECK(  SG_wc_tx__get_item_status_flags(pCtx, m_pwc_tx, SG_pathname__sz(pPathName), SG_FALSE, SG_FALSE, &statusFlags, NULL)  );
	if (pbDirChanged != NULL && (statusFlags & SG_WC_STATUS_FLAGS__T__DIRECTORY))
	{
		//It's a directory, so we need to check to see if there are modifications under it.
		SG_wc_status_flags dirStatusFlags = SG_WC_STATUS_FLAGS__U__FOUND;
		SG_ERR_CHECK(  SG_wc_tx__get_item_dirstatus_flags(pCtx, m_pwc_tx, SG_pathname__sz(pPathName), &dirStatusFlags, NULL, pbDirChanged)   );
	}
#if DEBUG
	SG_ERR_CHECK(  SG_wc_debug__status__dump_flags(pCtx, statusFlags, "status flags", &psgstr_statusFlagsDetails)  );
	SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "%s", SG_string__sz(psgstr_statusFlagsDetails))  );
#endif
	//Don't recurse if this is our second try. 
	if ((bIsRetry == SG_FALSE) 
		&& ((statusFlags & SG_WC_STATUS_FLAGS__T__BOGUS) == SG_WC_STATUS_FLAGS__T__BOGUS) )
	{
		//This is an item that is new in the directory since we started
		//our transaction.  Close the transaction, and start a new one.
		SG_ERR_CHECK(  SG_log__report_verbose(pCtx, "Got a bogus status.  Restarting transaction")  );
		clearCache(pCtx);
		GetStatusForPath(pCtx, SG_TRUE, pPathName, &statusFlags, pbDirChanged);
	}
	
	SG_ERR_CHECK(  SG_time__get_milliseconds_since_1970_utc(pCtx, &m_timeLastRequest)  );
	*pStatusFlags = statusFlags;
fail:
	SG_STRING_NULLFREE(pCtx, psgstr_statusFlagsDetails);
	//Log and reset any errors on the context.
	//Any error condition should report the status as
	//Found and still send a message back to the client.
	if (SG_context__has_err(pCtx) == SG_TRUE)
	{
		if (!SG_context__err_equals(pCtx, SG_ERR_WC_DB_BUSY))
		{
			SG_log__report_error__current_error(pCtx);
		}
		SG_context__err_reset(pCtx);
	}
}
Example #12
void SG_dbrecord__save_to_repo(SG_context* pCtx, SG_dbrecord * pRecord, SG_repo * pRepo, SG_repo_tx_handle* pRepoTx, SG_uint64* iBlobFullLength)
{
	SG_string * pString = NULL;
	char* pszHidComputed = NULL;
    SG_uint32 iLength = 0;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pRecord);

	// serialize the dbrecord into JSON string.

	SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pString)  );
	SG_ERR_CHECK(  SG_dbrecord__to_json(pCtx, pRecord,pString)  );

#if 0
    printf("%s\n", SG_string__sz(pString)); // TODO remove this
#endif

	// remember the length of the blob
    iLength = SG_string__length_in_bytes(pString);
	*iBlobFullLength = (SG_uint64) iLength;

	// create a blob in the repository using the JSON string.  this computes the HID and returns it.

	SG_repo__store_blob_from_memory(pCtx,
		pRepo,pRepoTx,
        NULL, // TODO pass the record id into here
        SG_FALSE,
		(const SG_byte *)SG_string__sz(pString),
		iLength,
		&pszHidComputed);
	if (!SG_context__has_err(pCtx) || SG_context__err_equals(pCtx, SG_ERR_BLOBFILEALREADYEXISTS))
	{
		// freeze the dbrecord memory-object and effectively make it read-only
		// from this point forward.  we give up our ownership of the HID.

		SG_ASSERT(  pszHidComputed  );
		_sg_dbrecord__freeze(pCtx, pRecord, pszHidComputed);
		pszHidComputed = NULL;

		SG_STRING_NULLFREE(pCtx, pString);
		return;  // return _OK or __BLOBFILEALREADYEXISTS
	}

fail:
	SG_STRING_NULLFREE(pCtx, pString);
	SG_NULLFREE(pCtx, pszHidComputed);
}
void SG_jscontextpool__teardown(SG_context * pCtx)
{
	if(gpJSContextPoolGlobalState!=NULL)
	{
		SG_ERR_CHECK_RETURN(  SG_mutex__lock(pCtx, &gpJSContextPoolGlobalState->lock)  );

		// Wait until all outstanding SG_jscontexts have been released. Don't try to terminate
		// early, otherwise we have a race condition on our hands: If the outstanding SG_jscontext
		// tries to perform any JavaScript operations before the app terminates, but after we have
		// called JS_Shutdown(), we'll end up crashing on exit. This of course is worse than
		//  making the user hit Ctrl-C again to do a hard shutdown if it's taking too long.
		if(gpJSContextPoolGlobalState->numContextsCheckedOut > 0)
		{
			SG_ERR_IGNORE(  SG_log__report_warning(pCtx, "Waiting on %d SG_jscontexts that are still in use.", gpJSContextPoolGlobalState->numContextsCheckedOut)  );
			while(gpJSContextPoolGlobalState->numContextsCheckedOut > 0)
			{
				SG_ERR_CHECK_RETURN(  SG_mutex__unlock(pCtx, &gpJSContextPoolGlobalState->lock)  );
				SG_sleep_ms(10);
				SG_ERR_CHECK_RETURN(  SG_mutex__lock(pCtx, &gpJSContextPoolGlobalState->lock)  );
			}
		}

		SG_ERR_CHECK_RETURN(  SG_mutex__unlock(pCtx, &gpJSContextPoolGlobalState->lock)  );
		SG_mutex__destroy(&gpJSContextPoolGlobalState->lock);

		while(gpJSContextPoolGlobalState->pFirstAvailableContext!=NULL)
		{
			SG_jscontext * pJs = gpJSContextPoolGlobalState->pFirstAvailableContext;
			gpJSContextPoolGlobalState->pFirstAvailableContext = pJs->pNextAvailableContext;

			(void)JS_SetContextThread(pJs->cx);
			JS_BeginRequest(pJs->cx);
			JS_SetContextPrivate(pJs->cx, pCtx);
			JS_DestroyContextNoGC(pJs->cx);

			if(SG_context__has_err(pCtx)) // An error was produced during GC...
			{
				SG_log__report_error__current_error(pCtx);
				SG_context__err_reset(pCtx);
			}

			SG_NULLFREE(pCtx, pJs);
		}

		SG_VHASH_NULLFREE(pCtx, gpJSContextPoolGlobalState->pServerConfig);
		SG_NULLFREE(pCtx, gpJSContextPoolGlobalState);
	}
}
void SG_jscontext__release(SG_context * pCtx, SG_jscontext ** ppJs)
{
	if(ppJs==NULL || *ppJs==NULL)
		return;

	if(gpJSContextPoolGlobalState->ssjsMutable)
	{
		SG_httprequestprofiler__start(SG_HTTPREQUESTPROFILER_CATEGORY__JSREQUEST_TOGGLING);
		JS_EndRequest((*ppJs)->cx);
		JS_DestroyContext((*ppJs)->cx);
		SG_httprequestprofiler__stop();

		if(SG_context__has_err(pCtx)) // An error was produced during GC...
		{
			SG_log__report_error__current_error(pCtx);
			SG_context__err_reset(pCtx);
		}

		SG_NULLFREE(pCtx, (*ppJs));
	}
	else
	{
		SG_jscontext * pJs = *ppJs;

		JS_MaybeGC(pJs->cx);

		JS_SetContextPrivate(pJs->cx, NULL); // Clear out the old pCtx pointer.

		SG_httprequestprofiler__start(SG_HTTPREQUESTPROFILER_CATEGORY__JSREQUEST_TOGGLING);
		JS_EndRequest(pJs->cx);
		pJs->isInARequest = SG_FALSE;
		(void)JS_ClearContextThread(pJs->cx);
		SG_httprequestprofiler__stop();

		SG_ERR_CHECK_RETURN(  SG_mutex__lock(pCtx, &gpJSContextPoolGlobalState->lock)  );

		pJs->pNextAvailableContext = gpJSContextPoolGlobalState->pFirstAvailableContext;
		gpJSContextPoolGlobalState->pFirstAvailableContext = pJs;
		--gpJSContextPoolGlobalState->numContextsCheckedOut;

		SG_ERR_CHECK_RETURN(  SG_mutex__unlock(pCtx, &gpJSContextPoolGlobalState->lock)  );

		*ppJs = NULL;
	}
}
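Acquire and release are meant to bracket each request. A minimal sketch of the calling pattern, assuming the error-handling conventions shown in the pool code (the work in the middle is illustrative):

	SG_jscontext * pJs = NULL;

	SG_jscontext__acquire(pCtx, &pJs);
	if (!SG_context__has_err(pCtx) && pJs != NULL)
	{
		// ... evaluate scripts against pJs->cx ...

		SG_jscontext__release(pCtx, &pJs);	// returns the context to the pool (or destroys it)
	}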
Example #15
/**
 * Find the appropriate external tool to let the user perform a TEXT MERGE
 * on a file.
 *
 * TODO 2010/07/13 For now, this is hard-coded to use DiffMerge.
 * TODO            Later we want to allow them to have multiple
 * TODO            tools configured and/or to use the file suffix
 * TODO            and so on.
 */
static void _resolve__external_tool__lookup(SG_context * pCtx,
											struct _resolve_data * pData,
											const char * pszGid,
											const SG_vhash * pvhIssue,
											SG_string * pStrRepoPath,
											_resolve__external_tool ** ppET)
{
	_resolve__external_tool * pET = NULL;
	SG_repo * pRepo;

	SG_UNUSED( pszGid );
	SG_UNUSED( pvhIssue );

	SG_ERR_CHECK(  SG_pendingtree__get_repo(pCtx, pData->pPendingTree, &pRepo)  );

	SG_ERR_CHECK(  SG_alloc1(pCtx, pET)  );

	// TODO 2010/07/13 Use localsettings to determine WHICH external tool we should use.
	// TODO            (This could be based upon suffixes and/or whatever.)
	// TODO            Then -- for THAT tool -- lookup the program executable path and
	// TODO            the argument list.
	// TODO            Substitute the given pathnames into the argument list.
	// TODO
	// TODO            For now, we just hard-code DiffMerge.

	SG_ERR_CHECK(  SG_strdup(pCtx, "DiffMerge", &pET->pszName)  );

	SG_localsettings__get__sz(pCtx, "merge/diffmerge/program", pRepo, &pET->pszExe, NULL);
	if (SG_context__has_err(pCtx) || (!pET->pszExe) || (!*pET->pszExe))
	{
		SG_context__err_reset(pCtx);
		SG_ERR_THROW2(  SG_ERR_NO_MERGE_TOOL_CONFIGURED,
						(pCtx, "'%s'  Use 'vv localsetting set merge/diffmerge/program' and retry -or- manually merge content and then use 'vv resolve --mark'.",
						 SG_string__sz(pStrRepoPath))  );
	}

	// TODO 2010/07/13 Get argvec.

	*ppET = pET;
	return;

fail:
	_RESOLVE__EXTERNAL_TOOL__NULLFREE(pCtx, pET);
}
void SG_jscontextpool__init(SG_context * pCtx, const char * szApplicationRoot)
{
	if(gpJSContextPoolGlobalState != NULL)
		return;

	SG_ERR_CHECK_RETURN(  SG_alloc1(pCtx, gpJSContextPoolGlobalState)  );

	SG_localsettings__get__collapsed_vhash(pCtx, "server", NULL, &gpJSContextPoolGlobalState->pServerConfig);
	if(SG_context__has_err(pCtx))
	{
		SG_log__report_error__current_error(pCtx);
		SG_context__err_reset(pCtx);
	}
	if(gpJSContextPoolGlobalState->pServerConfig==NULL)
	{
		SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &gpJSContextPoolGlobalState->pServerConfig)  );
	}

	SG_ERR_CHECK(  _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "enable_diagnostics", NULL)  );
	SG_ERR_CHECK(  _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "readonly", NULL)  );
	SG_ERR_CHECK(  _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "remote_ajax_libs", NULL)  );
	SG_ERR_CHECK(  _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "ssjs_mutable", &gpJSContextPoolGlobalState->ssjsMutable)  );

	if(szApplicationRoot==NULL)
		szApplicationRoot="";
	SG_ERR_CHECK(  SG_vhash__update__string__sz(pCtx, gpJSContextPoolGlobalState->pServerConfig, "application_root", szApplicationRoot)  );

	// Start up SpiderMonkey.
	SG_jscore__new_runtime(pCtx, SG_jsglue__context_callback, NULL, SG_FALSE, NULL);

	//If jscore is already initialized, just move on.
	if (SG_context__err_equals(pCtx, SG_ERR_ALREADY_INITIALIZED))
	{
		SG_context__err_reset(pCtx);
	}

	SG_ERR_CHECK(  SG_mutex__init(pCtx, &gpJSContextPoolGlobalState->lock)  );

	return;
fail:
	SG_VHASH_NULLFREE(pCtx, gpJSContextPoolGlobalState->pServerConfig);
	SG_NULLFREE(pCtx, gpJSContextPoolGlobalState);
}
#if defined(WINDOWS)
// NOTE: the opening half of this #if block was missing from this excerpt;
// the Windows thread-proc signature below is a reconstruction (an assumption),
// inferred from the WINDOWS-only return statement at the bottom.
static unsigned int _add_conflicting_descriptor_in_thread(void* pParam)
#else
static void _add_conflicting_descriptor_in_thread(void* pParam)
#endif
{
	thread_data* pMe = (thread_data*)pParam;
	SG_context* pCtx = pMe->pCtx;
	SG_closet_descriptor_handle* ph = NULL;
	SG_vhash* pvh = NULL;

	VERIFY_ERR_CHECK_ERR_EQUALS_DISCARD(SG_closet__descriptors__add_begin(pCtx, "1", NULL, NULL, NULL, NULL, &pvh, &ph),
		SG_ERR_REPO_ALREADY_EXISTS);
	VERIFY_COND("", !pvh);
	VERIFY_COND("", !ph);

	SG_VHASH_NULLFREE(pCtx, pvh);
	SG_ERR_IGNORE(SG_closet__descriptors__add_abort(pCtx, &ph));

#if defined(WINDOWS)
	return (SG_context__has_err(pCtx));
#endif
}
static int _vscript_context_teardown(SG_context **ppCtx)
{
    SG_context *pCtx = *ppCtx;
    int result = 0;

	//////////////////////////////////////////////////////////////////
	// Post-processing for errors captured in SG_context.
	if (SG_context__has_err(pCtx))
	{
		SG_context__err_to_console(pCtx, SG_CS_STDERR);
		SG_context__err_reset(pCtx);
	}

    *ppCtx = NULL;

    SG_lib__global_cleanup(pCtx);

    SG_CONTEXT_NULLFREE(pCtx);

    return(result);
}
Example #19
void SG_repo__pack__zlib(SG_context* pCtx, SG_repo * pRepo)
{
    SG_vhash* pvh = NULL;
    SG_uint32 count = 0;
    SG_uint32 i = 0;
	SG_repo_tx_handle* pTx;

    SG_ERR_CHECK(  SG_repo__list_blobs(pCtx, pRepo, SG_BLOBENCODING__FULL, SG_TRUE, SG_TRUE, 500, 0, &pvh)  );
    SG_ERR_CHECK(  SG_vhash__count(pCtx, pvh, &count)  );
    SG_ERR_CHECK(  SG_repo__begin_tx(pCtx, pRepo, &pTx)  );
    for (i=0; i<count; i++)
    {
        const char* psz_hid = NULL;
        const SG_variant* pv = NULL;
        SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvh, i, &psz_hid, &pv)  );
        SG_repo__change_blob_encoding(pCtx, pRepo, pTx,  psz_hid, SG_BLOBENCODING__ZLIB, NULL, NULL, NULL, NULL, NULL);

		if (SG_context__has_err(pCtx))
        {
            if (!SG_context__err_equals(pCtx,SG_ERR_REPO_BUSY))
            {
                SG_ERR_RETHROW;
            }
            else
            {
                SG_context__err_reset(pCtx);
            }
        }
    }
    SG_ERR_CHECK(  SG_repo__commit_tx(pCtx, pRepo, &pTx)  );
    SG_VHASH_NULLFREE(pCtx, pvh);

    return;

fail:
    SG_VHASH_NULLFREE(pCtx, pvh);
}
Example #20
void SG_dagfrag__load_from_repo__one(SG_context * pCtx,
									 SG_dagfrag * pFrag,
									 SG_repo* pRepo,
									 const char * szHidStart,
									 SG_int32 nGenerations)
{
	// load a fragment of the dag starting with the given dagnode
	// for nGenerations of parents.
	//
	// we add this portion of the graph to whatever we already
	// have in our fragment.  this may either augment (give us
	// a larger connected piece) or it may be an independent
	// subset.
	//
	// if nGenerations <= 0, load everything from this starting point
	// back to the NULL/root.
	//
	// generationStart is the generation of the starting dagnode.
	//
	// the starting dagnode *MAY* be in the final start-fringe.
	// normally, it will be.  but if we are called multiple times
	// (and have more than one start point), it may be the case
	// that this node is a parent of one of the other start points.
	//
	// we compute generationEnd as the generation that we will NOT
	// include in the fragment; nodes of that generation will be in
	// the end-fringe.  that is, we include [start...end) like most
	// C++ iterators.

	_my_data * pMyDataCached = NULL;
	SG_dagnode * pDagnodeAllocated = NULL;
	SG_dagnode * pDagnodeStart;
	SG_int32 generationStart, generationEnd;
	SG_bool bPresent = SG_FALSE;
    SG_rbtree* prb_WorkQueue = NULL;

	SG_NULLARGCHECK_RETURN(pFrag);
	SG_NONEMPTYCHECK_RETURN(szHidStart);

	// if we are extending the fragment, delete the generation-sorted
	// member cache copy.  (see __foreach_member()).  it's either that
	// or update it in parallel as we change the real CACHE and that
	// doesn't seem worth the bother.

	SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
	pFrag->m_pRB_GenerationSortedMemberCache = NULL;

    SG_ERR_CHECK(  SG_RBTREE__ALLOC(pCtx, &prb_WorkQueue)  );

	// fetch the starting dagnode and compute the generation bounds.
	// first, see if the cache already has info for this dagnode.
	// if not, fetch it from the source and then add it to the cache.

	SG_ERR_CHECK(  _cache__lookup(pCtx, pFrag,szHidStart,&pMyDataCached,&bPresent)  );
	if (!bPresent)
	{
		if (!pRepo)
			SG_ERR_THROW(  SG_ERR_INVALID_WHILE_FROZEN  );

        SG_ERR_CHECK(  SG_repo__fetch_dagnode(pCtx, pRepo, szHidStart, &pDagnodeAllocated)  );

		pDagnodeStart = pDagnodeAllocated;
	}
	else
	{
		pDagnodeStart = pMyDataCached->m_pDagnode;
	}

	SG_ERR_CHECK(  SG_dagnode__get_generation(pCtx, pDagnodeStart,&generationStart)  );
	SG_ASSERT_RELEASE_FAIL2(  (generationStart > 0),
					  (pCtx,"Invalid generation value [%d] for dagnode [%s]",
					   generationStart,szHidStart)  );
	if ((nGenerations <= 0)  ||  (generationStart <= nGenerations))
		generationEnd = 0;
	else
		generationEnd = generationStart - nGenerations;

	if (!bPresent)
	{
		// this dagnode was not already present in the cache.
		// add it to the cache directly and set the state.
		// we don't need to go thru the work queue for it.
		//
		// then add all of its parents to the work queue.

		SG_ERR_CHECK(  _cache__add__dagnode(pCtx,
											pFrag,
											generationStart,
											pDagnodeAllocated,SG_DFS_START_MEMBER,
											&pMyDataCached)  );
		pDagnodeAllocated = NULL;

		SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
	}
	else
	{
		// the node was already present in the cache, so we have already
		// walked at least part of the graph around it.

		switch (pMyDataCached->m_state)
		{
		default:
		//case SG_DFS_UNKNOWN:
			SG_ASSERT_RELEASE_FAIL2(  (0),
							  (pCtx,"Invalid state [%d] in DAGFRAG Cache for [%s]",
							   pMyDataCached->m_state,szHidStart)  );

		case SG_DFS_INTERIOR_MEMBER:				// already in fragment
		case SG_DFS_START_MEMBER:	// already in fragment, duplicated leaf?
			if (generationEnd < pMyDataCached->m_genDagnode)
			{
				// they've expanded the bounds of the fragment since we
				// last visited this dagnode.  keep this dagnode in the
				// fragment and revisit the ancestors in case any were
				// put in the end-fringe that should now be included.
				//
				// we leave the state as INCLUDE or INCLUDE_AND_START
				// because a duplicate start point should remain a
				// start point.

				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
			}
			else
			{
				// if the current end-generation requested is >= the previous
				// end-generation, then we've already completely explored this
				// dagnode.  that is, a complete walk from this node for nGenerations
				// would not reveal any new information.
			}
			break;

		case SG_DFS_END_FRINGE:
			{
				// they want to start at a dagnode that we put in the
				// end-fringe.  this can happen if they need to expand
				// the bounds of the fragment to include older ancestors.
				//
				// we do not mark this as a start node because someone
				// else already has it as a parent.

				pMyDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
				SG_ERR_CHECK(  _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode,prb_WorkQueue)  );
			}
			break;
		}
	}

	// we optionally put the parents of the current node into the work queue.
	//
	// service the work queue until it is empty.  this allows us to walk the graph without
	// recursion.  that is, as we decide what to do with a node, we add the parents
	// to the queue.  we then iterate thru the work queue until we have dealt with
	// everything -- that is, until all parents have been properly placed.
	//
	// we cannot use a standard iterator to drive this loop because we
	// modify the queue.

	while (1)
	{
		_process_work_queue_item(pCtx, pFrag,prb_WorkQueue,generationEnd,pRepo);
		if (!SG_context__has_err(pCtx))
			break;							// we processed everything in the queue and are done

		if (!SG_context__err_equals(pCtx,SG_ERR_RESTART_FOREACH))
			SG_ERR_RETHROW;

		SG_context__err_reset(pCtx);		// queue changed, restart iteration
	}

	SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);

	/*
	** we have loaded a piece of the dag (starting with the given start node
	** and tracing all parent edges back n generations).  we leave with everything
	** in our progress queues so that other start nodes can be added to the
	** fragment.  this allows the processing of subsequent start nodes to
	** override some of the decisions that we made.  for example:
	**
	**           Q_15
	**             |
	**             |
	**           Z_16
	**           /  \
	**          /    \
	**      Y_17      A_17
	**          \    /   \
	**           \  /     \
	**           B_18     C_18
	**             |
	**             |
	**           D_19
	**             |
	**             |
	**           E_20
	**
	** if we started with the leaf E_20 and requested 3 generations, we would have:
	**     start_set := { E }
	**     include_set := { B, D, E }
	**     end_set := { Y, A }
	**
	** after a subsequent call with the leaf C_18 and 3 generations, we would have:
	**     start_set := { C, E }
	**     include_set := { Z, A, B, C, D, E }
	**     end_set := { Q, Y }
	**
	*/

	return;

fail:
	SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);
	SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
}
Example #21
void SG_server__pull_request_fragball(SG_context* pCtx,
									  SG_repo* pRepo,
									  SG_vhash* pvhRequest,
									  const SG_pathname* pFragballDirPathname,
									  char** ppszFragballName,
									  SG_vhash** ppvhStatus)
{
	SG_pathname* pFragballPathname = NULL;
	SG_uint32* paDagNums = NULL;
    SG_rbtree* prbDagnodes = NULL;
	SG_string* pstrFragballName = NULL;
	char* pszRevFullHid = NULL;
	SG_rbtree_iterator* pit = NULL;
	SG_uint32* repoDagnums = NULL;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pFragballDirPathname);
	SG_NULLARGCHECK_RETURN(ppvhStatus);

#if TRACE_SERVER
	SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, pvhRequest, "pull fragball request")  );
#endif

	SG_ERR_CHECK(  SG_fragball__create(pCtx, pFragballDirPathname, &pFragballPathname)  );

	if (!pvhRequest)
	{
		// Add leaves from every dag to the fragball.
		SG_uint32 count_dagnums;
		SG_uint32 i;
		SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_dagnums, &paDagNums)  );

		for (i=0; i<count_dagnums; i++)
		{
			SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, paDagNums[i], &prbDagnodes)  );
			SG_ERR_CHECK(  SG_fragball__append__dagnodes(pCtx, pFragballPathname, pRepo, paDagNums[i], prbDagnodes)  );
			SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
		}

		SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
		SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
	}
	else
	{
		// Build the requested fragball.
		SG_bool found;

		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE, &found)  );
		if (found)
		{
			// Full clone requested.
			SG_ERR_CHECK(  SG_repo__fetch_repo__fragball(pCtx, pRepo, pFragballDirPathname, ppszFragballName) );
		}
		else
		{
			// Not a full clone.

			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &found)  );
			if (found)
			{
				// Dagnodes were requested.

				SG_uint32 generations = 0;
				SG_vhash* pvhDags;
				SG_uint32 count_requested_dagnums;
				SG_uint32 count_repo_dagnums = 0;
				SG_uint32 i;
				const char* pszDagNum = NULL;
				const SG_variant* pvRequestedNodes = NULL;
				SG_vhash* pvhRequestedNodes = NULL;
				const char* pszHidRequestedDagnode = NULL;

				// Were additional generations requested?
				SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__GENERATIONS, &found)  );
				if (found)
					SG_ERR_CHECK(  SG_vhash__get__uint32(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__GENERATIONS, &generations)  );

				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags)  );
				SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhDags, &count_requested_dagnums)  );
				if (count_requested_dagnums)
					SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_repo_dagnums, &repoDagnums)  );

				// For each requested dag, get the requested nodes.
				for (i=0; i<count_requested_dagnums; i++)
				{
					SG_uint32 iMissingNodeCount;
					SG_uint32 iDagnum;
					SG_uint32 j;
					SG_bool isValidDagnum = SG_FALSE;
					SG_bool bSpecificNodesRequested = SG_FALSE;

					// Get the dag's missing node vhash.
					SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhDags, i, &pszDagNum, &pvRequestedNodes)  );
					SG_ERR_CHECK(  SG_dagnum__from_sz__decimal(pCtx, pszDagNum, &iDagnum)  );

					// Verify that requested dagnum exists
					for (j = 0; j < count_repo_dagnums; j++)
					{
						if (repoDagnums[j] == iDagnum)
						{
							isValidDagnum = SG_TRUE;
							break;
						}
					}
					if (!isValidDagnum)
					{
						char buf[SG_DAGNUM__BUF_MAX__NAME];
						SG_ERR_CHECK(  SG_dagnum__to_name(pCtx, iDagnum, buf, sizeof(buf))  );
						SG_ERR_THROW2(SG_ERR_NO_SUCH_DAG, (pCtx, "%s", buf));
					}

					if (pvRequestedNodes)
					{
						SG_ERR_CHECK(  SG_variant__get__vhash(pCtx, pvRequestedNodes, &pvhRequestedNodes)  );

						// Get each node listed for the dag
						SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhRequestedNodes, &iMissingNodeCount)  );
						if (iMissingNodeCount > 0)
						{
							SG_uint32 j;
							const SG_variant* pvVal;

							bSpecificNodesRequested = SG_TRUE;

							SG_ERR_CHECK(  SG_RBTREE__ALLOC__PARAMS(pCtx, &prbDagnodes, iMissingNodeCount, NULL)  );
							for (j=0; j<iMissingNodeCount; j++)
							{
								SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhRequestedNodes, j, &pszHidRequestedDagnode, &pvVal)  );

								if (pvVal)
								{
									const char* pszVal;
									SG_ERR_CHECK(  SG_variant__get__sz(pCtx, pvVal, &pszVal)  );
									if (pszVal)
									{
										if (0 == strcmp(pszVal, SG_SYNC_REQUEST_VALUE_HID_PREFIX))
										{
											SG_ERR_CHECK(  SG_repo__hidlookup__dagnode(pCtx, pRepo, iDagnum, pszHidRequestedDagnode, &pszRevFullHid)  );
											pszHidRequestedDagnode = pszRevFullHid;
										}
										else if (0 == strcmp(pszVal, SG_SYNC_REQUEST_VALUE_TAG))
										{
											SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, pszHidRequestedDagnode, &pszRevFullHid)  );
											if (!pszRevFullHid)
												SG_ERR_THROW(SG_ERR_TAG_NOT_FOUND);
											pszHidRequestedDagnode = pszRevFullHid;
										}
										else
											SG_ERR_THROW(SG_ERR_PULL_INVALID_FRAGBALL_REQUEST);
									}
								}
								
								SG_ERR_CHECK(  SG_rbtree__update(pCtx, prbDagnodes, pszHidRequestedDagnode)  );
								// Get additional dagnode generations, if requested.
								SG_ERR_CHECK(  SG_sync__add_n_generations(pCtx, pRepo, pszHidRequestedDagnode, prbDagnodes, generations)  );
								SG_NULLFREE(pCtx, pszRevFullHid);
							}
						}
					}

					if (!bSpecificNodesRequested)
					{
						// When no specific nodes are in the request, add all leaves.
						SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagnum, &prbDagnodes)  );

						// Get additional dagnode generations, if requested.
						if (generations)
						{
							SG_bool found;
							const char* hid;
							
							SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pit, prbDagnodes, &found, &hid, NULL)  );
							while (found)
							{
								SG_ERR_CHECK(  SG_sync__add_n_generations(pCtx, pRepo, hid, prbDagnodes, generations)  );
								SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pit, &found, &hid, NULL)  );
							}
						}
					}

					if (prbDagnodes) // can be null when leaves of an empty dag are requested
					{
						SG_ERR_CHECK(  SG_fragball__append__dagnodes(pCtx, pFragballPathname, pRepo, iDagnum, prbDagnodes)  );
						SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
					}

				} // dagnum loop
			} // if "dags" exists

			/* Add requested blobs to the fragball */
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &found)  );
			if (found)
			{
				// Blobs were requested.
				SG_vhash* pvhBlobs;
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &pvhBlobs)  );
				SG_ERR_CHECK(  SG_sync__add_blobs_to_fragball(pCtx, pRepo, pFragballPathname, pvhBlobs)  );
			}

			SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
		}
	}
	
	/* fallthru */
fail:
	// If we had an error, delete the half-baked fragball.
	if (pFragballPathname && SG_context__has_err(pCtx))
		SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pFragballPathname)  );

	SG_PATHNAME_NULLFREE(pCtx, pFragballPathname);
	SG_NULLFREE(pCtx, paDagNums);
	SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
	SG_STRING_NULLFREE(pCtx, pstrFragballName);
	SG_NULLFREE(pCtx, pszRevFullHid);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_NULLFREE(pCtx, repoDagnums);
}
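The shape of pvhRequest can be read off the checks above: top-level keys select what goes into the fragball. A minimal sketch of building a full-clone request, assuming the vhash allocators used elsewhere in these excerpts:

	SG_vhash* pvhRequest = NULL;

	SG_ERR_CHECK(  SG_VHASH__ALLOC(pCtx, &pvhRequest)  );
	// The server only tests for the key's presence, so a null value suffices.
	SG_ERR_CHECK(  SG_vhash__add__null(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE)  );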
/**
 * Do diff of an individual item.
 * When WC-based, we have a "DiffStep" vhash.
 * When historical, we have an item from a pvaStatus.
 *
 */
static void _do_diff1(SG_context * pCtx,
                      SG_bool bWC,
                      const SG_option_state * pOptSt,
                      const SG_vhash * pvhItem,
                      SG_uint32 * piResult)
{
    SG_string * pStringGidRepoPath = NULL;
    SG_vhash * pvhResultCodes = NULL;
    SG_stringarray * psa1 = NULL;
    const char * pszGid;
    SG_int64 i64Result = 0;
    SG_string * pStringErr = NULL;

    SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhItem, "gid", &pszGid)  );

    if (bWC)
    {
        SG_pathname * pPathWc = NULL;
        SG_bool bHasTool = SG_FALSE;

        // With the __diff__setup() and __diff__run() changes, we have already
        // examined the items during the __setup() step and recorded a tool for
        // the *FILES* that have changed content.  So if "tool" isn't set in the
        // DiffStep/Item, we don't need to diff it -- it could be a structural
        // change, a non-file, a found item, etc.
        //
        // we do not use SG_wc__diff__throw() because we already have the diff info
        // and we want to control the result-code processing below.

        SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhItem, "tool", &bHasTool)  );
        if (bHasTool)
            SG_ERR_CHECK(  SG_wc__diff__run(pCtx, pPathWc, pvhItem, &pvhResultCodes)  );
    }
    else
    {
        SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pStringGidRepoPath)  );
        SG_ERR_CHECK(  SG_string__sprintf(pCtx, pStringGidRepoPath, "@%s", pszGid)  );

        SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psa1, 1)  );
        SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa1, SG_string__sz(pStringGidRepoPath))  );
        // we do not use the __throw() version of this routine so we can control
        // result-code processing below.
        SG_ERR_CHECK(  SG_vv2__diff_to_stream(pCtx, pOptSt->psz_repo, pOptSt->pRevSpec,
                                              psa1, 0,
                                              SG_FALSE,		// bNoSort
                                              SG_TRUE,		// bInteractive,
                                              pOptSt->psz_tool,
                                              &pvhResultCodes)  );
    }

    if (pvhResultCodes)
    {
        SG_vhash * pvhResult;				// we do not own this
        SG_ERR_CHECK(  SG_vhash__check__vhash(pCtx, pvhResultCodes, pszGid, &pvhResult)  );
        if (pvhResult)
        {
            const char * pszTool;

            SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhResult, "tool", &pszTool)  );
            SG_ERR_CHECK(  SG_vhash__get__int64(pCtx, pvhResult, "result", &i64Result)  );

            SG_difftool__check_result_code__throw(pCtx, i64Result, pszTool);
            if (SG_context__has_err(pCtx))
            {
                SG_context__err_to_string(pCtx, SG_FALSE, &pStringErr);
                SG_context__err_reset(pCtx);
                SG_ERR_CHECK(  SG_console__raw(pCtx, SG_CS_STDERR, SG_string__sz(pStringErr))  );

                // eat the tool error. the result code is set.
            }
        }
    }

    if (piResult)
        *piResult = (SG_uint32)i64Result;

fail:
    SG_STRING_NULLFREE(pCtx, pStringGidRepoPath);
    SG_VHASH_NULLFREE(pCtx, pvhResultCodes);
    SG_STRINGARRAY_NULLFREE(pCtx, psa1);
    SG_STRING_NULLFREE(pCtx, pStringErr);
}
static void _s2__do_cset_vs_cset(SG_context * pCtx,
                                 const SG_option_state * pOptSt,
                                 const SG_stringarray * psaArgs,
                                 SG_uint32 * pNrErrors)
{
    SG_varray * pvaStatus = NULL;
    SG_varray * pvaStatusDirtyFiles = NULL;
    SG_stringarray * psa1 = NULL;
    SG_string * pStringGidRepoPath = NULL;
    SG_string * pStringErr = NULL;
    SG_uint32 nrErrors = 0;

    SG_ERR_CHECK(  SG_vv2__status(pCtx,
                                  pOptSt->psz_repo,
                                  pOptSt->pRevSpec,
                                  psaArgs,
                                  WC__GET_DEPTH(pOptSt),
                                  SG_FALSE, // bNoSort
                                  &pvaStatus, NULL)  );
    if (pvaStatus)
    {
        if (pOptSt->bInteractive)
        {
            // Filter list down to just modified files and show them one-by-one.
            SG_ERR_CHECK(  _get_dirty_files(pCtx, pvaStatus, &pvaStatusDirtyFiles)  );
            if (pvaStatusDirtyFiles)
                SG_ERR_CHECK(  _do_gui_diffs(pCtx, SG_FALSE, pOptSt, pvaStatusDirtyFiles, &nrErrors)  );
        }
        else
        {
            SG_uint32 k, nrItems;

            // Print the changes with PATCH-like headers.
            // Accumulate any tool errors.
            SG_ERR_CHECK(  SG_varray__count(pCtx, pvaStatus, &nrItems)  );
            for (k=0; k<nrItems; k++)
            {
                SG_vhash * pvhItem;
                const char * pszGid = NULL;

                SG_ERR_CHECK(  SG_varray__get__vhash(pCtx, pvaStatus, k, &pvhItem)  );

                // TODO 2013/02/22 Our pvhItem has all of the details for the diff,
                // TODO            but we don't yet have a public API to let it be
                // TODO            used as is.  So we build a @gid repo-path and
                // TODO            run the old historical diff code on a 1-item array
                // TODO            containing this @gid.
                // TODO
                // TODO            We should fix this to just pass down the pvhItem
                // TODO            so that it doesn't have to repeat the status lookup.

                SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhItem, "gid", &pszGid)  );
                SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pStringGidRepoPath)  );
                SG_ERR_CHECK(  SG_string__sprintf(pCtx, pStringGidRepoPath, "@%s", pszGid)  );

                SG_ERR_CHECK(  SG_STRINGARRAY__ALLOC(pCtx, &psa1, 1)  );
                SG_ERR_CHECK(  SG_stringarray__add(pCtx, psa1, SG_string__sz(pStringGidRepoPath))  );

                SG_vv2__diff_to_stream__throw(pCtx, pOptSt->psz_repo, pOptSt->pRevSpec,
                                              psa1, 0,
                                              SG_TRUE,		// bNoSort -- doesn't matter; only 1 item in list
                                              SG_FALSE,		// bInteractive,
                                              pOptSt->psz_tool);
                // Don't throw the error from the tool.  Just print it on STDERR
                // and remember that we had an error so that don't stop showing
                // the diffs just because we stumble over a changed binary file
                // or mis-configured tool, for example.
                if (SG_context__has_err(pCtx))
                {
                    SG_context__err_to_string(pCtx, SG_FALSE, &pStringErr);
                    SG_context__err_reset(pCtx);
                    SG_ERR_CHECK(  SG_console__raw(pCtx, SG_CS_STDERR, SG_string__sz(pStringErr))  );
                    SG_STRING_NULLFREE(pCtx, pStringErr);

                    nrErrors++;
                }

                SG_STRING_NULLFREE(pCtx, pStringGidRepoPath);
                SG_STRINGARRAY_NULLFREE(pCtx, psa1);
            }
        }
    }

    *pNrErrors = nrErrors;

fail:
    SG_VARRAY_NULLFREE(pCtx, pvaStatus);
    SG_VARRAY_NULLFREE(pCtx, pvaStatusDirtyFiles);
    SG_STRINGARRAY_NULLFREE(pCtx, psa1);
    SG_STRING_NULLFREE(pCtx, pStringGidRepoPath);
    SG_STRING_NULLFREE(pCtx, pStringErr);
}
Example #24
void SG_tag__remove(SG_context * pCtx, SG_repo * pRepo, const char* pszRev, SG_bool bRev, SG_uint32 count_args, const char** paszArgs)
{
    SG_audit q;
	char* psz_hid_given = NULL;
	char* psz_hid_assoc_with_tag = NULL;
	SG_uint32 count_valid_tags = 0;
	const char** paszValidArgs = NULL;
	SG_uint32 i = 0;

    if (0 == count_args)
		return;

    SG_ERR_CHECK(  SG_audit__init(pCtx,&q,pRepo,SG_AUDIT__WHEN__NOW,SG_AUDIT__WHO__FROM_SETTINGS)  );

	if (pszRev)
	{
		if (bRev)
			SG_repo__hidlookup__dagnode(pCtx, pRepo, SG_DAGNUM__VERSION_CONTROL, pszRev, &psz_hid_given);
		else
			SG_vc_tags__lookup__tag(pCtx, pRepo, pszRev, &psz_hid_given);

		if (SG_context__has_err(pCtx))
		{
			if (SG_context__err_equals(pCtx, SG_ERR_AMBIGUOUS_ID_PREFIX))
			{
				SG_context__push_level(pCtx);
				SG_console(pCtx, SG_CS_STDERR, "The revision or tag could not be found:  %s\n", pszRev);
				SG_context__pop_level(pCtx);
				SG_ERR_RETHROW;
			}
			else
				SG_ERR_RETHROW;
		}
	}

	// SG_vc_tags__remove will throw on the first non-existent tag and stop.
	// we don't want to issue errors, just warnings, and keep going, so we
	// weed out all the invalid tags here before calling SG_vc_tags__remove.
	SG_ERR_CHECK(  SG_alloc(pCtx, count_args, sizeof(const char*), &paszValidArgs)  );
	for (i =0; i < count_args; i++)
	{
		SG_ERR_CHECK(  SG_vc_tags__lookup__tag(pCtx, pRepo, paszArgs[i], &psz_hid_assoc_with_tag)  );
		if (psz_hid_assoc_with_tag) // tag exists
		{
			if (psz_hid_given)
			{
				if (strcmp(psz_hid_given, psz_hid_assoc_with_tag) == 0) // tag is assoc with given changeset
					paszValidArgs[count_valid_tags++] = paszArgs[i];
				else // tag not assoc with given changeset, no error, but warn
					SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "Tag not associated with given revision:  %s\n", paszArgs[i])  );
			}
			else
				paszValidArgs[count_valid_tags++] = paszArgs[i];
		}
		else
		{
			SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDOUT, "Tag not found:  %s\n", paszArgs[i])  );
		}
		SG_NULLFREE(pCtx, psz_hid_assoc_with_tag);
		psz_hid_assoc_with_tag = NULL;
	}

	SG_ERR_CHECK(  SG_vc_tags__remove(pCtx, pRepo, &q, count_valid_tags, paszValidArgs)  );


fail:
	SG_NULLFREE(pCtx, paszValidArgs);
	SG_NULLFREE(pCtx, psz_hid_given);
	SG_NULLFREE(pCtx, psz_hid_assoc_with_tag);

}
Example #25
void SG_workingdir__find_mapping(
	SG_context* pCtx,
	const SG_pathname* pPathLocalDirectory,
	SG_pathname** ppPathMappedLocalDirectory, /**< Return the actual local directory that contains the mapping */
	SG_string** ppstrNameRepoInstanceDescriptor, /**< Return the name of the repo instance descriptor */
	char** ppszidGidAnchorDirectory /**< Return the GID of the repo directory */
	)
{
	SG_pathname* curpath = NULL;
	SG_string* result_pstrDescriptorName = NULL;
	char* result_pszidGid = NULL;
	SG_pathname* result_mappedLocalDirectory = NULL;
	SG_vhash* pvhMapping = NULL;
	SG_pathname* pDrawerPath = NULL;
	SG_pathname* pMappingFilePath = NULL;
	SG_vhash* pvh = NULL;

	SG_NULLARGCHECK_RETURN(pPathLocalDirectory);

	SG_ERR_CHECK(  SG_PATHNAME__ALLOC__COPY(pCtx, &curpath, pPathLocalDirectory)  );

	/* it's a directory, so it should have a final slash */
	SG_ERR_CHECK(  SG_pathname__add_final_slash(pCtx, curpath)  );

	while (SG_TRUE)
	{
		SG_ERR_CHECK(  SG_workingdir__get_drawer_path(pCtx, curpath, &pDrawerPath)  );

		SG_fsobj__verify_directory_exists_on_disk__pathname(pCtx, pDrawerPath);
		if (!SG_context__has_err(pCtx))
		{
			const char* pszDescriptorName = NULL;
			const char* pszGid = NULL;

			SG_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pMappingFilePath, pDrawerPath, "repo.json")  );
			SG_PATHNAME_NULLFREE(pCtx, pDrawerPath);

			SG_ERR_CHECK(  SG_vfile__slurp(pCtx, pMappingFilePath, &pvh)  );

			SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvh, "mapping", &pvhMapping)  );

			SG_PATHNAME_NULLFREE(pCtx, pMappingFilePath);

			SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhMapping, "descriptor", &pszDescriptorName)  );
			SG_ERR_CHECK(  SG_vhash__get__sz(pCtx, pvhMapping, "anchor", &pszGid)  );

			SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &result_pstrDescriptorName)  );
			SG_ERR_CHECK(  SG_string__set__sz(pCtx, result_pstrDescriptorName, pszDescriptorName)  );

			if (pszGid)
			{
				SG_ERR_CHECK(  SG_gid__alloc_clone(pCtx, pszGid, &result_pszidGid)  );
			}
			else
			{
				result_pszidGid = NULL;
			}

			SG_VHASH_NULLFREE(pCtx, pvh);

			result_mappedLocalDirectory = curpath;
			curpath = NULL;

			break;
		}
		else
			SG_context__err_reset(pCtx);

		SG_PATHNAME_NULLFREE(pCtx, pDrawerPath);

		SG_pathname__remove_last(pCtx, curpath);

		if (SG_context__err_equals(pCtx, SG_ERR_CANNOTTRIMROOTDIRECTORY))
		{
			SG_context__err_reset(pCtx);
			break;
		}
		else
		{
			SG_ERR_CHECK_CURRENT;
		}
	}

	if (result_mappedLocalDirectory)
	{
        if (ppPathMappedLocalDirectory)
        {
		    *ppPathMappedLocalDirectory = result_mappedLocalDirectory;
        }
        else
        {
	        SG_PATHNAME_NULLFREE(pCtx, result_mappedLocalDirectory);
        }
        if (ppstrNameRepoInstanceDescriptor)
        {
            *ppstrNameRepoInstanceDescriptor = result_pstrDescriptorName;
        }
        else
        {
            SG_STRING_NULLFREE(pCtx, result_pstrDescriptorName);
        }
        if (ppszidGidAnchorDirectory)
        {
            *ppszidGidAnchorDirectory = result_pszidGid;
        }
        else
        {
            SG_NULLFREE(pCtx, result_pszidGid);
        }

		return;
	}
	else
	{
		SG_PATHNAME_NULLFREE(pCtx, curpath);

		SG_ERR_THROW_RETURN(SG_ERR_NOT_FOUND);
	}

fail:
	SG_VHASH_NULLFREE(pCtx, pvh);
	SG_PATHNAME_NULLFREE(pCtx, pDrawerPath);
	SG_PATHNAME_NULLFREE(pCtx, pMappingFilePath);
	SG_PATHNAME_NULLFREE(pCtx, result_mappedLocalDirectory);
	SG_PATHNAME_NULLFREE(pCtx, curpath);
}
void SG_sync__closet_user_dags(
	SG_context* pCtx, 
	SG_repo* pRepoSrcNotMine, 
	const char* pszRefHidLeafSrc,
	SG_varray** ppvaSyncedUserList)
{
	char* pszSrcAdminId = NULL;
	char* pszHidLeafSrc = NULL;
	char* pszHidLeafDest = NULL;
	SG_vhash* pvhDescriptors = NULL;
	SG_repo* pRepoDest = NULL;
	SG_repo* pRepoSrcMine = NULL;
	char* pszDestAdminId = NULL;

	/* Using disallowed characters to ensure no collision with an actual repo name.
	 * Note that this isn't actually stored anywhere--we just use it as a key in the
	 * vhash below where the /real/ repos have descriptor names. */
	const char* pszRefUserMasterFakeName = "\\/USER_MASTER\\/"; 

	/* The repo routines do a null arg check of pRepoSrcNotMine.
	   The other args are optional. */

	if (!pszRefHidLeafSrc)
	{
		SG_ERR_CHECK(  SG_zing__get_leaf(pCtx, pRepoSrcNotMine, NULL, SG_DAGNUM__USERS, &pszHidLeafSrc)  );
		pszRefHidLeafSrc = pszHidLeafSrc;
	}

	/* Add all repositories in "normal" status, to the list we'll iterate over. */
	SG_ERR_CHECK(  SG_closet__descriptors__list(pCtx, &pvhDescriptors)  );

	/* If it exists, add the user master repo to the list. */
	{
		SG_bool bExists = SG_FALSE;
		SG_ERR_CHECK(  SG_repo__user_master__exists(pCtx, &bExists)  );
		if (bExists)
			SG_ERR_CHECK(  SG_vhash__add__null(pCtx, pvhDescriptors, pszRefUserMasterFakeName)  );
	}

	/* Iterate over the repositories, syncing the user database. */
	{
		SG_int32 i = 0;
		SG_uint32 numDescriptors = 0;

		SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhDescriptors, &numDescriptors)  );
		for(i = 0; i < (SG_int32)numDescriptors; i++)
		{
			const char* pszRefNameDest = NULL;
			SG_bool bAdminIdsMatch = SG_TRUE;
			const SG_variant* pvRefDest = NULL;

			/* Note that the source repo will be in this loop, too, but we don't need to check for 
			 * it, adding another strcmp, because the leaf hid comparison below will effectively 
			 * skip it. So we do one extra leaf fetch and comparison, total, rather than an extra 
			 * strcmp for every repo in the closet. */

			SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhDescriptors, i, &pszRefNameDest, &pvRefDest)  );

			if (SG_VARIANT_TYPE_NULL == pvRefDest->type)
				SG_ERR_CHECK(  SG_REPO__USER_MASTER__OPEN(pCtx, &pRepoDest)  );
			else
				SG_ERR_CHECK(  SG_REPO__OPEN_REPO_INSTANCE(pCtx, pszRefNameDest, &pRepoDest)  );

			SG_ERR_CHECK(  SG_zing__get_leaf(pCtx, pRepoDest, NULL, SG_DAGNUM__USERS, &pszHidLeafDest)  );

			if (strcmp(pszRefHidLeafSrc, pszHidLeafDest))
			{
				/* Pull from source to dest. 
				 * Pull is generally faster than push, so we're using it on purpose. */
				SG_pull__admin__local(pCtx, pRepoDest, pRepoSrcNotMine, NULL);
				if (SG_context__has_err(pCtx))
				{
					/* If there's an admin id mismatch, don't die. Log a warning and move on. */
					if (SG_context__err_equals(pCtx, SG_ERR_ADMIN_ID_MISMATCH))
					{
						const char* pszRefNameSrc = NULL;

						SG_ERR_DISCARD;

						SG_ERR_CHECK(  SG_repo__get_descriptor_name(pCtx, pRepoSrcNotMine, &pszRefNameSrc)  );
						if (!pszRefNameSrc)
							pszRefNameSrc = pszRefUserMasterFakeName;
						SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepoSrcNotMine, &pszSrcAdminId)  );

						SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepoDest, &pszDestAdminId)  );

						SG_ERR_CHECK(  SG_log__report_warning(pCtx, 
							"admin-id mismatch when syncing users: source repo %s has %s, dest repo %s has %s",
							pszRefNameSrc, pszSrcAdminId, pszRefNameDest, pszDestAdminId)  );

						bAdminIdsMatch = SG_FALSE;

						SG_NULLFREE(pCtx, pszDestAdminId);
						SG_NULLFREE(pCtx, pszSrcAdminId);
					}
					else
						SG_ERR_RETHROW;
				}

				if (bAdminIdsMatch)
				{
					SG_NULLFREE(pCtx, pszHidLeafDest);
					SG_ERR_CHECK(  SG_zing__get_leaf(pCtx, pRepoDest, NULL, SG_DAGNUM__USERS, &pszHidLeafDest)  );

					if (strcmp(pszRefHidLeafSrc, pszHidLeafDest))
					{
						/* The pull from source to dest resulted in a new leaf. 
						 * Use the new leaf and restart the loop. */
						SG_NULLFREE(pCtx, pszHidLeafSrc);
						pszRefHidLeafSrc = pszHidLeafSrc = pszHidLeafDest;
						pszHidLeafDest = NULL;

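						/* Hand off ownership: pRepoDest becomes the new source, and
						 * pRepoSrcMine tracks the handle we must free before returning. */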
						SG_REPO_NULLFREE(pCtx, pRepoSrcMine);
						pRepoSrcNotMine = pRepoSrcMine = pRepoDest;
						pRepoDest = NULL;

						i = -1; /* start again at the first descriptor */
					}

				}
			}

			SG_NULLFREE(pCtx, pszHidLeafDest);
			SG_REPO_NULLFREE(pCtx, pRepoDest);
		}
	}

	if (ppvaSyncedUserList)
		SG_ERR_CHECK(  SG_user__list_all(pCtx, pRepoSrcNotMine, ppvaSyncedUserList)  );

	/* fall through */
fail:
	SG_NULLFREE(pCtx, pszSrcAdminId);
	SG_NULLFREE(pCtx, pszHidLeafSrc);
	SG_NULLFREE(pCtx, pszHidLeafDest);
	SG_VHASH_NULLFREE(pCtx, pvhDescriptors);
	SG_REPO_NULLFREE(pCtx, pRepoDest);
	SG_REPO_NULLFREE(pCtx, pRepoSrcMine);
	SG_NULLFREE(pCtx, pszDestAdminId);
}
int
main(int argc, char **argv)
{
    int stackDummy;
    SG_error err;
    SG_context * pCtx = NULL;
    JSRuntime *rt;
    JSContext *cx;
    SG_log_console__data cLogStdData;
    SG_log_text__data cLogFileData;
    SG_log_text__writer__daily_path__data cLogFileWriterData;
    JSObject *glob;
    int result;
    SG_bool skipModules = SG_FALSE;

    SG_zero(cLogStdData);
    SG_zero(cLogFileData);
    SG_zero(cLogFileWriterData);

    CheckHelpMessages();
    setlocale(LC_ALL, "");

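    /* Record the base of the C stack for the JS engine's stack-depth checks. */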
    gStackBase = (jsuword)&stackDummy;

#ifdef XP_OS2
   /* these streams are normally line buffered on OS/2 and need a \n, *
    * so we need to unbuffer them to get a reasonable prompt          */
    setbuf(stdout,0);
    setbuf(stderr,0);
#endif

    gErrFile = stderr;
    gOutFile = stdout;

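    /* Skip the program name. */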
    argc--;
    argv++;

    err = SG_context__alloc(&pCtx);
    if (SG_IS_ERROR(err))
        return 1;
    SG_lib__global_initialize(pCtx);
    if (SG_context__has_err(pCtx))
    {
        _vscript_context_teardown(&pCtx);
        return 1;
    }

    _checkSkipModules(pCtx, &argc, argv, &skipModules);
    if (SG_context__has_err(pCtx))
    {
        _vscript_context_teardown(&pCtx);
        return 1;
    }

    SG_jscore__new_runtime(pCtx, ContextCallback, shell_functions, skipModules, &rt);
    if (SG_context__has_err(pCtx))
    {
        _vscript_context_teardown(&pCtx);
        return 1;
    }

    SG_jscore__new_context(pCtx, &cx, &glob, NULL);
    if (SG_context__has_err(pCtx))
    {
        _vscript_context_teardown(&pCtx);
        return 1;
    }

    JS_SetGlobalObject(cx, glob);

    //////////////////////////////////////////////////////////////////
    // Set up logging, run the script, then tear the logging down.
    _set_up_logging(pCtx, &cLogStdData, &cLogFileData, &cLogFileWriterData);

    result = ProcessArgs(cx, glob, argv, argc);

    _clean_up_logging(pCtx, &cLogStdData, &cLogFileData, &cLogFileWriterData);

    // TODO 2011/09/30 The following call removes "pCtx" from
    // TODO            the private data in the "cx".  This was
    // TODO            done for neatness before we destroy it.
    // TODO            And this has worked fine for years.
    // TODO
    // TODO            I have commented this out as an experiment
    // TODO            so that the "pCtx" is still available for
    // TODO            use in any GC/Finalize callbacks.
    // TODO
    // TODO SG_jsglue__set_sg_context(NULL, cx);
    JS_EndRequest(cx);
    JS_DestroyContext(cx);

    // End of SG_context post-processing.
    //////////////////////////////////////////////////////////////////

    result = result + _vscript_context_teardown(&pCtx);

    return result;
}
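/* Serve a fragball request for sync.
 *
 * A NULL pvhRequest means "send the leaves of every dag". Otherwise, as
 * inferred from the handling below, the request vhash may contain:
 *   SG_SYNC_STATUS_KEY__CLONE  - send a full-repo (clone) fragball
 *   SG_SYNC_STATUS_KEY__SINCE  - incremental request handled by _do_since
 *   SG_SYNC_STATUS_KEY__DAGS   - hex dagnum -> optional rev spec vhash
 *   SG_SYNC_STATUS_KEY__LEAVES - the other repo's leaves, keyed by dagnum
 *   SG_SYNC_STATUS_KEY__BLOBS  - blobs to add to the fragball
 * The fragball is written under pFragballDirPathname; its generated
 * filename is returned in ppszFragballName, which the caller frees. */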
void SG_sync_remote__request_fragball(
	SG_context* pCtx,
	SG_repo* pRepo,
	const SG_pathname* pFragballDirPathname,
	SG_vhash* pvhRequest,
	char** ppszFragballName)
{
	SG_pathname* pFragballPathname = NULL;
	SG_uint64* paDagNums = NULL;
	SG_string* pstrFragballName = NULL;
	SG_rbtree* prbDagnodes = NULL;
	SG_rbtree_iterator* pit = NULL;
	SG_rev_spec* pRevSpec = NULL;
	SG_stringarray* psaFullHids = NULL;
	SG_rbtree* prbDagnums = NULL;
	SG_dagfrag* pFrag = NULL;
	char* pszRepoId = NULL;
	char* pszAdminId = NULL;
    SG_fragball_writer* pfb = NULL;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pFragballDirPathname);

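	/* Generate a unique TID for the fragball filename so concurrent
	   requests in the same directory can't collide. */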
    {
        char buf_filename[SG_TID_MAX_BUFFER_LENGTH];
        SG_ERR_CHECK(  SG_tid__generate(pCtx, buf_filename, sizeof(buf_filename))  );
        SG_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pFragballPathname, pFragballDirPathname, buf_filename)  );
    }

	if (!pvhRequest)
	{
		// Add leaves from every dag to the fragball.
		SG_uint32 count_dagnums;
		SG_uint32 i;

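		/* SG_TRUE: create the file; 2 appears to be the fragball format
		   version (the clone path below passes 3). */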
        SG_ERR_CHECK(  SG_fragball_writer__alloc(pCtx, pRepo, pFragballPathname, SG_TRUE, 2, &pfb)  );
		SG_ERR_CHECK(  SG_repo__list_dags(pCtx, pRepo, &count_dagnums, &paDagNums)  );

		for (i=0; i<count_dagnums; i++)
		{
			SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, paDagNums[i], &prbDagnodes)  );
			SG_ERR_CHECK(  SG_fragball__write__dagnodes(pCtx, pfb, paDagNums[i], prbDagnodes)  );
			SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
		}

		SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
		SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
        SG_ERR_CHECK(  SG_fragball_writer__close(pCtx, pfb)  );
	}
	else
	{
		// Specific dags/nodes were requested. Build that fragball.
		SG_bool found;

#if TRACE_SYNC_REMOTE && 0
		SG_ERR_CHECK(  SG_vhash_debug__dump_to_console__named(pCtx, pvhRequest, "fragball request")  );
#endif

		SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE, &found)  );
		if (found)
		{
            // SG_SYNC_STATUS_KEY__CLONE_REQUEST is currently ignored
            SG_ERR_CHECK(  SG_repo__fetch_repo__fragball(pCtx, pRepo, 3, pFragballDirPathname, ppszFragballName)  );
		}
		else
		{
			// Not a full clone.

            SG_ERR_CHECK(  SG_fragball_writer__alloc(pCtx, pRepo, pFragballPathname, SG_TRUE, 2, &pfb)  );
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__SINCE, &found)  );
			if (found)
			{
                SG_vhash* pvh_since = NULL;

                SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__SINCE, &pvh_since)  );

                SG_ERR_CHECK(  _do_since(pCtx, pRepo, pvh_since, pfb)  );
            }

			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &found)  );
			if (found)
			{
				// Specific Dagnodes were requested. Add just those nodes to our "start from" rbtree.

				SG_vhash* pvhDags;
				SG_uint32 count_requested_dagnums;
				SG_uint32 i;
				const SG_variant* pvRevSpecs = NULL;
				SG_vhash* pvhRevSpec = NULL;

				// For each requested dag, get rev spec request.
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags)  );
				SG_ERR_CHECK(  SG_vhash__count(pCtx, pvhDags, &count_requested_dagnums)  );
				if (count_requested_dagnums)
					SG_ERR_CHECK(  SG_repo__list_dags__rbtree(pCtx, pRepo, &prbDagnums)  );
				for (i=0; i<count_requested_dagnums; i++)
				{
					SG_bool isValidDagnum = SG_FALSE;
					SG_bool bSpecificNodesRequested = SG_FALSE;
					const char* pszRefDagNum = NULL;
					SG_uint64 iDagnum;

					// Get this dag's rev spec request.
					SG_ERR_CHECK(  SG_vhash__get_nth_pair(pCtx, pvhDags, i, &pszRefDagNum, &pvRevSpecs)  );

					// Verify that requested dagnum exists
					SG_ERR_CHECK(  SG_rbtree__find(pCtx, prbDagnums, pszRefDagNum, &isValidDagnum, NULL)  );
					if (!isValidDagnum)
                        continue;

					SG_ERR_CHECK(  SG_dagnum__from_sz__hex(pCtx, pszRefDagNum, &iDagnum)  );
					
					if (pvRevSpecs && pvRevSpecs->type != SG_VARIANT_TYPE_NULL)
					{
						SG_uint32 countRevSpecs = 0;
						
						SG_ERR_CHECK(  SG_variant__get__vhash(pCtx, pvRevSpecs, &pvhRevSpec)  );
						SG_ERR_CHECK(  SG_rev_spec__from_vash(pCtx, pvhRevSpec, &pRevSpec)  );

						// Process the rev spec for each dag
						SG_ERR_CHECK(  SG_rev_spec__count(pCtx, pRevSpec, &countRevSpecs)  );
						if (countRevSpecs > 0)
						{
							bSpecificNodesRequested = SG_TRUE;

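						/* Resolve the rev spec to full hids and seed the
						 * dagnode rbtree with them. */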
							SG_ERR_CHECK(  SG_rev_spec__get_all__repo(pCtx, pRepo, pRevSpec, SG_TRUE, &psaFullHids, NULL)  );
							SG_ERR_CHECK(  SG_stringarray__to_rbtree_keys(pCtx, psaFullHids, &prbDagnodes)  );
							SG_STRINGARRAY_NULLFREE(pCtx, psaFullHids);
						}
						SG_REV_SPEC_NULLFREE(pCtx, pRevSpec);
					}

					if (!bSpecificNodesRequested)
					{
						// When no specific nodes are in the request, add all leaves.
						SG_ERR_CHECK(  SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagnum, &prbDagnodes)  );
					}

					if (prbDagnodes) // can be null when leaves of an empty dag are requested
					{
						// Get the leaves of the other repo, which we need to connect to.
						SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__LEAVES, &found)  );
						if (found)
						{
							SG_vhash* pvhRefAllLeaves;
							SG_vhash* pvhRefDagLeaves;
							SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__LEAVES, &pvhRefAllLeaves)  );
							SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRefAllLeaves, pszRefDagNum, &found)  );
							if (found)
							{
								SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRefAllLeaves, pszRefDagNum, &pvhRefDagLeaves)  );
								SG_ERR_CHECK(  SG_sync__build_best_guess_dagfrag(pCtx, pRepo, iDagnum,
									prbDagnodes, pvhRefDagLeaves, &pFrag)  );
							}
						}
						else
						{
							// The other repo's leaves weren't provided: add just the requested nodes, make no attempt to connect.
							SG_ERR_CHECK(  SG_repo__get_repo_id(pCtx, pRepo, &pszRepoId)  );
							SG_ERR_CHECK(  SG_repo__get_admin_id(pCtx, pRepo, &pszAdminId)  );
							SG_ERR_CHECK(  SG_dagfrag__alloc(pCtx, &pFrag, pszRepoId, pszAdminId, iDagnum)  );
							SG_ERR_CHECK(  SG_dagfrag__load_from_repo__simple(pCtx, pFrag, pRepo, prbDagnodes)  );
							SG_NULLFREE(pCtx, pszRepoId);
							SG_NULLFREE(pCtx, pszAdminId);
						}

						SG_ERR_CHECK(  SG_fragball__write__frag(pCtx, pfb, pFrag)  );
						
						SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
						SG_DAGFRAG_NULLFREE(pCtx, pFrag);
					}

				} // dagnum loop
			} // if "dags" exists

			/* Add requested blobs to the fragball */
			SG_ERR_CHECK(  SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &found)  );
			if (found)
			{
				// Blobs were requested.
				SG_vhash* pvhBlobs;
				SG_ERR_CHECK(  SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &pvhBlobs)  );
				SG_ERR_CHECK(  SG_sync__add_blobs_to_fragball(pCtx, pfb, pvhBlobs)  );
			}

			SG_ERR_CHECK(  SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName)  );
			SG_ERR_CHECK(  SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName)  );
		}
        SG_ERR_CHECK(  SG_fragball_writer__close(pCtx, pfb)  );
	}

	/* fallthru */
fail:
	// If we had an error, delete the half-baked fragball.
	if (pFragballPathname && SG_context__has_err(pCtx))
	{
		SG_ERR_IGNORE(  SG_fsobj__remove__pathname(pCtx, pFragballPathname)  );
	}

	SG_PATHNAME_NULLFREE(pCtx, pFragballPathname);
	SG_NULLFREE(pCtx, paDagNums);
	SG_STRING_NULLFREE(pCtx, pstrFragballName);
	SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_RBTREE_NULLFREE(pCtx, prbDagnums);
	SG_REV_SPEC_NULLFREE(pCtx, pRevSpec);
	SG_STRINGARRAY_NULLFREE(pCtx, psaFullHids);
	SG_DAGFRAG_NULLFREE(pCtx, pFrag);
	SG_NULLFREE(pCtx, pszRepoId);
	SG_NULLFREE(pCtx, pszAdminId);
    SG_FRAGBALL_WRITER_NULLFREE(pCtx, pfb);
}