/**
 * Return a pathname set to the location of the "tmp"
 * directory for this working directory.
 *
 * We create something like:
 *
 *     <root>/.sgdrawer/tmp/
 *
 * Note that the main "tmp" directory is long-lived and
 * may be used by multiple tasks at the same time. So
 * for significant tasks, create a private sub-directory
 * within it.
 *
 */
void sg_wc_db__path__get_temp_dir(SG_context * pCtx,
                                  const sg_wc_db * pDb,
                                  SG_pathname ** ppPathTempDir)
{
    SG_pathname * pPath = NULL;

    SG_ERR_CHECK( SG_workingdir__get_drawer_path(pCtx,
                                                 pDb->pPathWorkingDirectoryTop,
                                                 &pPath) );
    SG_ERR_CHECK( SG_pathname__append__from_sz(pCtx, pPath, "tmp") );
    SG_ERR_CHECK( SG_pathname__add_final_slash(pCtx, pPath) );

    // the "tmp" dir could get deleted by routine housekeeping,
    // so re-create it if someone even thinks about using temp-based
    // pathnames.  i'm going to skip the usual "if (!exists) mkdir"
    // stuff and just try to create it and ignore the error.

    SG_fsobj__mkdir__pathname(pCtx, pPath);
    if (SG_CONTEXT__HAS_ERR(pCtx))
    {
        if (!SG_context__err_equals(pCtx, SG_ERR_DIR_ALREADY_EXISTS))
            SG_ERR_RETHROW;
        SG_context__err_reset(pCtx);
    }

    *ppPathTempDir = pPath;
    return;

fail:
    SG_PATHNAME_NULLFREE(pCtx, pPath);
}
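/*
 * A minimal caller sketch (not part of the original source): per the header
 * comment above, significant tasks should create a private sub-directory
 * inside the shared "tmp" dir.  The helper name _my_task__make_private_temp_dir
 * and the "task_123" leaf name are hypothetical; the SG_ calls are the ones
 * used elsewhere in this file, including the same tolerate-EXISTS idiom.
 */
static void _my_task__make_private_temp_dir(SG_context * pCtx,
                                            const sg_wc_db * pDb,
                                            SG_pathname ** ppPathPrivate)
{
    SG_pathname * pPath = NULL;

    SG_ERR_CHECK( sg_wc_db__path__get_temp_dir(pCtx, pDb, &pPath) );
    SG_ERR_CHECK( SG_pathname__append__from_sz(pCtx, pPath, "task_123") );

    // as above: tolerate an already-existing directory, rethrow anything else.
    SG_fsobj__mkdir__pathname(pCtx, pPath);
    if (SG_CONTEXT__HAS_ERR(pCtx))
    {
        if (!SG_context__err_equals(pCtx, SG_ERR_DIR_ALREADY_EXISTS))
            SG_ERR_RETHROW;
        SG_context__err_reset(pCtx);
    }

    *ppPathPrivate = pPath;
    return;

fail:
    SG_PATHNAME_NULLFREE(pCtx, pPath);
}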
void u0026_jsonparser__test_jsonparser_vhash_2(SG_context * pCtx)
{
    SG_string* pstr1 = NULL;
    SG_string* pstr2 = NULL;
    SG_vhash* pvh = NULL;

    VERIFY_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pstr1) );
    VERIFY_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pstr2) );

    VERIFY_ERR_CHECK( u0026_jsonparser__create_2(pCtx, pstr1) );

    SG_VHASH__ALLOC__FROM_JSON__SZ(pCtx, &pvh, SG_string__sz(pstr1));
    VERIFY_COND("from_json", !SG_context__has_err(pCtx));
    VERIFY_COND("from_json", pvh);
    SG_context__err_reset(pCtx);

    SG_vhash__to_json(pCtx, pvh, pstr2);
    VERIFY_COND("from_json", !SG_context__has_err(pCtx));

    // TODO do some checks

fail:
    SG_STRING_NULLFREE(pCtx, pstr1);
    SG_STRING_NULLFREE(pCtx, pstr2);
    SG_VHASH_NULLFREE(pCtx, pvh);
}
void SG_workingdir__generate_and_create_temp_dir_for_purpose(SG_context * pCtx,
                                                             const SG_pathname * pPathWorkingDirectoryTop,
                                                             const char * pszPurpose,
                                                             SG_pathname ** ppPathTempDir)
{
    SG_pathname * pPathTempRoot = NULL;
    SG_pathname * pPath = NULL;
    SG_string * pString = NULL;
    SG_int64 iTimeUTC;
    SG_time tmLocal;
    SG_uint32 kAttempt = 0;

    SG_NONEMPTYCHECK_RETURN(pszPurpose);
    SG_NULLARGCHECK_RETURN(ppPathTempDir);

    // get path to "<wd-top>/.sgtemp".
    SG_ERR_CHECK( SG_workingdir__get_temp_path(pCtx, pPathWorkingDirectoryTop, &pPathTempRoot) );

    SG_ERR_CHECK( SG_time__get_milliseconds_since_1970_utc(pCtx, &iTimeUTC) );
    SG_ERR_CHECK( SG_time__decode__local(pCtx, iTimeUTC, &tmLocal) );

    SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pString) );

    while (1)
    {
        // build path "<wd-top>/.sgtemp/<purpose>_20091201_0",
        // where <purpose> is something like "revert" or "merge".
        SG_ERR_CHECK( SG_string__sprintf(pCtx, pString, "%s_%04d%02d%02d_%d",
                                         pszPurpose,
                                         tmLocal.year, tmLocal.month, tmLocal.mday,
                                         kAttempt++) );
        SG_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPath, pPathTempRoot, SG_string__sz(pString)) );

        // try to create a NEW temp directory.  if this path already exists on disk,
        // loop and try again.  if we hit a hard error, just give up.
        SG_fsobj__mkdir_recursive__pathname(pCtx, pPath);
        if (SG_context__has_err(pCtx) == SG_FALSE)
            goto success;

        if (SG_context__err_equals(pCtx, SG_ERR_DIR_ALREADY_EXISTS) == SG_FALSE)
            SG_ERR_RETHROW;

        SG_context__err_reset(pCtx);
        SG_PATHNAME_NULLFREE(pCtx, pPath);
    }

success:
    *ppPathTempDir = pPath;

    SG_STRING_NULLFREE(pCtx, pString);
    SG_PATHNAME_NULLFREE(pCtx, pPathTempRoot);
    return;

fail:
    SG_STRING_NULLFREE(pCtx, pString);
    SG_PATHNAME_NULLFREE(pCtx, pPathTempRoot);
    SG_PATHNAME_NULLFREE(pCtx, pPath);
}
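/*
 * Usage sketch (hypothetical, not from the original source): create a
 * private temp dir for a merge, use it, then free the pathname.  The
 * function and variable names around the call are assumptions; "merge"
 * is one of the purpose strings named in the comment above.
 */
void my_merge_example(SG_context * pCtx,
                      const SG_pathname * pPathWorkingDirectoryTop)
{
    SG_pathname * pPathTempDir = NULL;

    SG_ERR_CHECK( SG_workingdir__generate_and_create_temp_dir_for_purpose(pCtx,
                                                                          pPathWorkingDirectoryTop,
                                                                          "merge",
                                                                          &pPathTempDir) );

    // ... write merge scratch files under pPathTempDir ...

fail:
    SG_PATHNAME_NULLFREE(pCtx, pPathTempDir);
}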
void sg_wc_db__gid__get_or_insert_alias_from_gid(SG_context * pCtx,
                                                 sg_wc_db * pDb,
                                                 const char * pszGid,
                                                 SG_uint64 * puiAliasGid)
{
    SG_bool bNotAlreadyPresent;

    sg_wc_db__gid__get_alias_from_gid(pCtx, pDb, pszGid, puiAliasGid);
    bNotAlreadyPresent = SG_CONTEXT__HAS_ERR(pCtx);
    if (bNotAlreadyPresent)
    {
        SG_context__err_reset(pCtx);
        SG_ERR_CHECK( sg_wc_db__gid__insert(pCtx, pDb, pszGid) );
        SG_ERR_CHECK( sg_wc_db__gid__get_alias_from_gid(pCtx, pDb, pszGid, puiAliasGid) );
    }

#if TRACE_WC_GID
    {
        SG_int_to_string_buffer bufui64;
        SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDERR,
                                  "GID: get_or_insert [%s] ==> %s [new %d]\n",
                                  pszGid,
                                  SG_uint64_to_sz(*puiAliasGid, bufui64),
                                  bNotAlreadyPresent) );
    }
#endif

fail:
    return;
}
/**
 * Compute an MSTATUS and optionally fall back to a regular STATUS.
 * You own the returned pvaStatus.
 *
 */
void SG_wc_tx__mstatus(SG_context * pCtx,
                       SG_wc_tx * pWcTx,
                       SG_bool bNoIgnores,
                       SG_bool bNoFallback,
                       SG_bool bNoSort,
                       SG_varray ** ppvaStatus,
                       SG_vhash ** ppvhLegend)
{
    _sg_wc_tx__mstatus(pCtx, pWcTx, bNoIgnores, bNoSort, ppvaStatus, ppvhLegend);
    if (SG_CONTEXT__HAS_ERR(pCtx) == SG_FALSE)
        return;

    if ((SG_context__err_equals(pCtx, SG_ERR_NOT_IN_A_MERGE) == SG_FALSE) || bNoFallback)
        SG_ERR_RETHROW;

    SG_context__err_reset(pCtx);
    SG_ERR_CHECK( SG_wc_tx__status(pCtx, pWcTx, NULL,
                                   SG_INT32_MAX, // no filtering
                                   SG_FALSE,     // bListUnchanged
                                   bNoIgnores,
                                   SG_FALSE,     // bNoTSC
                                   SG_FALSE,     // bListSparse
                                   SG_FALSE,     // bListReserved
                                   bNoSort,
                                   ppvaStatus,
                                   ppvhLegend) );

fail:
    return;
}
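/*
 * Caller sketch (hypothetical, not from the original source): open a
 * read-only working-copy transaction, ask for an MSTATUS with the
 * STATUS fallback enabled, then free the results.  SG_WC_TX__ALLOC__BEGIN
 * and the NULLFREE macros are the ones used elsewhere in this file; the
 * function and variable names are assumptions, and transaction cleanup
 * is omitted since it depends on the SG_wc_tx API.
 */
void my_mstatus_example(SG_context * pCtx, const SG_pathname * pPath)
{
    SG_wc_tx * pWcTx = NULL;
    SG_varray * pvaStatus = NULL;
    SG_vhash * pvhLegend = NULL;

    SG_ERR_CHECK( SG_WC_TX__ALLOC__BEGIN(pCtx, &pWcTx, pPath, SG_TRUE) );
    SG_ERR_CHECK( SG_wc_tx__mstatus(pCtx, pWcTx,
                                    SG_FALSE,   // bNoIgnores
                                    SG_FALSE,   // bNoFallback -- allow plain STATUS
                                    SG_FALSE,   // bNoSort
                                    &pvaStatus, &pvhLegend) );

    // ... inspect pvaStatus / pvhLegend ...

fail:
    SG_VARRAY_NULLFREE(pCtx, pvaStatus);
    SG_VHASH_NULLFREE(pCtx, pvhLegend);
    // (transaction cleanup omitted; depends on the SG_wc_tx API)
}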
JS_END_EXTERN_C
#endif

static JSBool
GetLine(SG_context *pCtx, JSContext *cx, char *bufp, SG_uint32 bufremaining, FILE *file, const char *prompt)
{
#ifndef EDITLINE
    SG_UNUSED(cx);
#endif

#ifdef EDITLINE
    /*
     * Use readline only if file is stdin, because there's no way to specify
     * another handle.  Are other filehandles interactive?
     */
    if (file == stdin) {
        char *linep = readline(prompt);
        if (!linep)
            return JS_FALSE;
        if (linep[0] != '\0')
            add_history(linep);
        SG_strcpy(pCtx, bufp, bufremaining, linep);
        if(SG_context__has_err(pCtx))
        {
            SG_context__err_reset(pCtx);
            JS_free(cx, linep);     // don't leak the readline buffer on the error path
            return JS_FALSE;
        }
        JS_free(cx, linep);
        bufp += strlen(bufp);
        *bufp++ = '\n';
        *bufp = '\0';
    } else
#endif
    {
        char line[256];
        fprintf(gOutFile, "%s", prompt);
        fflush(gOutFile);
        if (!fgets(line, sizeof line, file))
            return JS_FALSE;
        SG_strcpy(pCtx, bufp, bufremaining, line);
        if(SG_context__has_err(pCtx))
        {
            SG_context__err_reset(pCtx);
            return JS_FALSE;
        }
    }
    return JS_TRUE;
}
void MyFn(test__parseRfc850nonGmt)(SG_context *pCtx)
{
    const char *ims = "Tue, 16 Mar 2010 14:11:13 EDT";
    SG_int64 parsed = 0, localparsed;

    SG_time__parseRFC850(pCtx, ims, &parsed, &localparsed);
    VERIFY_CTX_ERR_EQUALS("out-of-range if not GMT", pCtx, SG_ERR_ARGUMENT_OUT_OF_RANGE);
    SG_context__err_reset(pCtx);
}
void SG_jscontextpool__init(SG_context * pCtx, const char * szApplicationRoot)
{
    if(gpJSContextPoolGlobalState != NULL)
        return;

    SG_ERR_CHECK_RETURN( SG_alloc1(pCtx, gpJSContextPoolGlobalState) );

    SG_localsettings__get__collapsed_vhash(pCtx, "server", NULL, &gpJSContextPoolGlobalState->pServerConfig);
    if(SG_context__has_err(pCtx))
    {
        SG_log__report_error__current_error(pCtx);
        SG_context__err_reset(pCtx);
    }
    if(gpJSContextPoolGlobalState->pServerConfig==NULL)
    {
        SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &gpJSContextPoolGlobalState->pServerConfig) );
    }

    SG_ERR_CHECK( _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "enable_diagnostics", NULL) );
    SG_ERR_CHECK( _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "readonly", NULL) );
    SG_ERR_CHECK( _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "remote_ajax_libs", NULL) );
    SG_ERR_CHECK( _sg_jscontextpool__force_config_bool(pCtx, gpJSContextPoolGlobalState->pServerConfig, "ssjs_mutable", &gpJSContextPoolGlobalState->ssjsMutable) );

    if(szApplicationRoot==NULL)
        szApplicationRoot="";
    SG_ERR_CHECK( SG_vhash__update__string__sz(pCtx, gpJSContextPoolGlobalState->pServerConfig, "application_root", szApplicationRoot) );

    // Start up SpiderMonkey.
    SG_jscore__new_runtime(pCtx, SG_jsglue__context_callback, NULL, SG_FALSE, NULL);

    // If jscore is already initialized, just move on.
    if (SG_context__err_equals(pCtx, SG_ERR_ALREADY_INITIALIZED))
    {
        SG_context__err_reset(pCtx);
    }

    SG_ERR_CHECK( SG_mutex__init(pCtx, &gpJSContextPoolGlobalState->lock) );

    return;
fail:
    SG_VHASH_NULLFREE(pCtx, gpJSContextPoolGlobalState->pServerConfig);
    SG_NULLFREE(pCtx, gpJSContextPoolGlobalState);
}
void SG_sync__compare_repo_blobs(SG_context* pCtx,
                                 SG_repo* pRepo1,
                                 SG_repo* pRepo2,
                                 SG_bool* pbIdentical)
{
    const SG_uint32 chunk_size = 1000;
    SG_vhash* pvh = NULL;
    const char* pszBlob1 = NULL;
    SG_uint32 i, j;
    SG_uint32 count_observed = 0;
    SG_uint32 count_returned;

    SG_NULLARGCHECK_RETURN(pRepo1);
    SG_NULLARGCHECK_RETURN(pRepo2);
    SG_NULLARGCHECK_RETURN(pbIdentical);

    // assume identical until proven otherwise, so the out-param is
    // never left uninitialized on the success path.
    *pbIdentical = SG_TRUE;

    for(i = 0; SG_TRUE; i++)
    {
        // Ian TODO: other encodings
        SG_ERR_CHECK( SG_repo__list_blobs(pCtx, pRepo1, SG_BLOBENCODING__ZLIB, SG_FALSE, SG_FALSE, chunk_size, chunk_size * i, &pvh) );

        for (j = 0; j < chunk_size; j++)
        {
            SG_bool b = SG_TRUE;

            SG_vhash__get_nth_pair(pCtx, pvh, j, &pszBlob1, NULL);
            if (SG_context__err_equals(pCtx, SG_ERR_ARGUMENT_OUT_OF_RANGE))
            {
                SG_context__err_reset(pCtx);
                break;
            }
            SG_ERR_CHECK_CURRENT;

            SG_ERR_CHECK( SG_repo__does_blob_exist(pCtx, pRepo2, pszBlob1, &b) );
            if (!b)
            {
                *pbIdentical = SG_FALSE;
                break;
            }

            count_observed++;
        }

        if (!j)
            break;

        SG_VHASH_NULLFREE(pCtx, pvh);
    }

    SG_ERR_CHECK( SG_repo__get_blob_stats(pCtx, pRepo2, NULL, NULL, &count_returned,
                                          NULL, NULL, NULL, NULL, NULL, NULL, NULL) );

    if (count_returned != count_observed)
        *pbIdentical = SG_FALSE;

    // fall through
fail:
    SG_VHASH_NULLFREE(pCtx, pvh);
}
int u0020_utf8pathnames__create_file(SG_context * pCtx,
                                     const SG_pathname * pPathnameTmpDir,
                                     _tableitem * pti)
{
    // create a file in the given tmp dir using the given filename.

    SG_pathname * pPathnameNewFile = NULL;
    char * pBufUtf8 = NULL;
    SG_uint32 lenUtf8;
    SG_file * pFile = NULL;
    int iResult;
    SG_bool bTest;

    // convert the utf32 string into utf8.
    VERIFY_ERR_CHECK_DISCARD( SG_utf8__from_utf32(pCtx, pti->pa32, &pBufUtf8, &lenUtf8) );  // we have to free pBufUtf8

    // verify that the computed utf8 string matches what we thought it should be.
    // (this hopefully guards against the conversion layer playing NFC/NFD tricks.)
    iResult = SG_utf8__compare(pBufUtf8, (char *)pti->pa8);
    VERIFYP_COND("u0020_utf8pathnames", (iResult==0), ("Compare failed [%s][%s]", pBufUtf8, pti->pa8));

    // create full pathname to the file to create.
    VERIFY_ERR_CHECK_DISCARD( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx,
                                                              &pPathnameNewFile,
                                                              pPathnameTmpDir, pBufUtf8) );

    // create the file and close it.
    // on Linux when our locale is set to something other than UTF-8, we may
    // get an ICU(10) == U_INVALID_CHAR_FOUND error because the test data is
    // not necessarily friendly to any one locale and there are some NFD
    // cases too.  we map ICU(10) to SG_ERR_UNMAPPABLE_UNICODE_CHAR

    SG_file__open__pathname(pCtx, pPathnameNewFile, SG_FILE_WRONLY|SG_FILE_CREATE_NEW, 0755, &pFile);
#if defined(LINUX)
    bTest = (   (!SG_context__has_err(pCtx))
             || (SG_context__err_equals(pCtx, SG_ERR_UNMAPPABLE_UNICODE_CHAR)) );
#else
    bTest = (   (!SG_context__has_err(pCtx)) );
#endif
    SG_context__err_reset(pCtx);
    VERIFYP_COND("u0020_utf8pathnames", bTest,
                 ("Error Creating file [%s]", SG_pathname__sz(pPathnameNewFile)));

    VERIFY_ERR_CHECK_DISCARD( SG_file__close(pCtx, &pFile) );

    SG_PATHNAME_NULLFREE(pCtx, pPathnameNewFile);
    SG_NULLFREE(pCtx, pBufUtf8);

    return 1;
}
void SG_cmd_util__dump_log(SG_context * pCtx,
                           SG_console_stream cs,
                           SG_repo* pRepo,
                           const char* psz_hid_cs,
                           SG_vhash* pvhCleanPileOfBranches,
                           SG_bool bShowOnlyOpenBranchNames,
                           SG_bool bShowFullComments)
{
    SG_history_result* pHistResult = NULL;
    SG_stringarray * psaHids = NULL;

    SG_STRINGARRAY__ALLOC(pCtx, &psaHids, 1);
    SG_ERR_CHECK( SG_stringarray__add(pCtx, psaHids, psz_hid_cs) );

    SG_history__get_revision_details(pCtx, pRepo, psaHids, NULL, &pHistResult);
    if (SG_context__err_equals(pCtx, SG_ERR_NOT_FOUND))
    {
        /* There's a branch that references a changeset that doesn't exist.
           Show what we can. */
        SG_vhash* pvhRefClosedBranches = NULL;
        SG_vhash* pvhRefBranchValues = NULL;

        SG_context__err_reset(pCtx);

        if (pvhCleanPileOfBranches)
        {
            SG_bool bHas = SG_FALSE;

            SG_ERR_CHECK( SG_vhash__has(pCtx, pvhCleanPileOfBranches, "closed", &bHas) );
            if (bHas)
                SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhCleanPileOfBranches, "closed", &pvhRefClosedBranches) );

            SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhCleanPileOfBranches, "values", &pvhRefBranchValues) );
        }

        SG_ERR_CHECK( SG_console(pCtx, cs, "\n\t%8s: %s\n", "revision", psz_hid_cs) );
        SG_ERR_CHECK( _dump_branch_name(pCtx, cs, psz_hid_cs, bShowOnlyOpenBranchNames,
                                        pvhRefBranchValues, pvhRefClosedBranches) );
        SG_ERR_CHECK( SG_console(pCtx, cs, "\t%8s %s\n", "", "(not present in repository)") );
    }
    else
    {
        SG_ERR_CHECK_CURRENT;

        SG_ERR_CHECK( SG_cmd_util__dump_history_results(pCtx, cs, pHistResult, pvhCleanPileOfBranches,
                                                        bShowOnlyOpenBranchNames, bShowFullComments, SG_FALSE) );
    }

fail:
    SG_HISTORY_RESULT_NULLFREE(pCtx, pHistResult);
    SG_STRINGARRAY_NULLFREE(pCtx, psaHids);
}
void sg_treediff_cache::GetStatusForPath(SG_context * pCtx, SG_bool bIsRetry,
                                         const SG_pathname * pPathName,
                                         SG_wc_status_flags * pStatusFlags,
                                         SG_bool * pbDirChanged)
{
    wxCriticalSectionLocker lock(m_wc_transaction_lock);
    SG_wc_status_flags statusFlags = SG_WC_STATUS_FLAGS__U__FOUND;
    SG_string * psgstr_statusFlagsDetails = NULL;

    if (m_pwc_tx == NULL)
    {
        // Start a read-only transaction.
        SG_ERR_CHECK( SG_WC_TX__ALLOC__BEGIN(pCtx, &m_pwc_tx, ((pPathName)), SG_TRUE) );
    }

    SG_ERR_CHECK( SG_wc_tx__get_item_status_flags(pCtx, m_pwc_tx, SG_pathname__sz(pPathName),
                                                  SG_FALSE, SG_FALSE, &statusFlags, NULL) );

    if (pbDirChanged != NULL && (statusFlags & SG_WC_STATUS_FLAGS__T__DIRECTORY))
    {
        // It's a directory, so we need to check to see if there are modifications under it.
        SG_wc_status_flags dirStatusFlags = SG_WC_STATUS_FLAGS__U__FOUND;
        SG_ERR_CHECK( SG_wc_tx__get_item_dirstatus_flags(pCtx, m_pwc_tx, SG_pathname__sz(pPathName),
                                                         &dirStatusFlags, NULL, pbDirChanged) );
    }

#if DEBUG
    SG_ERR_CHECK( SG_wc_debug__status__dump_flags(pCtx, statusFlags, "status flags", &psgstr_statusFlagsDetails) );
    SG_ERR_CHECK( SG_log__report_verbose(pCtx, "%s", SG_string__sz(psgstr_statusFlagsDetails)) );
#endif

    // Don't recurse if this is our second try.
    if ((bIsRetry == SG_FALSE)
        && ((statusFlags & SG_WC_STATUS_FLAGS__T__BOGUS) == SG_WC_STATUS_FLAGS__T__BOGUS))
    {
        // This is an item that is new in the directory since we started
        // our transaction.  Close the transaction, and start a new one.
        SG_ERR_CHECK( SG_log__report_verbose(pCtx, "Got a bogus status.  Restarting transaction") );
        clearCache(pCtx);
        GetStatusForPath(pCtx, SG_TRUE, pPathName, &statusFlags, pbDirChanged);
    }

    SG_ERR_CHECK( SG_time__get_milliseconds_since_1970_utc(pCtx, &m_timeLastRequest) );

    *pStatusFlags = statusFlags;

fail:
    SG_STRING_NULLFREE(pCtx, psgstr_statusFlagsDetails);

    // Log and reset any errors on the context.
    // Any error condition should report the status as
    // Found and still send a message back to the client.
    if (SG_context__has_err(pCtx) == SG_TRUE)
    {
        if (!SG_context__err_equals(pCtx, SG_ERR_WC_DB_BUSY))
        {
            SG_log__report_error__current_error(pCtx);
        }
        SG_context__err_reset(pCtx);
    }
}
void SG_jscontextpool__teardown(SG_context * pCtx)
{
    if(gpJSContextPoolGlobalState!=NULL)
    {
        SG_ERR_CHECK_RETURN( SG_mutex__lock(pCtx, &gpJSContextPoolGlobalState->lock) );

        // Wait until all outstanding SG_jscontexts have been released.  Don't try to terminate
        // early, otherwise we have a race condition on our hands: If the outstanding SG_jscontext
        // tries to perform any JavaScript operations before the app terminates, but after we have
        // called JS_Shutdown(), we'll end up crashing on exit.  This of course is worse than
        // making the user hit Ctrl-C again to do a hard shutdown if it's taking too long.
        if(gpJSContextPoolGlobalState->numContextsCheckedOut > 0)
        {
            SG_ERR_IGNORE( SG_log__report_warning(pCtx, "Waiting on %d SG_jscontexts that are still in use.", gpJSContextPoolGlobalState->numContextsCheckedOut) );
            while(gpJSContextPoolGlobalState->numContextsCheckedOut > 0)
            {
                SG_ERR_CHECK_RETURN( SG_mutex__unlock(pCtx, &gpJSContextPoolGlobalState->lock) );
                SG_sleep_ms(10);
                SG_ERR_CHECK_RETURN( SG_mutex__lock(pCtx, &gpJSContextPoolGlobalState->lock) );
            }
        }

        SG_ERR_CHECK_RETURN( SG_mutex__unlock(pCtx, &gpJSContextPoolGlobalState->lock) );
        SG_mutex__destroy(&gpJSContextPoolGlobalState->lock);

        while(gpJSContextPoolGlobalState->pFirstAvailableContext!=NULL)
        {
            SG_jscontext * pJs = gpJSContextPoolGlobalState->pFirstAvailableContext;
            gpJSContextPoolGlobalState->pFirstAvailableContext = pJs->pNextAvailableContext;

            (void)JS_SetContextThread(pJs->cx);
            JS_BeginRequest(pJs->cx);
            JS_SetContextPrivate(pJs->cx, pCtx);
            JS_DestroyContextNoGC(pJs->cx);

            if(SG_context__has_err(pCtx)) // An error was produced during GC...
            {
                SG_log__report_error__current_error(pCtx);
                SG_context__err_reset(pCtx);
            }

            SG_NULLFREE(pCtx, pJs);
        }

        SG_VHASH_NULLFREE(pCtx, gpJSContextPoolGlobalState->pServerConfig);
        SG_NULLFREE(pCtx, gpJSContextPoolGlobalState);
    }
}
void _read_template_file(
    SG_context *pCtx,
    const char *templateFn,
    SG_string **pContent, /**< we allocate, we free on error, else caller owns it */
    const _request_headers *pRequestHeaders,
    _replacer_cb replacer)
{
    SG_pathname *tpath = NULL;
    SG_file *pFile = NULL;
    SG_uint32 got = 0;
    char tbuf[1024];

    // todo: make this thread-safe:
    if(_sg_uridispatch__templatePath==NULL)
        SG_ERR_CHECK( _sgui_set_templatePath(pCtx) );

    SG_ERR_CHECK( SG_PATHNAME__ALLOC__COPY(pCtx, &tpath, _sg_uridispatch__templatePath) );
    SG_ERR_CHECK( SG_pathname__append__from_sz(pCtx, tpath, templateFn) );

    SG_ERR_CHECK( SG_file__open__pathname(pCtx, tpath, SG_FILE_RDONLY|SG_FILE_OPEN_EXISTING, 0644, &pFile) );
    SG_PATHNAME_NULLFREE(pCtx, tpath);

    SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, pContent) );

    do
    {
        SG_file__read(pCtx, pFile, sizeof(tbuf), (SG_byte *)tbuf, &got);
        if (SG_context__err_equals(pCtx, SG_ERR_EOF))
        {
            SG_context__err_reset(pCtx);
            break;
        }
        SG_ERR_CHECK_CURRENT;

        SG_ERR_CHECK( SG_string__append__buf_len(pCtx, *pContent, (const SG_byte *)tbuf, got) );
    } while (got > 0);

    SG_ERR_CHECK( SG_file__close(pCtx, &pFile) );

    SG_ERR_CHECK( _templatize(pCtx, *pContent, pRequestHeaders, replacer) );

    return;

fail:
    SG_STRING_NULLFREE(pCtx, *pContent);
    SG_FILE_NULLCLOSE(pCtx, pFile);
    SG_PATHNAME_NULLFREE(pCtx, tpath);
}
void SG_jscontext__release(SG_context * pCtx, SG_jscontext ** ppJs)
{
    if(ppJs==NULL || *ppJs==NULL)
        return;

    if(gpJSContextPoolGlobalState->ssjsMutable)
    {
        SG_httprequestprofiler__start(SG_HTTPREQUESTPROFILER_CATEGORY__JSREQUEST_TOGGLING);
        JS_EndRequest((*ppJs)->cx);
        JS_DestroyContext((*ppJs)->cx);
        SG_httprequestprofiler__stop();

        if(SG_context__has_err(pCtx)) // An error was produced during GC...
        {
            SG_log__report_error__current_error(pCtx);
            SG_context__err_reset(pCtx);
        }

        SG_NULLFREE(pCtx, (*ppJs));
    }
    else
    {
        SG_jscontext * pJs = *ppJs;

        JS_MaybeGC(pJs->cx);

        JS_SetContextPrivate(pJs->cx, NULL); // Clear out the old pCtx pointer.

        SG_httprequestprofiler__start(SG_HTTPREQUESTPROFILER_CATEGORY__JSREQUEST_TOGGLING);
        JS_EndRequest(pJs->cx);
        pJs->isInARequest = SG_FALSE;
        (void)JS_ClearContextThread(pJs->cx);
        SG_httprequestprofiler__stop();

        SG_ERR_CHECK_RETURN( SG_mutex__lock(pCtx, &gpJSContextPoolGlobalState->lock) );
        pJs->pNextAvailableContext = gpJSContextPoolGlobalState->pFirstAvailableContext;
        gpJSContextPoolGlobalState->pFirstAvailableContext = pJs;
        --gpJSContextPoolGlobalState->numContextsCheckedOut;
        SG_ERR_CHECK_RETURN( SG_mutex__unlock(pCtx, &gpJSContextPoolGlobalState->lock) );

        *ppJs = NULL;
    }
}
/**
 * Find the appropriate external tool to let the user perform a TEXT MERGE
 * on a file.
 *
 * TODO 2010/07/13 For now, this is hard-coded to use DiffMerge.
 * TODO            Later we want to allow them to have multiple
 * TODO            tools configured and/or to use the file suffix
 * TODO            and so on.
 */
static void _resolve__external_tool__lookup(SG_context * pCtx,
                                            struct _resolve_data * pData,
                                            const char * pszGid,
                                            const SG_vhash * pvhIssue,
                                            SG_string * pStrRepoPath,
                                            _resolve__external_tool ** ppET)
{
    _resolve__external_tool * pET = NULL;
    SG_repo * pRepo;

    SG_UNUSED( pszGid );
    SG_UNUSED( pvhIssue );

    SG_ERR_CHECK( SG_pendingtree__get_repo(pCtx, pData->pPendingTree, &pRepo) );

    SG_ERR_CHECK( SG_alloc1(pCtx, pET) );

    // TODO 2010/07/13 Use localsettings to determine WHICH external tool we should use.
    // TODO            (This could be based upon suffixes and/or whatever.)
    // TODO            Then -- for THAT tool -- lookup the program executable path and
    // TODO            the argument list.
    // TODO            Substitute the given pathnames into the argument list.
    // TODO
    // TODO            For now, we just hard-code DiffMerge.

    SG_ERR_CHECK( SG_strdup(pCtx, "DiffMerge", &pET->pszName) );

    SG_localsettings__get__sz(pCtx, "merge/diffmerge/program", pRepo, &pET->pszExe, NULL);
    if (SG_context__has_err(pCtx) || (!pET->pszExe) || (!*pET->pszExe))
    {
        SG_context__err_reset(pCtx);
        SG_ERR_THROW2( SG_ERR_NO_MERGE_TOOL_CONFIGURED,
                       (pCtx, "'%s'  Use 'vv localsetting set merge/diffmerge/program' and retry -or- manually merge content and then use 'vv resolve --mark'.",
                        SG_string__sz(pStrRepoPath)) );
    }

    // TODO 2010/07/13 Get argvec.

    *ppET = pET;
    return;

fail:
    _RESOLVE__EXTERNAL_TOOL__NULLFREE(pCtx, pET);
}
void MyFn(create_repo)(SG_context * pCtx, SG_repo** ppRepo)
{
    SG_repo* pRepo = NULL;
    SG_pathname* pPath_repo = NULL;
    char buf_repo_id[SG_GID_BUFFER_LENGTH];
    char buf_admin_id[SG_GID_BUFFER_LENGTH];
    SG_vhash* pvhPartialDescriptor = NULL;
    char* pszRepoImpl = NULL;

    VERIFY_ERR_CHECK( SG_gid__generate(pCtx, buf_repo_id, sizeof(buf_repo_id)) );
    VERIFY_ERR_CHECK( SG_gid__generate(pCtx, buf_admin_id, sizeof(buf_admin_id)) );

    /* Get our paths fixed up */
    VERIFY_ERR_CHECK( SG_PATHNAME__ALLOC(pCtx, &pPath_repo) );
    VERIFY_ERR_CHECK( SG_pathname__set__from_cwd(pCtx, pPath_repo) );
    VERIFY_ERR_CHECK( SG_pathname__append__from_sz(pCtx, pPath_repo, "repo") );

    // it's fine if the directory is already there; ignore any error from mkdir.
    SG_fsobj__mkdir__pathname(pCtx, pPath_repo);
    SG_context__err_reset(pCtx);

    // Create the repo
    VERIFY_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvhPartialDescriptor) );

    VERIFY_ERR_CHECK_DISCARD( SG_localsettings__get__sz(pCtx, SG_LOCALSETTING__NEWREPO_DRIVER, NULL, &pszRepoImpl, NULL) );
    if (pszRepoImpl)
    {
        VERIFY_ERR_CHECK_DISCARD( SG_vhash__add__string__sz(pCtx, pvhPartialDescriptor, SG_RIDESC_KEY__STORAGE, pszRepoImpl) );
    }

    VERIFY_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhPartialDescriptor, SG_RIDESC_FSLOCAL__PATH_PARENT_DIR, SG_pathname__sz(pPath_repo)) );

    VERIFY_ERR_CHECK( SG_repo__create_repo_instance(pCtx, NULL, pvhPartialDescriptor, SG_TRUE, NULL, buf_repo_id, buf_admin_id, &pRepo) );

    *ppRepo = pRepo;

    // Fall through to common cleanup
fail:
    SG_VHASH_NULLFREE(pCtx, pvhPartialDescriptor);
    SG_PATHNAME_NULLFREE(pCtx, pPath_repo);
    SG_NULLFREE(pCtx, pszRepoImpl);
}
/**
 * Convert absolute path to a wd-top-relative string.
 *
 * This is just pathname/string parsing; we DO NOT confirm
 * that the path exists.  We only confirm that the result
 * is properly contained within the working directory.
 *
 */
void sg_wc_db__path__absolute_to_repopath(SG_context * pCtx,
                                          const sg_wc_db * pDb,
                                          const SG_pathname * pPathItem,
                                          SG_string ** ppStringRepoPath)
{
    SG_string * pString = NULL;
    const char * psz;

    SG_workingdir__wdpath_to_repopath(pCtx,
                                      pDb->pPathWorkingDirectoryTop,
                                      pPathItem,
                                      SG_FALSE,
                                      &pString);
    if (SG_CONTEXT__HAS_ERR(pCtx))
    {
        if (SG_context__err_equals(pCtx, SG_ERR_CANNOT_MAKE_RELATIVE_PATH))
            goto throw_not_in_working_copy;
        if (SG_context__err_equals(pCtx, SG_ERR_ITEM_NOT_UNDER_VERSION_CONTROL))
            goto throw_not_in_working_copy;
        if (SG_context__err_equals(pCtx, SG_ERR_PATH_NOT_IN_WORKING_COPY))
            goto throw_not_in_working_copy;

        SG_ERR_RETHROW;
    }

    psz = SG_string__sz(pString);
    if ((psz[0] == '.') && (psz[1] == '.') && ((psz[2] == '/') || (psz[2] == 0)))
        goto throw_not_in_working_copy;

    *ppStringRepoPath = pString;
    return;

throw_not_in_working_copy:
    SG_context__err_reset(pCtx);
    SG_ERR_THROW2( SG_ERR_PATH_NOT_IN_WORKING_COPY,
                   (pCtx, "The path '%s' is not inside the working copy rooted at '%s'.",
                    SG_pathname__sz(pPathItem),
                    SG_pathname__sz(pDb->pPathWorkingDirectoryTop)) );

fail:
    SG_STRING_NULLFREE(pCtx, pString);
}
static void _remote_clone_allowed(SG_context* pCtx)
{
    char* pszSetting = NULL;
    SG_bool bAllowed = SG_TRUE;

    SG_localsettings__get__sz(pCtx, SG_LOCALSETTING__SERVER_CLONE_ALLOWED, NULL, &pszSetting, NULL);
    if(!SG_context__has_err(pCtx) && pszSetting != NULL)
        bAllowed = (strcmp(pszSetting, "true")==0);
    if(SG_context__has_err(pCtx))
    {
        SG_log__report_error__current_error(pCtx);
        SG_context__err_reset(pCtx);
    }

    if (!bAllowed)
        SG_ERR_THROW(SG_ERR_SERVER_DISALLOWED_REPO_CREATE_OR_DELETE);

    /* fall through */
fail:
    SG_NULLFREE(pCtx, pszSetting);
}
static int _vscript_context_teardown(SG_context **ppCtx)
{
    SG_context *pCtx = *ppCtx;
    int result = 0;

    //////////////////////////////////////////////////////////////////
    // Post-processing for errors captured in SG_context.

    if (SG_context__has_err(pCtx))
    {
        SG_context__err_to_console(pCtx, SG_CS_STDERR);
        SG_context__err_reset(pCtx);
    }

    *ppCtx = NULL;
    SG_lib__global_cleanup(pCtx);

    SG_CONTEXT_NULLFREE(pCtx);

    return(result);
}
/**
 * This test requires the 'crash' command be defined/enabled in 'vv'.
 * This command will always take a SIGSEGV.
 */
void MyFn(test1)(SG_context * pCtx)
{
    SG_exit_status exitStatusChild;
    SG_exec_argvec * pArgVec = NULL;

    VERIFY_ERR_CHECK( SG_exec_argvec__alloc(pCtx, &pArgVec) );
    VERIFY_ERR_CHECK( SG_exec_argvec__append__sz(pCtx, pArgVec, "crash") );

    // we use the easy version and only get back an exit status.
    // this should have caught the crash and tried to run gdb on
    // the coredump.  we didn't divert the gdb output, so we'll
    // just eyeball the test log for now.

    SG_exec__exec_sync__files(pCtx, "vv", pArgVec, NULL, NULL, NULL, &exitStatusChild);
    VERIFY_CTX_ERR_EQUALS("child status", pCtx, SG_ERR_ABNORMAL_TERMINATION);
    SG_context__err_reset(pCtx);

fail:
    SG_EXEC_ARGVEC_NULLFREE(pCtx, pArgVec);
}
void SG_cmd_util__set_password(SG_context * pCtx,
                               const char * pszSrcRepoSpec,
                               SG_string * pStringUsername,
                               SG_string * pStringPassword)
{
    if (SG_password__supported())
    {
        SG_password__set(pCtx, pszSrcRepoSpec, pStringUsername, pStringPassword);
        if (!SG_CONTEXT__HAS_ERR(pCtx))
            return;
        if (!SG_context__err_equals(pCtx, SG_ERR_NOTIMPLEMENTED))
            SG_ERR_RETHROW;
        SG_context__err_reset(pCtx);
    }

    SG_ERR_CHECK( SG_console(pCtx, SG_CS_STDERR,
                             "Could not remember your password in the keyring.\n") );

fail:
    return;
}
void SG_repo__pack__zlib(SG_context* pCtx, SG_repo * pRepo)
{
    SG_vhash* pvh = NULL;
    SG_uint32 count = 0;
    SG_uint32 i = 0;
    SG_repo_tx_handle* pTx;

    SG_ERR_CHECK( SG_repo__list_blobs(pCtx, pRepo, SG_BLOBENCODING__FULL, SG_TRUE, SG_TRUE, 500, 0, &pvh) );
    SG_ERR_CHECK( SG_vhash__count(pCtx, pvh, &count) );
    SG_ERR_CHECK( SG_repo__begin_tx(pCtx, pRepo, &pTx) );

    for (i=0; i<count; i++)
    {
        const char* psz_hid = NULL;
        const SG_variant* pv = NULL;

        SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvh, i, &psz_hid, &pv) );

        SG_repo__change_blob_encoding(pCtx, pRepo, pTx, psz_hid, SG_BLOBENCODING__ZLIB, NULL, NULL, NULL, NULL, NULL);
        if (SG_context__has_err(pCtx))
        {
            if (!SG_context__err_equals(pCtx, SG_ERR_REPO_BUSY))
            {
                SG_ERR_RETHROW;
            }
            else
            {
                SG_context__err_reset(pCtx);
            }
        }
    }

    SG_ERR_CHECK( SG_repo__commit_tx(pCtx, pRepo, &pTx) );

    SG_VHASH_NULLFREE(pCtx, pvh);
    return;

fail:
    SG_VHASH_NULLFREE(pCtx, pvh);
}
int u0020_utf8pathnames__readdir(SG_context * pCtx, const SG_pathname * pPathnameTmpDir)
{
    // open the tmp dir for reading and read the filename of each file in it.
    // compare these with the version of the filename that we used to create
    // the file.
    //
    // WE RELY ON THE FACT THAT EACH FILENAME IN THE ARRAY STARTS
    // WITH A DIFFERENT LETTER.

    SG_error errRead;
    SG_dir * pDir = NULL;
    SG_string * pStringFilename = NULL;

    VERIFY_ERR_CHECK_DISCARD( SG_STRING__ALLOC(pCtx, &pStringFilename) );

    // opendir gives us the first file automatically.
    VERIFY_ERR_CHECK_DISCARD( SG_dir__open(pCtx, pPathnameTmpDir, &pDir, &errRead, pStringFilename, NULL) );
    VERIFYP_COND("u0020_utf8pathnames", (SG_IS_OK(errRead)), ("Reading first file in directory."));

    do
    {
        u0020_utf8pathnames__testfilename(pStringFilename);

        SG_dir__read(pCtx, pDir, pStringFilename, NULL);
        SG_context__get_err(pCtx, &errRead);

    } while (SG_IS_OK(errRead));

    VERIFY_CTX_ERR_EQUALS("u0020_utf8pathnames", pCtx, SG_ERR_NOMOREFILES);
    SG_context__err_reset(pCtx);

    SG_DIR_NULLCLOSE(pCtx, pDir);
    SG_STRING_NULLFREE(pCtx, pStringFilename);

    return 1;
}
static void _s2__do_cset_vs_cset(SG_context * pCtx,
                                 const SG_option_state * pOptSt,
                                 const SG_stringarray * psaArgs,
                                 SG_uint32 * pNrErrors)
{
    SG_varray * pvaStatus = NULL;
    SG_varray * pvaStatusDirtyFiles = NULL;
    SG_stringarray * psa1 = NULL;
    SG_string * pStringGidRepoPath = NULL;
    SG_string * pStringErr = NULL;
    SG_uint32 nrErrors = 0;

    SG_ERR_CHECK( SG_vv2__status(pCtx,
                                 pOptSt->psz_repo,
                                 pOptSt->pRevSpec,
                                 psaArgs,
                                 WC__GET_DEPTH(pOptSt),
                                 SG_FALSE, // bNoSort
                                 &pvaStatus, NULL) );
    if (pvaStatus)
    {
        if (pOptSt->bInteractive)
        {
            // Filter list down to just modified files and show them one-by-one.
            SG_ERR_CHECK( _get_dirty_files(pCtx, pvaStatus, &pvaStatusDirtyFiles) );
            if (pvaStatusDirtyFiles)
                SG_ERR_CHECK( _do_gui_diffs(pCtx, SG_FALSE, pOptSt, pvaStatusDirtyFiles, &nrErrors) );
        }
        else
        {
            SG_uint32 k, nrItems;

            // Print the changes with PATCH-like headers.
            // Accumulate any tool errors.
            SG_ERR_CHECK( SG_varray__count(pCtx, pvaStatus, &nrItems) );
            for (k=0; k<nrItems; k++)
            {
                SG_vhash * pvhItem;
                const char * pszGid = NULL;

                SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pvaStatus, k, &pvhItem) );

                // TODO 2013/02/22 Our pvhItem has all of the details for the diff,
                // TODO            but we don't yet have a public API to let it be
                // TODO            used as is.  So we build a @gid repo-path and
                // TODO            run the old historical diff code on a 1-item array
                // TODO            containing this @gid.
                // TODO
                // TODO            We should fix this to just pass down the pvhItem
                // TODO            so that it doesn't have to repeat the status lookup.

                SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhItem, "gid", &pszGid) );

                SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pStringGidRepoPath) );
                SG_ERR_CHECK( SG_string__sprintf(pCtx, pStringGidRepoPath, "@%s", pszGid) );

                SG_ERR_CHECK( SG_STRINGARRAY__ALLOC(pCtx, &psa1, 1) );
                SG_ERR_CHECK( SG_stringarray__add(pCtx, psa1, SG_string__sz(pStringGidRepoPath)) );

                SG_vv2__diff_to_stream__throw(pCtx, pOptSt->psz_repo, pOptSt->pRevSpec,
                                              psa1,
                                              0,
                                              SG_TRUE,  // bNoSort -- doesn't matter, only 1 item in list
                                              SG_FALSE, // bInteractive
                                              pOptSt->psz_tool);

                // Don't throw the error from the tool.  Just print it on STDERR
                // and remember that we had an error so that we don't stop showing
                // the diffs just because we stumble over a changed binary file
                // or mis-configured tool, for example.
                if (SG_context__has_err(pCtx))
                {
                    SG_context__err_to_string(pCtx, SG_FALSE, &pStringErr);
                    SG_context__err_reset(pCtx);

                    SG_ERR_CHECK( SG_console__raw(pCtx, SG_CS_STDERR, SG_string__sz(pStringErr)) );
                    SG_STRING_NULLFREE(pCtx, pStringErr);

                    nrErrors++;
                }

                SG_STRING_NULLFREE(pCtx, pStringGidRepoPath);
                SG_STRINGARRAY_NULLFREE(pCtx, psa1);
            }
        }
    }

    *pNrErrors = nrErrors;

fail:
    SG_VARRAY_NULLFREE(pCtx, pvaStatus);
    SG_VARRAY_NULLFREE(pCtx, pvaStatusDirtyFiles);
    SG_STRINGARRAY_NULLFREE(pCtx, psa1);
    SG_STRING_NULLFREE(pCtx, pStringGidRepoPath);
    SG_STRING_NULLFREE(pCtx, pStringErr);
}
/**
 * Do diff of an individual item.
 * When WC-based, we have a "DiffStep" vhash.
 * When historical, we have an item from a pvaStatus.
 *
 */
static void _do_diff1(SG_context * pCtx,
                      SG_bool bWC,
                      const SG_option_state * pOptSt,
                      const SG_vhash * pvhItem,
                      SG_uint32 * piResult)
{
    SG_string * pStringGidRepoPath = NULL;
    SG_vhash * pvhResultCodes = NULL;
    SG_stringarray * psa1 = NULL;
    const char * pszGid;
    SG_int64 i64Result = 0;
    SG_string * pStringErr = NULL;

    SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhItem, "gid", &pszGid) );

    if (bWC)
    {
        SG_pathname * pPathWc = NULL;
        SG_bool bHasTool = SG_FALSE;

        // With the __diff__setup() and __diff__run() changes, we have already
        // examined the items during the __setup() step and recorded a tool for
        // the *FILES* that have changed content.  So if "tool" isn't set in the
        // DiffStep/Item, we don't need to diff it -- it could be a structural
        // change, a non-file, a found item, etc.
        //
        // we do not use SG_wc__diff__throw() because we already have the diff info
        // and we want to control the result-code processing below.

        SG_ERR_CHECK( SG_vhash__has(pCtx, pvhItem, "tool", &bHasTool) );
        if (bHasTool)
            SG_ERR_CHECK( SG_wc__diff__run(pCtx, pPathWc, pvhItem, &pvhResultCodes) );
    }
    else
    {
        SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pStringGidRepoPath) );
        SG_ERR_CHECK( SG_string__sprintf(pCtx, pStringGidRepoPath, "@%s", pszGid) );

        SG_ERR_CHECK( SG_STRINGARRAY__ALLOC(pCtx, &psa1, 1) );
        SG_ERR_CHECK( SG_stringarray__add(pCtx, psa1, SG_string__sz(pStringGidRepoPath)) );

        // we do not use the __throw() version of this routine so we can control
        // result-code processing below.
        SG_ERR_CHECK( SG_vv2__diff_to_stream(pCtx, pOptSt->psz_repo, pOptSt->pRevSpec,
                                             psa1,
                                             0,
                                             SG_FALSE, // bNoSort
                                             SG_TRUE,  // bInteractive
                                             pOptSt->psz_tool,
                                             &pvhResultCodes) );
    }

    if (pvhResultCodes)
    {
        SG_vhash * pvhResult; // we do not own this

        SG_ERR_CHECK( SG_vhash__check__vhash(pCtx, pvhResultCodes, pszGid, &pvhResult) );
        if (pvhResult)
        {
            const char * pszTool;

            SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhResult, "tool", &pszTool) );
            SG_ERR_CHECK( SG_vhash__get__int64(pCtx, pvhResult, "result", &i64Result) );

            SG_difftool__check_result_code__throw(pCtx, i64Result, pszTool);
            if (SG_context__has_err(pCtx))
            {
                SG_context__err_to_string(pCtx, SG_FALSE, &pStringErr);
                SG_context__err_reset(pCtx);
                SG_ERR_CHECK( SG_console__raw(pCtx, SG_CS_STDERR, SG_string__sz(pStringErr)) );
                // eat the tool error.  the result code is set.
            }
        }
    }

    if (piResult)
        *piResult = (SG_uint32)i64Result;

fail:
    SG_STRING_NULLFREE(pCtx, pStringGidRepoPath);
    SG_VHASH_NULLFREE(pCtx, pvhResultCodes);
    SG_STRINGARRAY_NULLFREE(pCtx, psa1);
    SG_STRING_NULLFREE(pCtx, pStringErr);
}
int u0040_unc__stat_dir(SG_context * pCtx, const char * szDir)
{
    SG_pathname * pPathname = NULL;
    SG_pathname * pPathnameFile = NULL;
    SG_file * pf = NULL;
    SG_fsobj_stat fsobjStat;
    SG_bool bFileExists;
    SG_int_to_string_buffer bufSize;
    char bufDate[100];

    SG_context__err_reset(pCtx);

    //////////////////////////////////////////////////////////////////
    // stat the given directory.
    //////////////////////////////////////////////////////////////////

    INFOP("u0040_unc", ("Inspecting [%s]", szDir));

    VERIFY_ERR_CHECK_RETURN( SG_PATHNAME__ALLOC__SZ(pCtx, &pPathname, szDir) );

    VERIFY_ERR_CHECK( SG_fsobj__stat__pathname(pCtx, pPathname, &fsobjStat) );
    VERIFY_COND("u0040_unc", (fsobjStat.type == SG_FSOBJ_TYPE__DIRECTORY));
    // TODO should we verify length == 0 ?
    // TODO should we verify modtime ?

    SG_uint64_to_sz(fsobjStat.size, bufSize);
    VERIFY_ERR_CHECK_DISCARD( SG_time__format_utc__i64(pCtx, fsobjStat.mtime_ms, bufDate, SG_NrElements(bufDate)) );
    INFOP("u0040_unc", ("Result: [perms %04o][type %d][size %s][mtime %s]",
                        fsobjStat.perms, fsobjStat.type,
                        bufSize, bufDate));

    //////////////////////////////////////////////////////////////////
    // create a unique file in the directory and stat it.
    //////////////////////////////////////////////////////////////////

    VERIFY_ERR_CHECK( unittest__alloc_unique_pathname(pCtx, szDir, &pPathnameFile) );
    INFOP("u0040_unc", ("  Creating file [%s]", SG_pathname__sz(pPathnameFile)));

    VERIFY_ERR_CHECK( SG_file__open__pathname(pCtx, pPathnameFile, SG_FILE_CREATE_NEW | SG_FILE_RDWR, 0777, &pf) );

    VERIFY_ERR_CHECK( SG_fsobj__stat__pathname(pCtx, pPathnameFile, &fsobjStat) );
    VERIFY_COND("u0040_unc", (fsobjStat.type == SG_FSOBJ_TYPE__REGULAR));
    VERIFY_COND("u0040_unc", (fsobjStat.size == 0));
    VERIFY_COND("u0040_unc", (SG_fsobj__equivalent_perms(fsobjStat.perms, 0777)));
    // TODO should we verify modtime ?

    SG_uint64_to_sz(fsobjStat.size, bufSize);
    VERIFY_ERR_CHECK_DISCARD( SG_time__format_utc__i64(pCtx, fsobjStat.mtime_ms, bufDate, SG_NrElements(bufDate)) );
    INFOP("u0040_unc", ("  Result: [perms %04o][type %d][size %s][mtime %s]",
                        fsobjStat.perms, fsobjStat.type,
                        bufSize, bufDate));

    VERIFY_ERR_CHECK_DISCARD( SG_file__close(pCtx, &pf) );

    // delete the file and stat it again

    VERIFY_ERR_CHECK_DISCARD( SG_fsobj__remove__pathname(pCtx, pPathnameFile) );
    VERIFY_ERR_CHECK_DISCARD( SG_fsobj__exists__pathname(pCtx, pPathnameFile, &bFileExists, NULL, NULL) );
    VERIFY_COND("u0040_unc", (!bFileExists));

    //////////////////////////////////////////////////////////////////
    // clean up
    //////////////////////////////////////////////////////////////////

    SG_PATHNAME_NULLFREE(pCtx, pPathnameFile);
    SG_PATHNAME_NULLFREE(pCtx, pPathname);

    return 1;

fail:
    SG_FILE_NULLCLOSE(pCtx, pf);
    SG_PATHNAME_NULLFREE(pCtx, pPathnameFile);
    SG_PATHNAME_NULLFREE(pCtx, pPathname);
    return 0;
}
void SG_dagfrag__load_from_repo__one(SG_context * pCtx,
                                     SG_dagfrag * pFrag,
                                     SG_repo* pRepo,
                                     const char * szHidStart,
                                     SG_int32 nGenerations)
{
    // load a fragment of the dag starting with the given dagnode
    // for nGenerations of parents.
    //
    // we add this portion of the graph to whatever we already
    // have in our fragment.  this may either augment (give us
    // a larger connected piece) or it may be an independent
    // subset.
    //
    // if nGenerations <= 0, load everything from this starting point
    // back to the NULL/root.
    //
    // generationStart is the generation of the starting dagnode.
    //
    // the starting dagnode *MAY* be in the final start-fringe.
    // normally, it will be.  but if we are called multiple times
    // (and have more than one start point), it may be the case
    // that this node is a parent of one of the other start points.
    //
    // we compute generationEnd as the generation that we will NOT
    // include in the fragment; nodes of that generation will be in
    // the end-fringe.  that is, we include [start...end) like most
    // C++ iterators.

    _my_data * pMyDataCached = NULL;
    SG_dagnode * pDagnodeAllocated = NULL;
    SG_dagnode * pDagnodeStart;
    SG_int32 generationStart, generationEnd;
    SG_bool bPresent = SG_FALSE;
    SG_rbtree* prb_WorkQueue = NULL;

    SG_NULLARGCHECK_RETURN(pFrag);
    SG_NONEMPTYCHECK_RETURN(szHidStart);

    // if we are extending the fragment, delete the generation-sorted
    // member cache copy.  (see __foreach_member()).  it's either that
    // or update it in parallel as we change the real CACHE and that
    // doesn't seem worth the bother.

    SG_RBTREE_NULLFREE(pCtx, pFrag->m_pRB_GenerationSortedMemberCache);
    pFrag->m_pRB_GenerationSortedMemberCache = NULL;

    SG_ERR_CHECK( SG_RBTREE__ALLOC(pCtx, &prb_WorkQueue) );

    // fetch the starting dagnode and compute the generation bounds.
    // first, see if the cache already has info for this dagnode.
    // if not, fetch it from the source and then add it to the cache.

    SG_ERR_CHECK( _cache__lookup(pCtx, pFrag, szHidStart, &pMyDataCached, &bPresent) );
    if (!bPresent)
    {
        if (!pRepo)
            SG_ERR_THROW( SG_ERR_INVALID_WHILE_FROZEN );

        SG_ERR_CHECK( SG_repo__fetch_dagnode(pCtx, pRepo, szHidStart, &pDagnodeAllocated) );

        pDagnodeStart = pDagnodeAllocated;
    }
    else
    {
        pDagnodeStart = pMyDataCached->m_pDagnode;
    }

    SG_ERR_CHECK( SG_dagnode__get_generation(pCtx, pDagnodeStart, &generationStart) );
    SG_ASSERT_RELEASE_FAIL2( (generationStart > 0),
                             (pCtx, "Invalid generation value [%d] for dagnode [%s]",
                              generationStart, szHidStart) );

    if ((nGenerations <= 0) || (generationStart <= nGenerations))
        generationEnd = 0;
    else
        generationEnd = generationStart - nGenerations;

    if (!bPresent)
    {
        // this dagnode was not already present in the cache.
        // add it to the cache directly and set the state.
        // we don't need to go thru the work queue for it.
        //
        // then add all of its parents to the work queue.

        SG_ERR_CHECK( _cache__add__dagnode(pCtx, pFrag, generationStart,
                                           pDagnodeAllocated, SG_DFS_START_MEMBER,
                                           &pMyDataCached) );
        pDagnodeAllocated = NULL;

        SG_ERR_CHECK( _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode, prb_WorkQueue) );
    }
    else
    {
        // the node was already present in the cache, so we have already
        // walked at least part of the graph around it.

        switch (pMyDataCached->m_state)
        {
        default:
        //case SG_DFS_UNKNOWN:
            SG_ASSERT_RELEASE_FAIL2( (0),
                                     (pCtx, "Invalid state [%d] in DAGFRAG Cache for [%s]",
                                      pMyDataCached->m_state, szHidStart) );

        case SG_DFS_INTERIOR_MEMBER:    // already in fragment
        case SG_DFS_START_MEMBER:       // already in fragment, duplicated leaf?
            if (generationEnd < pMyDataCached->m_genDagnode)
            {
                // they've expanded the bounds of the fragment since we
                // last visited this dagnode.  keep this dagnode in the
                // fragment and revisit the ancestors in case any were
                // put in the end-fringe that should now be included.
                //
                // we leave the state as INCLUDE or INCLUDE_AND_START
                // because a duplicate start point should remain a
                // start point.

                SG_ERR_CHECK( _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode, prb_WorkQueue) );
            }
            else
            {
                // the current end-generation requested is >= the previous
                // end-generation, then we've completely explored this dagnode
                // already.  that is, a complete walk from this node for nGenerations
                // would not reveal any new information.
            }
            break;

        case SG_DFS_END_FRINGE:
            {
                // they want to start at a dagnode that we put in the
                // end-fringe.  this can happen if they need to expand
                // the bounds of the fragment to include older ancestors.
                //
                // we do not mark this as a start node because someone
                // else already has it as a parent.

                pMyDataCached->m_state = SG_DFS_INTERIOR_MEMBER;
                SG_ERR_CHECK( _add_parents_to_work_queue(pCtx, pMyDataCached->m_pDagnode, prb_WorkQueue) );
            }
            break;
        }
    }

    // we optionally put the parents of the current node into the work queue.
    //
    // service the work queue until it is empty.  this allows us to walk the graph without
    // recursion.  that is, as we decide what to do with a node, we add the parents
    // to the queue.  we then iterate thru the work queue until we have dealt with
    // everything -- that is, until all parents have been properly placed.
    //
    // we cannot use a standard iterator to drive this loop because we
    // modify the queue.

    while (1)
    {
        _process_work_queue_item(pCtx, pFrag, prb_WorkQueue, generationEnd, pRepo);
        if (!SG_context__has_err(pCtx))
            break;                          // we processed everything in the queue and are done

        if (!SG_context__err_equals(pCtx, SG_ERR_RESTART_FOREACH))
            SG_ERR_RETHROW;

        SG_context__err_reset(pCtx);        // queue changed, restart iteration
    }

    SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);

    /*
    ** we have loaded a piece of the dag (starting with the given start node
    ** and tracing all parent edges back n generations).  we leave with everything
    ** in our progress queues so that other start nodes can be added to the
    ** fragment.  this allows the processing of subsequent start nodes to
    ** override some of the decisions that we made.  for example:
    **
    **           Q_15
    **             |
    **             |
    **           Z_16
    **          /    \
    **         /      \
    **      Y_17      A_17
    **          \    /    \
    **           \  /      \
    **           B_18      C_18
    **             |
    **             |
    **           D_19
    **             |
    **             |
    **           E_20
    **
    ** if we started with the leaf E_20 and requested 3 generations, we would have:
    **     start_set := { E }
    **     include_set := { B, D, E }
    **     end_set := { Y, A }
    **
    ** after a subsequent call with the leaf C_18 and 3 generations, we would have:
    **     start_set := { C, E }
    **     include_set := { Z, A, B, C, D, E }
    **     end_set := { Q, Y }
    **
    */

    return;

fail:
    SG_RBTREE_NULLFREE(pCtx, prb_WorkQueue);
    SG_DAGNODE_NULLFREE(pCtx, pDagnodeAllocated);
}
static void _set_up_logging(
    SG_context * pCtx,
    SG_log_console__data * pcLogStdData,
    SG_log_text__data * pcLogFileData,
    SG_log_text__writer__daily_path__data * pcLogFileWriterData)
{
    // Code copied from _my_main() in sg.c

    char * szLogLevel = NULL;
    char * szLogPath = NULL;
    SG_uint32 logFileFlags = SG_LOG__FLAG__HANDLER_TYPE__ALL;

    // find the appropriate log path
    SG_ERR_CHECK( SG_localsettings__get__sz(pCtx, SG_LOCALSETTING__LOG_PATH, NULL, &szLogPath, NULL) );

    // get the configured log level
    SG_ERR_CHECK( SG_localsettings__get__sz(pCtx, SG_LOCALSETTING__LOG_LEVEL, NULL, &szLogLevel, NULL) );

    // register the stdout logger
    SG_ERR_CHECK( SG_log_console__set_defaults(pCtx, pcLogStdData) );
    SG_ERR_CHECK( SG_log_console__register(pCtx, pcLogStdData, NULL, SG_LOG__FLAG__HANDLER_TYPE__NORMAL) );

    // register the file logger
    SG_ERR_CHECK( SG_log_text__set_defaults(pCtx, pcLogFileData) );
    pcLogFileData->fWriter = SG_log_text__writer__daily_path;
    pcLogFileData->pWriterData = pcLogFileWriterData;
    pcLogFileData->szRegisterMessage = NULL;
    pcLogFileData->szUnregisterMessage = NULL;
    if (szLogLevel != NULL)
    {
        if (SG_stricmp(szLogLevel, "quiet") == 0)
        {
            logFileFlags = SG_LOG__FLAG__HANDLER_TYPE__QUIET;
            pcLogFileData->bLogVerboseOperations = SG_FALSE;
            pcLogFileData->bLogVerboseValues = SG_FALSE;
            pcLogFileData->szVerboseFormat = NULL;
            pcLogFileData->szInfoFormat = NULL;
        }
        else if (SG_stricmp(szLogLevel, "normal") == 0)
        {
            logFileFlags = SG_LOG__FLAG__HANDLER_TYPE__NORMAL;
            pcLogFileData->bLogVerboseOperations = SG_FALSE;
            pcLogFileData->bLogVerboseValues = SG_FALSE;
            pcLogFileData->szVerboseFormat = NULL;
        }
        else if (SG_stricmp(szLogLevel, "verbose") == 0)
        {
            logFileFlags = SG_LOG__FLAG__HANDLER_TYPE__ALL;
            pcLogFileData->szRegisterMessage = "---- vscript started logging ----";
            pcLogFileData->szUnregisterMessage = "---- vscript stopped logging ----";
        }
    }
    logFileFlags |= SG_LOG__FLAG__DETAILED_MESSAGES;

    SG_ERR_CHECK( SG_log_text__writer__daily_path__set_defaults(pCtx, pcLogFileWriterData) );
    pcLogFileWriterData->bReopen = SG_FALSE;
    pcLogFileWriterData->ePermissions = 0666;
    if (szLogPath != NULL)
        SG_ERR_CHECK( SG_PATHNAME__ALLOC__SZ(pCtx, &pcLogFileWriterData->pBasePath, szLogPath) );
    else
        SG_ERR_CHECK( SG_PATHNAME__ALLOC__LOG_DIRECTORY(pCtx, &pcLogFileWriterData->pBasePath) );
    pcLogFileWriterData->szFilenameFormat = "vscript-%d-%02d-%02d.log";

    SG_ERR_CHECK( SG_log_text__register(pCtx, pcLogFileData, NULL, logFileFlags) );

fail:
    SG_context__err_reset(pCtx);
    SG_NULLFREE(pCtx, szLogPath);
    SG_NULLFREE(pCtx, szLogLevel);
}
void SG_workingdir__find_mapping(
    SG_context* pCtx,
    const SG_pathname* pPathLocalDirectory,
    SG_pathname** ppPathMappedLocalDirectory,    /**< Return the actual local directory that contains the mapping */
    SG_string** ppstrNameRepoInstanceDescriptor, /**< Return the name of the repo instance descriptor */
    char** ppszidGidAnchorDirectory              /**< Return the GID of the repo directory */
    )
{
    SG_pathname* curpath = NULL;
    SG_string* result_pstrDescriptorName = NULL;
    char* result_pszidGid = NULL;
    SG_pathname* result_mappedLocalDirectory = NULL;
    SG_vhash* pvhMapping = NULL;
    SG_pathname* pDrawerPath = NULL;
    SG_pathname* pMappingFilePath = NULL;
    SG_vhash* pvh = NULL;

    SG_NULLARGCHECK_RETURN(pPathLocalDirectory);

    SG_ERR_CHECK( SG_PATHNAME__ALLOC__COPY(pCtx, &curpath, pPathLocalDirectory) );

    /* it's a directory, so it should have a final slash */
    SG_ERR_CHECK( SG_pathname__add_final_slash(pCtx, curpath) );

    while (SG_TRUE)
    {
        SG_ERR_CHECK( SG_workingdir__get_drawer_path(pCtx, curpath, &pDrawerPath) );

        SG_fsobj__verify_directory_exists_on_disk__pathname(pCtx, pDrawerPath);
        if (!SG_context__has_err(pCtx))
        {
            const char* pszDescriptorName = NULL;
            const char* pszGid = NULL;

            SG_ERR_CHECK( SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pMappingFilePath, pDrawerPath, "repo.json") );
            SG_PATHNAME_NULLFREE(pCtx, pDrawerPath);

            SG_ERR_CHECK( SG_vfile__slurp(pCtx, pMappingFilePath, &pvh) );
            SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvh, "mapping", &pvhMapping) );
            SG_PATHNAME_NULLFREE(pCtx, pMappingFilePath);

            SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhMapping, "descriptor", &pszDescriptorName) );
            SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhMapping, "anchor", &pszGid) );

            SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &result_pstrDescriptorName) );
            SG_ERR_CHECK( SG_string__set__sz(pCtx, result_pstrDescriptorName, pszDescriptorName) );

            if (pszGid)
            {
                SG_ERR_CHECK( SG_gid__alloc_clone(pCtx, pszGid, &result_pszidGid) );
            }
            else
            {
                result_pszidGid = NULL;
            }

            SG_VHASH_NULLFREE(pCtx, pvh);

            result_mappedLocalDirectory = curpath;
            curpath = NULL;

            break;
        }
        else
            SG_context__err_reset(pCtx);

        SG_PATHNAME_NULLFREE(pCtx, pDrawerPath);

        SG_pathname__remove_last(pCtx, curpath);
        if (SG_context__err_equals(pCtx, SG_ERR_CANNOTTRIMROOTDIRECTORY))
        {
            SG_context__err_reset(pCtx);
            break;
        }
        else
        {
            SG_ERR_CHECK_CURRENT;
        }
    }

    if (result_mappedLocalDirectory)
    {
        if (ppPathMappedLocalDirectory)
        {
            *ppPathMappedLocalDirectory = result_mappedLocalDirectory;
        }
        else
        {
            SG_PATHNAME_NULLFREE(pCtx, result_mappedLocalDirectory);
        }

        if (ppstrNameRepoInstanceDescriptor)
        {
            *ppstrNameRepoInstanceDescriptor = result_pstrDescriptorName;
        }
        else
        {
            SG_STRING_NULLFREE(pCtx, result_pstrDescriptorName);
        }

        if (ppszidGidAnchorDirectory)
        {
            *ppszidGidAnchorDirectory = result_pszidGid;
        }
        else
        {
            SG_NULLFREE(pCtx, result_pszidGid);
        }

        return;
    }
    else
    {
        SG_PATHNAME_NULLFREE(pCtx, curpath);
        SG_ERR_THROW_RETURN(SG_ERR_NOT_FOUND);
    }

fail:
    SG_VHASH_NULLFREE(pCtx, pvh);
    SG_PATHNAME_NULLFREE(pCtx, pDrawerPath);
    SG_PATHNAME_NULLFREE(pCtx, pMappingFilePath);
    SG_PATHNAME_NULLFREE(pCtx, result_mappedLocalDirectory);
    SG_PATHNAME_NULLFREE(pCtx, curpath);
}
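/*
 * Caller sketch (hypothetical, not from the original source): walk up from a
 * directory to find the enclosing working copy, treating SG_ERR_NOT_FOUND as
 * a normal "not inside a working copy" answer by resetting the context.  The
 * function and variable names are assumptions; the calls and free macros are
 * the ones used above.
 */
void my_find_mapping_example(SG_context* pCtx, const SG_pathname* pPathSomewhere)
{
    SG_pathname* pPathRoot = NULL;
    SG_string* pstrDescriptor = NULL;
    char* pszGidAnchor = NULL;

    SG_workingdir__find_mapping(pCtx, pPathSomewhere, &pPathRoot, &pstrDescriptor, &pszGidAnchor);
    if (SG_context__err_equals(pCtx, SG_ERR_NOT_FOUND))
    {
        SG_context__err_reset(pCtx);    // not inside a working copy; carry on
        return;
    }
    SG_ERR_CHECK_CURRENT;

    // ... use pPathRoot / pstrDescriptor / pszGidAnchor ...

fail:
    SG_PATHNAME_NULLFREE(pCtx, pPathRoot);
    SG_STRING_NULLFREE(pCtx, pstrDescriptor);
    SG_NULLFREE(pCtx, pszGidAnchor);
}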