/** * Checks a validation result and throws an error if it failed. */ static void _check_result( SG_context* pCtx, //< [in] [out] Error and context info. SG_uint32 uResult, //< [in] The validation result to check. SG_error uError, //< [in] The type of error to throw if the result indicates failure. const char* szName, //< [in] A name for the string being validated. SG_uint32 uMin, //< [in] Minimum length used in the validation. SG_uint32 uMax, //< [in] Maximum length used in the validation. const char* szInvalids //< [in] Set of invalid characters used in the validation. ) { if (uResult & SG_VALIDATE__RESULT__TOO_SHORT) { SG_ERR_THROW2(uError, (pCtx, "%s must contain at least %u character%s.", szName, uMin, uMin == 1u ? "" : "s")); } else if (uResult & SG_VALIDATE__RESULT__TOO_LONG) { SG_ERR_THROW2(uError, (pCtx, "%s cannot contain more than %u character%s.", szName, uMax, uMax == 1u ? "" : "s")); } else if (uResult & SG_VALIDATE__RESULT__INVALID_CHARACTER) { SG_ERR_THROW2(uError, (pCtx, "%s cannot contain any of the following: %s", szName, szInvalids)); } else if (uResult & SG_VALIDATE__RESULT__CONTROL_CHARACTER) { SG_ERR_THROW2(uError, (pCtx, "%s cannot contain any control characters.", szName)); } else { SG_ASSERT(uResult == SG_VALIDATE__RESULT__VALID); } fail: return; }
/**
 * Deletes every credential this application saved in the Windows
 * credential manager (all entries matching "<KEY_PREFIX><KEY_SEPARATOR>*").
 *
 * BUGFIX: the credential array returned by CredEnumerateW must be
 * released with CredFree(); the original code leaked it.
 */
void SG_password__forget_all(SG_context *pCtx)
{
	SG_string* pstrFilter = NULL;
	WCHAR* pwszFilter = NULL;
	DWORD numCreds = 0;
	PCREDENTIAL* paCreds = NULL;
	SG_uint32 i;

	// Build the wildcard filter that matches every key we ever wrote.
	SG_ERR_CHECK(  SG_STRING__ALLOC__SZ(pCtx, &pstrFilter, KEY_PREFIX)  );
	SG_ERR_CHECK(  SG_string__append__sz(pCtx, pstrFilter, KEY_SEPARATOR)  );
	SG_ERR_CHECK(  SG_string__append__sz(pCtx, pstrFilter, "*")  );
	SG_ERR_CHECK(  SG_utf8__extern_to_os_buffer__wchar(pCtx, SG_string__sz(pstrFilter), &pwszFilter, NULL)  );

	if ( !CredEnumerateW(pwszFilter, 0, &numCreds, &paCreds) )
	{
		DWORD err = GetLastError();
		// ERROR_NOT_FOUND simply means there is nothing to forget.
		if ( ERROR_NOT_FOUND != err )
			SG_ERR_THROW2( SG_ERR_GETLASTERROR(err), (pCtx, "%s", "unable to enumerate saved credentials") );
	}

	for (i = 0; i < numCreds; i++)
	{
		if ( !CredDeleteW(paCreds[i]->TargetName, CRED_TYPE_GENERIC, 0) )
			SG_ERR_THROW2( SG_ERR_GETLASTERROR(GetLastError()), (pCtx, "%s", "unable to delete credential") );
	}

	/* fall through */
fail:
	// The enumeration buffer is allocated by the OS and must be freed
	// with CredFree (see CredEnumerate docs); previously leaked.
	if (paCreds)
		CredFree(paCreds);
	SG_NULLFREE(pCtx, pwszFilter);
	SG_STRING_NULLFREE(pCtx, pstrFilter);
}
/**
 * Add the given "entryname" to the portability-collider and THROW
 * if there are any issues/problems.
 *
 * A PARTIAL COMMIT can include some changes and omit others, so the
 * set of items going into a TREENODE must be re-verified here.
 *
 * pszEntryname is the entryname as it will appear in the final
 * TREENODE/TREENODEENTRY.  pStringRepoPath is only used for error
 * messages and may carry a different entryname (e.g. the baseline
 * name of a non-participating item).
 */
void sg_wc_tx__commit__queue__utils__check_port(SG_context * pCtx,
												SG_wc_port * pPort,
												const char * pszEntryname,
												SG_treenode_entry_type tneType,
												const SG_string * pStringRepoPath)
{
	const SG_string * pStringLog = NULL;	// owned by the collider; we don't free it
	SG_wc_port_flags flagsObserved;
	SG_bool bCollision = SG_FALSE;

	SG_ERR_CHECK(  SG_wc_port__add_item(pCtx, pPort, NULL, pszEntryname, tneType, &bCollision)  );
	if (bCollision)
		SG_ERR_THROW2(  SG_ERR_PENDINGTREE_PARTIAL_CHANGE_COLLISION,
						(pCtx, "Partial commit would cause collision: '%s' (%s)",
						 pszEntryname, SG_string__sz(pStringRepoPath))  );

	SG_ERR_CHECK(  SG_wc_port__get_item_result_flags(pCtx, pPort, pszEntryname,
													 &flagsObserved, &pStringLog)  );
	if (flagsObserved)
		SG_ERR_THROW2(  SG_ERR_WC_PORT_FLAGS,
						(pCtx, "Partial commit could (potentially) cause problems: '%s' (%s)\n%s",
						 pszEntryname, SG_string__sz(pStringRepoPath), SG_string__sz(pStringLog))  );

	// the item does not cause problems.

fail:
	return;
}
/**
 * Creates a new JSContext (plus its global object) on the shared
 * jscore runtime, installs the scripting APIs, and optionally loads
 * the server-side modules.
 *
 * On success, ownership of the context is handed to the caller via
 * pp_cx / pp_glob.  On failure the context is destroyed here.
 *
 * Requires SG_jscore__new_runtime() to have been called first.
 */
void SG_jscore__new_context(SG_context * pCtx, JSContext ** pp_cx, JSObject ** pp_glob, const SG_vhash * pServerConfig)
{
	JSContext * cx = NULL;
	JSObject * glob = NULL;

	SG_ASSERT(pCtx!=NULL);
	SG_NULLARGCHECK_RETURN(pp_cx);

	if(gpJSCoreGlobalState==NULL)
		SG_ERR_THROW2_RETURN(SG_ERR_UNINITIALIZED, (pCtx, "jscore has not been initialized"));

	// Re-register the context callback (if any) that was stashed at runtime init.
	if (gpJSCoreGlobalState->cb)
		JS_SetContextCallback(gpJSCoreGlobalState->rt, gpJSCoreGlobalState->cb);

	cx = JS_NewContext(gpJSCoreGlobalState->rt, 8192);
	if(cx==NULL)
		SG_ERR_THROW2_RETURN(SG_ERR_MALLOCFAILED, (pCtx, "Failed to allocate new JS context"));

	// Bind the context to this thread and open a request before any
	// other JSAPI calls (required ordering for SpiderMonkey).
	(void)JS_SetContextThread(cx);
	JS_BeginRequest(cx);
	JS_SetOptions(cx, JSOPTION_VAROBJFIX);
	JS_SetVersion(cx, JSVERSION_LATEST);
	// Stash the SG_context so jsglue callbacks can retrieve it.
	JS_SetContextPrivate(cx, pCtx);

	glob = JS_NewCompartmentAndGlobalObject(cx, &global_class, NULL);
	if(glob==NULL)
		SG_ERR_THROW2(SG_ERR_JS, (pCtx, "Failed to create JavaScript global object for new JSContext."));
	if(!JS_InitStandardClasses(cx, glob))
		SG_ERR_THROW2(SG_ERR_JS, (pCtx, "JS_InitStandardClasses() failed."));

	// Optional shell functions registered at runtime init.
	if (gpJSCoreGlobalState->shell_functions)
		if (!JS_DefineFunctions(cx, glob, gpJSCoreGlobalState->shell_functions))
			SG_ERR_THROW2(SG_ERR_JS, (pCtx, "Failed to install shell functions"));

	SG_jsglue__set_sg_context(pCtx, cx);
	SG_ERR_CHECK( SG_jsglue__install_scripting_api(pCtx, cx, glob) );
	SG_ERR_CHECK( SG_zing_jsglue__install_scripting_api(pCtx, cx, glob) );

	if (! gpJSCoreGlobalState->bSkipModules)
	{
		// A missing modules file is tolerated; other errors propagate.
		_sg_jscore__install_modules(pCtx, cx, glob, pServerConfig);
		SG_ERR_CHECK_CURRENT_DISREGARD(SG_ERR_NOTAFILE);
	}

	*pp_cx = cx;
	*pp_glob = glob;
	return;
fail:
	// Tear down the half-built context; glob is GC-owned by cx.
	if (cx)
	{
		JS_EndRequest(cx);
		JS_DestroyContext(cx);
	}
}
/**
 * Binds a repo instance to a storage-implementation vtable.
 * If pszStorage is NULL/empty, the single installed plugin is used
 * (ambiguous when several are installed); otherwise the named plugin
 * is looked up.  A repo may only be bound once.
 */
void sg_repo__bind_vtable(SG_context* pCtx, SG_repo * pRepo, const char * pszStorage)
{
	SG_uint32 uNumVTables = 0;

	SG_NULLARGCHECK(pRepo);

	// binding is a one-shot operation
	if (pRepo->p_vtable)
	{
		SG_ERR_THROW2(SG_ERR_INVALIDARG, (pCtx, "pRepo->p_vtable is already bound"));
	}
	if (pRepo->pvh_descriptor)
	{
		SG_ERR_THROW2(SG_ERR_INVALIDARG, (pCtx, "pRepo->pvh_descriptor is already bound"));
	}

	if (!g_prb_repo_vtables)
	{
		SG_ERR_THROW2(SG_ERR_UNKNOWN_STORAGE_IMPLEMENTATION, (pCtx, "There are no repo storage plugins installed"));
	}
	SG_ERR_CHECK(  SG_rbtree__count(pCtx, g_prb_repo_vtables, &uNumVTables)  );
	if (0 == uNumVTables)
	{
		SG_ERR_THROW2(SG_ERR_UNKNOWN_STORAGE_IMPLEMENTATION, (pCtx, "There are no repo storage plugins installed"));
	}

	if (pszStorage && *pszStorage)
	{
		// caller named a specific implementation: look it up.
		SG_bool bFound = SG_FALSE;
		SG_ERR_CHECK(  SG_rbtree__find(pCtx, g_prb_repo_vtables, pszStorage, &bFound, (void**) &pRepo->p_vtable)  );
		if (!bFound || !pRepo->p_vtable)
		{
			SG_ERR_THROW(SG_ERR_UNKNOWN_STORAGE_IMPLEMENTATION);
		}
	}
	else if (1 == uNumVTables)
	{
		// no preference given and exactly one plugin installed: use it.
		SG_bool bFound = SG_FALSE;
		SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, NULL, g_prb_repo_vtables, &bFound, NULL, (void**) &pRepo->p_vtable)  );
		SG_ASSERT(pRepo->p_vtable);
	}
	else
	{
		SG_ERR_THROW2(SG_ERR_UNKNOWN_STORAGE_IMPLEMENTATION, (pCtx, "Multiple repo storage plugins installed. Must specify."));
	}

fail:
	;
}
/**
 * Extracts the host-name portion of a repo-spec URL into a new
 * SG_string (ownership transferred to the caller via ppstrHost).
 *
 * BUGFIX: URL_COMPONENTSW.dwHostNameLength is measured in WCHARs
 * (TCHARs), not bytes.  The original code passed sizeof(buf) — the
 * byte size — which claimed a buffer twice its real capacity and
 * risked InternetCrackUrlW overrunning the stack buffer.
 */
static void _get_host(SG_context* pCtx, const char* szRepoSpec, SG_string** ppstrHost)
{
	LPWSTR pwszRepoSpec = NULL;
	SG_uint32 len = 0;
	URL_COMPONENTSW UrlComponents;
	SG_string* pstrHost = NULL;
	WCHAR buf[260];

	SG_zero(UrlComponents);
	UrlComponents.dwStructSize = sizeof(UrlComponents);
	// length is in characters, not bytes (was sizeof(buf))
	UrlComponents.dwHostNameLength = sizeof(buf) / sizeof(buf[0]);
	UrlComponents.lpszHostName = buf;

	SG_ERR_CHECK( SG_utf8__extern_to_os_buffer__wchar(pCtx, szRepoSpec, &pwszRepoSpec, &len) );

	if ( !InternetCrackUrlW(pwszRepoSpec, len, ICU_DECODE, &UrlComponents) )
		SG_ERR_THROW2( SG_ERR_GETLASTERROR(GetLastError()), (pCtx, "unable to parse host from URL") );

	SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pstrHost) );
	SG_ERR_CHECK( SG_utf8__intern_from_os_buffer__wchar(pCtx, pstrHost, UrlComponents.lpszHostName) );

	SG_RETURN_AND_NULL(pstrHost, ppstrHost);

	/* fall through */
fail:
	SG_NULLFREE(pCtx, pwszRepoSpec);
	SG_STRING_NULLFREE(pCtx, pstrHost);
}
/**
 * Looks up the resolve ISSUE vhash for the given GID, re-loading the
 * pendingtree first if a previous iteration freed it after a SAVE.
 * Throws SG_ERR_ASSERT if the issue cannot be found (only possible if
 * the working copy was modified out from under us while the VFILE
 * lock was released, e.g. 'vv revert --all' in another shell).
 */
static void _resolve__lookup_issue(SG_context * pCtx,
								   struct _resolve_data * pData,
								   const char * pszGid,
								   const SG_vhash ** ppvhIssue)
{
	const SG_vhash * pvhFound = NULL;
	SG_bool bHaveIssue = SG_FALSE;

	if (!pData->pPendingTree)	// previous iteration probably freed it after a SAVE
		SG_ERR_CHECK(  SG_PENDINGTREE__ALLOC(pCtx, pData->pPathCwd, pData->bIgnoreWarnings, &pData->pPendingTree)  );

	SG_ERR_CHECK(  SG_pendingtree__find_wd_issue_by_gid(pCtx, pData->pPendingTree, pszGid, &bHaveIssue, &pvhFound)  );
	if (!bHaveIssue)
	{
		// TODO 2010/07/11 Consider mapping the GID to a repo-path (via the
		// TODO PTNODEs) for a friendlier message; in practice this is
		// TODO nearly unreachable.
		SG_ERR_THROW2(  SG_ERR_ASSERT,
						(pCtx, "RESOLVE failed to find ISSUE for GID %s.", pszGid)  );
	}

	*ppvhIssue = pvhFound;

fail:
	return;
}
/**
 * Print detailed info for each ISSUE in the array.
 */
static void _resolve__do_list(SG_context * pCtx, struct _resolve_data * pData)
{
	SG_uint32 uIssue;
	SG_uint32 uCount = 0;

	SG_ERR_CHECK(  SG_stringarray__count(pCtx, pData->psaGids, &uCount)  );
	for (uIssue = 0; uIssue < uCount; uIssue++)
	{
		const char * pszGid = NULL;
		const SG_vhash * pvhIssue = NULL;
		SG_bool bFound = SG_FALSE;

		SG_ERR_CHECK(  SG_stringarray__get_nth(pCtx, pData->psaGids, uIssue, &pszGid)  );
		SG_ERR_CHECK(  SG_pendingtree__find_wd_issue_by_gid(pCtx, pData->pPendingTree, pszGid, &bFound, &pvhIssue)  );
		if (!bFound)
		{
			// Cannot happen while we hold the pendingtree lock and keep the
			// original pendingtree structure in memory.
			SG_ERR_THROW2(  SG_ERR_ASSERT,
							(pCtx, "RESOLVE failed to find ISSUE for GID %s.", pszGid)  );
		}

		SG_ERR_CHECK(  _resolve__list(pCtx, pData, pvhIssue, NULL)  );
	}

fail:
	return;
}
/**
 * Find the alias-gid for the root "@/" (aka "@b/").  Unlike the
 * null-root, this is not a compile-time constant.
 *
 * The GID/repo-path of the root directory can never change, so the
 * TX-layer can ask for the root alias directly without going through
 * the prescan/liveview machinery.
 *
 * (We could share code with sg_wc_db__tne__foreach_in_dir_by_parent_alias()
 * — and verify there is exactly one row with the right entryname —
 * but that felt like too much trouble.)
 */
void sg_wc_db__tne__get_alias_of_root(SG_context * pCtx,
									  sg_wc_db * pDb,
									  const sg_wc_db__cset_row * pCSetRow,
									  SG_uint64 * puiAliasGid_Root)
{
	sqlite3_stmt * pStmtSelect = NULL;
	int rcStep;

	// the root is the single row whose parent is the null-root alias.
	SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pDb->psql, &pStmtSelect,
									  ("SELECT"
									   "    alias_gid"				-- 0
									   "  FROM %s"
									   "  WHERE (alias_gid_parent = ?)"),
									  pCSetRow->psz_tne_table_name)  );
	SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmtSelect, 1, SG_WC_DB__ALIAS_GID__NULL_ROOT)  );

	rcStep = sqlite3_step(pStmtSelect);
	if (rcStep != SQLITE_ROW)
		SG_ERR_THROW2(  SG_ERR_SQLITE(rcStep),
						(pCtx, "sg_wc_db:%s can't find tne row for '@/' (aka '@b/').",
						 pCSetRow->psz_tne_table_name)  );

	*puiAliasGid_Root = (SG_uint64)sqlite3_column_int64(pStmtSelect, 0);

fail:
	SG_ERR_IGNORE(  sg_sqlite__nullfinalize(pCtx, &pStmtSelect)  );
}
/**
 * Maps a GID string to its integer alias via tbl_gid.
 * A NULL/empty gid is treated as the null-root pseudo-item.
 * Throws a SQLITE error if the gid has no row.
 */
void sg_wc_db__gid__get_alias_from_gid(SG_context * pCtx,
									   sg_wc_db * pDb,
									   const char * pszGid,
									   SG_uint64 * puiAliasGid)
{
	sqlite3_stmt * pStmtQuery = NULL;
	SG_uint64 uiAlias = 0;
	int rcStep;

	SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pDb->psql, &pStmtQuery,
									  ("SELECT alias_gid FROM tbl_gid WHERE gid = ?"))  );
	SG_ERR_CHECK(  sg_sqlite__bind_text(pCtx, pStmtQuery, 1,
										((pszGid && *pszGid) ? pszGid : SG_WC_DB__GID__NULL_ROOT))  );

	rcStep = sqlite3_step(pStmtQuery);
	if (rcStep != SQLITE_ROW)
		SG_ERR_THROW2(  SG_ERR_SQLITE(rcStep),
						(pCtx, "sg_wc_db:tbl_gid can't find gid %s.", pszGid)  );

	uiAlias = (SG_uint64)sqlite3_column_int64(pStmtQuery, 0);
	SG_ERR_CHECK(  sg_sqlite__finalize(pCtx, pStmtQuery)  );

	*puiAliasGid = uiAlias;
	return;

fail:
	SG_ERR_IGNORE(  sg_sqlite__finalize(pCtx, pStmtQuery)  );
}
/**
 * Builds a curl_slist of HTTP headers from a varray of strings and
 * installs it on the curl handle via CURLOPT_HTTPHEADER.  On success
 * ownership of the list is handed to the caller through ppHeaderList
 * (it must outlive the transfer and be freed by the caller).
 *
 * BUGFIXES:
 *  - SG_ERR_CHECK_RETURN inside the loop returned without freeing the
 *    partially-built pHeaderList (leak); now routed through fail.
 *  - curl_slist_append returns NULL on failure WITHOUT freeing the
 *    existing list; assigning its result directly to pHeaderList lost
 *    (leaked) the list.  Append into a temporary instead.
 */
void SG_curl__set_headers_from_varray(SG_context * pCtx, SG_curl * pCurl, SG_varray * pvaHeaders, struct curl_slist ** ppHeaderList)
{
	CURLcode rc = CURLE_OK;
	_sg_curl* p = (_sg_curl*)pCurl;
	struct curl_slist* pHeaderList = NULL;
	SG_uint32 count = 0;
	SG_uint32 i = 0;

	SG_NULLARGCHECK_RETURN(pCurl);
	SG_NULLARGCHECK_RETURN(pvaHeaders);

	SG_ERR_CHECK( SG_varray__count(pCtx, pvaHeaders, &count) );
	for (i = 0; i < count; i++)
	{
		const char * psz = NULL;
		struct curl_slist* pNewList = NULL;

		// was SG_ERR_CHECK_RETURN: leaked pHeaderList on error
		SG_ERR_CHECK( SG_varray__get__sz(pCtx, pvaHeaders, i, &psz) );

		// on failure curl_slist_append returns NULL and leaves the
		// original list untouched; keep our pointer so fail can free it.
		pNewList = curl_slist_append(pHeaderList, psz);
		if (!pNewList)
			SG_ERR_THROW2(SG_ERR_UNSPECIFIED, (pCtx, "Failed to add HTTP header."));
		pHeaderList = pNewList;
	}

	rc = curl_easy_setopt(p->pCurl, CURLOPT_HTTPHEADER, pHeaderList);
	if (rc)
		SG_ERR_THROW2(SG_ERR_LIBCURL(rc), (pCtx, "Problem setting HTTP headers" ));

	SG_RETURN_AND_NULL(pHeaderList, ppHeaderList);

fail:
	if (pHeaderList)
		SG_CURL_HEADERS_NULLFREE(pCtx, pHeaderList);
}
/**
 * Examines the HTTP response code of a completed curl transfer and
 * throws an appropriate SG error for anything other than 200.
 *
 * For 500/410 responses with a captured body, the body is parsed as a
 * JSON error vhash and its "msg" field (when present) becomes the
 * error message; a non-JSON body is reported verbatim.  401 maps to
 * SG_ERR_AUTHORIZATION_REQUIRED; everything else maps to
 * SG_ERR_SERVER_HTTP_ERROR carrying the numeric code.
 */
void SG_curl__throw_on_non200(SG_context* pCtx, SG_curl* pCurl)
{
	SG_int32 httpResponseCode = 0;
	SG_vhash* pvhErr = NULL;

	_sg_curl* p = (_sg_curl*)pCurl;

	SG_NULLARGCHECK_RETURN(pCurl);

	SG_ERR_CHECK( SG_curl__getinfo__int32(pCtx, pCurl, CURLINFO_RESPONSE_CODE, &httpResponseCode) );
	if (httpResponseCode != 200)
	{
		// 500/410 with a body: try to extract a structured server message.
		if ((httpResponseCode == 500 || httpResponseCode == 410) && p->pstrErr)
		{
			SG_bool bHas = SG_FALSE;
			const char* szMsg = NULL;

			SG_VHASH__ALLOC__FROM_JSON__SZ(pCtx, &pvhErr, SG_string__sz(p->pstrErr));
			if (SG_context__err_equals(pCtx, SG_ERR_JSONPARSER_SYNTAX))
			{
				// The server didn't return a JSON-formatted response.
				if (httpResponseCode == 500)
					SG_ERR_RESET_THROW2(SG_ERR_EXTENDED_HTTP_500, (pCtx, "%s", SG_string__sz(p->pstrErr)));
				else
					SG_ERR_THROW2(SG_ERR_SERVER_HTTP_ERROR, (pCtx, "%d", httpResponseCode));
			}
			// propagate any other parse failure as-is
			SG_ERR_CHECK_CURRENT;

			SG_ERR_CHECK( SG_vhash__has(pCtx, pvhErr, "msg", &bHas) );
			if (bHas)
				SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhErr, "msg", &szMsg) );
			// prefer the server-supplied message, else the raw body
			if (szMsg)
				SG_ERR_THROW2(SG_ERR_EXTENDED_HTTP_500, (pCtx, "%s", szMsg));
			else
				SG_ERR_THROW2(SG_ERR_EXTENDED_HTTP_500, (pCtx, "%s", SG_string__sz(p->pstrErr)));
		}
		else if (httpResponseCode == 401)
		{
			SG_ERR_THROW(SG_ERR_AUTHORIZATION_REQUIRED);
		}
		else
			SG_ERR_THROW2(SG_ERR_SERVER_HTTP_ERROR, (pCtx, "%d", httpResponseCode));
	}

	/* common cleanup */
fail:
	SG_VHASH_NULLFREE(pCtx, pvhErr);
}
/**
 * Initialize a WD (create .sgdrawer and friends) and set up shop.
 * The given directory pathname can either be to a new (to be created)
 * directory -or- an existing (possibly non-empty) directory.
 *
 * WARNING: This routine deviates from the model of the other SG_wc__
 * WARNING: routines because the WD does not yet exist (and we haven't
 * WARNING: yet created .sgdrawer).  The caller may have already created
 * WARNING: an empty directory for us, but that is it.
 *
 * WARNING: This routine also deviates in that we *ONLY* provide a wc8api
 * WARNING: version and *NOT* a wc7txapi version.  Likewise we only provide
 * WARNING: a sg.wc.initialize() version and not a sg_wc_tx.initialize() version.
 * WARNING: This is because I want it to be a complete self-contained
 * WARNING: operation (and I want to hide the number of SQL TXs that are
 * WARNING: required to get everything set up).
 *
 * Also, the given path (to the new WD root) can be an absolute or
 * relative path or NULL (we'll substitute the CWD).  Unlike the other
 * API routines, it cannot be a repo-path.
 *
 * This should only be used by 'vv init' or sg.vv2.init_new_repo()
 * (after the repo has been created).
 */
void SG_wc__initialize(SG_context * pCtx,
					   const char * pszRepoName,
					   const char * pszPath,
					   const char * pszHidCSet,
					   const char * pszTargetBranchName)
{
	SG_wc_tx * pWcTx = NULL;

	SG_NONEMPTYCHECK_RETURN( pszRepoName );		// repo-name is required
	// pszPath is optional; default to CWD.
	SG_NONEMPTYCHECK_RETURN( pszHidCSet );		// the changeset to populate tne_L0 with
	// pszTargetBranchName is optional; default to unattached.

	// Create the WD root, the drawer, and the SQL DB.
	// Load the given CSET into the tne_L0 table.
	//
	// We DO NOT create a timestamp cache at this time
	// because of clock blurr problem.  (We can't trust
	// mtime until about 2 seconds after we create the
	// files.)
	SG_ERR_CHECK( SG_WC_TX__ALLOC__BEGIN__CREATE(pCtx, &pWcTx, pszRepoName, pszPath, SG_FALSE, pszHidCSet) );

	if (pszTargetBranchName)
	{
		SG_ERR_CHECK( sg_wc_db__branch__attach(pCtx, pWcTx->pDb, pszTargetBranchName,
											   SG_VC_BRANCHES__CHECK_ATTACH_NAME__DONT_CARE, SG_TRUE) );
	}

#if defined(DEBUG)
#define DEBUG_INIT_FAILURE__E2 "gb54c5ca2cfe349eaa3ce599100d1c4d737052960d1e411e1a51e002500da2b78.test"
	{
		// Force a failure while we are initializing a new repo using the current directory in
		// order to test the cleanup/recovery code.
		// See st_wc_checkout_W7885.js
		SG_bool bExists = SG_FALSE;
		SG_pathname * pPathTest = NULL;
		SG_ERR_CHECK( SG_PATHNAME__ALLOC__SZ(pCtx, &pPathTest, DEBUG_INIT_FAILURE__E2) );
		SG_ERR_CHECK( SG_fsobj__exists__pathname(pCtx, pPathTest, &bExists, NULL, NULL) );
		SG_PATHNAME_NULLFREE(pCtx, pPathTest);
		if (bExists)
			SG_ERR_THROW2( SG_ERR_DEBUG_1, (pCtx, "Init: %s", DEBUG_INIT_FAILURE__E2) );
	}
#endif

	// refuse to nest a working copy inside another one's drawer
	SG_ERR_CHECK( _check_for_nested_drawer(pCtx, pWcTx) );

	SG_ERR_CHECK( SG_wc_tx__apply(pCtx, pWcTx) );
	SG_ERR_CHECK( SG_wc_tx__free(pCtx, pWcTx) );
	return;

fail:
	// abort variant also removes any WD/drawer pieces we created
	SG_ERR_IGNORE( SG_wc_tx__abort_create_and_free(pCtx, &pWcTx) );
}
/**
 * Invokes the JavaScript function checkModuleDags(reponame, modulesPath)
 * (if the server-side modules define it) so the modules can verify
 * their DAGs for the given repo.  No-op when modules are disabled or
 * no modules path was configured.
 */
void SG_jscore__check_module_dags(SG_context *pCtx, JSContext *cx, JSObject *glob, const char *reponame)
{
	jsval args[2];
	JSBool js_ok;
	jsval rval;
	JSString *pjs;
	jsval fo = JSVAL_VOID;

	if (gpJSCoreGlobalState->bSkipModules || (! gpJSCoreGlobalState->pPathToModules))
		return;

	// make sure the module code (and checkModuleDags, if any) is loaded
	SG_ERR_CHECK( _sg_jscore__install_modules(pCtx, cx, glob, NULL) );

	// args: (reponame, path-to-modules)
	SG_JS_NULL_CHECK(  (pjs = JS_NewStringCopyZ(cx, reponame))  );
	args[0] = STRING_TO_JSVAL(pjs);
	SG_JS_NULL_CHECK(  (pjs = JS_NewStringCopyZ(cx, SG_pathname__sz(gpJSCoreGlobalState->pPathToModules)))  );
	args[1] = STRING_TO_JSVAL(pjs);

	if (! JS_LookupProperty(cx, glob, "checkModuleDags", &fo))
	{
		SG_ERR_CHECK_CURRENT;
		SG_ERR_THROW2(SG_ERR_JS, (pCtx, "lookup of checkModuleDags failed"));
	}

	// only call the function if the modules actually defined it
	if (!JSVAL_IS_VOID(fo))
	{
		js_ok = JS_CallFunctionName(cx, glob, "checkModuleDags", SG_NrElements(args), args, &rval);
		// an SG error raised inside the JS call takes precedence
		SG_ERR_CHECK_CURRENT;
		if(!js_ok)
			SG_ERR_THROW2(SG_ERR_JS, (pCtx, "An error occurred initializing modules: call to JavaScript checkModuleDags() failed"));
	}

fail:
	;
}
/**
 * Convert relative path to absolute, but make sure that normalization
 * doesn't take it outside of the working directory.
 *
 * This is just pathname/string parsing; we DO NOT confirm that the
 * path exists or could exist.
 */
void sg_wc_db__path__relative_to_absolute(SG_context * pCtx,
										  const sg_wc_db * pDb,
										  const SG_string * pStringRelativePath,
										  SG_pathname ** ppPathItem)
{
	SG_pathname * pPathRoot = NULL;
	SG_pathname * pPathResult = NULL;
	SG_uint32 uLenRoot;

	// choke if they give us a repo-path.
	SG_ARGCHECK_RETURN(  (SG_string__sz(pStringRelativePath)[0] != '@'), pStringRelativePath  );

	// choke if this WC TX isn't cwd-based.
	SG_ERR_CHECK(  _check_if_relative_paths_allowed(pCtx, pDb)  );

	// work on a copy of the WD top so we can force a trailing slash.
	SG_ERR_CHECK(  SG_PATHNAME__ALLOC__COPY(pCtx, &pPathRoot, pDb->pPathWorkingDirectoryTop)  );
	SG_ERR_CHECK(  SG_pathname__add_final_slash(pCtx, pPathRoot)  );
	uLenRoot = SG_pathname__length_in_bytes(pPathRoot);

	// build the normalized absolute path in a fresh object so the
	// prefix test below compares against the untouched root.
	SG_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathResult, pPathRoot, SG_string__sz(pStringRelativePath))  );

	if (strncmp(SG_pathname__sz(pPathResult), SG_pathname__sz(pPathRoot), uLenRoot) != 0)
	{
		SG_ERR_THROW2(  SG_ERR_PATH_NOT_IN_WORKING_COPY,
						(pCtx, "The path '%s' is not inside the working copy rooted at '%s'.",
						 SG_string__sz(pStringRelativePath),
						 SG_pathname__sz(pDb->pPathWorkingDirectoryTop))  );
	}

	SG_PATHNAME_NULLFREE(pCtx, pPathRoot);
	*ppPathItem = pPathResult;
	return;

fail:
	SG_PATHNAME_NULLFREE(pCtx, pPathRoot);
	SG_PATHNAME_NULLFREE(pCtx, pPathResult);
}
/**
 * Fetch the current branch if attached.
 * Return NULL if detached.
 */
void sg_wc_db__branch__get_branch(SG_context * pCtx,
								  sg_wc_db * pDb,
								  char ** ppszBranchName)
{
	sqlite3_stmt * pStmt = NULL;
	int rcStep;

	SG_ERR_CHECK(  sg_sqlite__prepare(pCtx, pDb->psql, &pStmt,
									  ("SELECT"
									   "    name"				-- 0
									   "  FROM tbl_branch"
									   "  WHERE id = ?"))  );
	SG_ERR_CHECK(  sg_sqlite__bind_int64(pCtx, pStmt, 1, ID_KEY)  );

	rcStep = sqlite3_step(pStmt);
	if (rcStep == SQLITE_ROW)
	{
		// a NULL name column means we are detached.
		if (sqlite3_column_type(pStmt, 0) == SQLITE_NULL)
			*ppszBranchName = NULL;
		else
			SG_ERR_CHECK(  SG_STRDUP(pCtx, (const char *)sqlite3_column_text(pStmt, 0), ppszBranchName)  );
	}
	else if (rcStep == SQLITE_DONE)
	{
		// no row at all also means detached.
		*ppszBranchName = NULL;
	}
	else
	{
		SG_ERR_THROW2(  SG_ERR_SQLITE(rcStep),
						(pCtx, "sg_wc_db:tbl_branch can't get branch name.")  );
	}

	SG_ERR_CHECK(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );

#if TRACE_WC_DB
	SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
							   "sg_wc_db__branch__get_branch: %s\n",
							   ((*ppszBranchName) ? (*ppszBranchName) : "<detached>"))  );
#endif

	return;

fail:
	SG_ERR_IGNORE(  sg_sqlite__nullfinalize(pCtx, &pStmt)  );
}
/**
 * Releases a named JavaScript mutex previously acquired through the
 * jscore named-mutex registry.  When the last holder/waiter releases
 * it (count drops to 0), the entry is removed from the registry and
 * freed.  Throws SG_ERR_NOT_FOUND if no mutex with that name exists.
 */
void SG_jscore__mutex__unlock(
	SG_context* pCtx,
	const char* pszName)
{
	SG_bool bExists = SG_FALSE;
	_namedMutex* pNamedMutex = NULL;

	SG_ASSERT(gpJSCoreGlobalState);

	/* We always acquire the rbtree mutex first, then the specific named mutex. A deadlock is impossible. */
	//SG_ERR_CHECK( SG_log__report_verbose(pCtx, "Waiting for JS mutex manager in UNLOCK.") );
	SG_ERR_CHECK( SG_mutex__lock(pCtx, &gpJSCoreGlobalState->mutexJsNamed) );
	SG_ERR_CHECK( SG_rbtree__find(pCtx, gpJSCoreGlobalState->prbJSMutexes, pszName, &bExists, (void**)&pNamedMutex) );
	if (bExists)
	{
		SG_ASSERT(pNamedMutex);
		//SG_ERR_CHECK( SG_log__report_verbose(pCtx, "Releasing named JS mutex: %s", pszName) );
		SG_ERR_CHECK( SG_mutex__unlock(pCtx, &pNamedMutex->mutex) );
		pNamedMutex->count--; // Cannot be touched unless you hold mutexJsNamed. We do here.
		if ( 0 == pNamedMutex->count )
		{
			// Last holder and no waiters: retire the named mutex entirely.
			//SG_ERR_CHECK( SG_log__report_verbose(pCtx, "Nobody else is waiting. Removing named JS mutex: %s", pszName) );
			SG_ERR_CHECK( SG_rbtree__remove(pCtx, gpJSCoreGlobalState->prbJSMutexes, pszName) );
			SG_NULLFREE(pCtx, pNamedMutex);
		}
		//else if (pNamedMutex->count > 1) // Cannot be touched unless you hold mutexJsNamed. We do here.
			//SG_ERR_CHECK( SG_log__report_verbose(pCtx, "%u threads are waiting for named JS mutex: %s", pNamedMutex->count-1, pszName) );

		/* We deliberately unlock the rbtree mutex after the named mutex.
		 * Creating a new mutex with the same name can't be allowed until this one is released,
		 * because they are logically the same lock. Unlocking doesn't block on anything and should be fast. */
		SG_ERR_CHECK( SG_mutex__unlock(pCtx, &gpJSCoreGlobalState->mutexJsNamed) );
		//SG_ERR_CHECK( SG_log__report_verbose(pCtx, "Released JS mutex manager in UNLOCK.") );
	}
	else
		SG_ERR_THROW2( SG_ERR_NOT_FOUND, (pCtx, "Named mutex: %s", pszName) );

	return;

fail:
	// error path: make sure the registry lock is not left held.
	SG_ERR_IGNORE( SG_mutex__unlock(pCtx, &gpJSCoreGlobalState->mutexJsNamed) );
}
/**
 * Find the appropriate external tool to let the user perform a TEXT
 * MERGE on a file.
 *
 * TODO 2010/07/13 For now, this is hard-coded to use DiffMerge.
 * TODO Later we want to allow multiple configured tools and/or
 * TODO selection by file suffix and so on.
 */
static void _resolve__external_tool__lookup(SG_context * pCtx,
											struct _resolve_data * pData,
											const char * pszGid,
											const SG_vhash * pvhIssue,
											SG_string * pStrRepoPath,
											_resolve__external_tool ** ppET)
{
	_resolve__external_tool * pTool = NULL;
	SG_repo * pRepo;

	SG_UNUSED( pszGid );
	SG_UNUSED( pvhIssue );

	SG_ERR_CHECK(  SG_pendingtree__get_repo(pCtx, pData->pPendingTree, &pRepo)  );

	SG_ERR_CHECK(  SG_alloc1(pCtx, pTool)  );

	// TODO 2010/07/13 Use localsettings to determine WHICH external tool
	// TODO (by suffix etc.), then look up THAT tool's executable path and
	// TODO argument list and substitute the pathnames into the arguments.
	// TODO For now, we just hard-code DiffMerge.
	SG_ERR_CHECK(  SG_strdup(pCtx, "DiffMerge", &pTool->pszName)  );

	SG_localsettings__get__sz(pCtx, "merge/diffmerge/program", pRepo, &pTool->pszExe, NULL);
	if (SG_context__has_err(pCtx) || (!pTool->pszExe) || (!*pTool->pszExe))
	{
		SG_context__err_reset(pCtx);
		SG_ERR_THROW2(  SG_ERR_NO_MERGE_TOOL_CONFIGURED,
						(pCtx, "'%s' Use 'vv localsetting set merge/diffmerge/program' and retry -or- manually merge content and then use 'vv resolve --mark'.",
						 SG_string__sz(pStrRepoPath))  );
	}

	// TODO 2010/07/13 Get argvec.

	*ppET = pTool;
	return;

fail:
	_RESOLVE__EXTERNAL_TOOL__NULLFREE(pCtx, pTool);
}
/**
 * Return a VHASH containing the resolve info for the named item,
 * if there is any.
 *
 * We return NULL if there is nothing.
 */
void SG_wc_tx__get_item_resolve_info(SG_context * pCtx,
									 SG_wc_tx * pWcTx,
									 const char * pszInput,
									 SG_vhash ** ppvhResolveInfo)
{
	SG_vhash * pvhInfo = NULL;
	SG_string * pStringRepoPath = NULL;
	sg_wc_liveview_item * pLVI;			// we do not own this
	SG_bool bKnown = SG_FALSE;
	char chDomain;

	SG_NULLARGCHECK_RETURN( pWcTx );
	SG_NONEMPTYCHECK_RETURN( pszInput );
	SG_NULLARGCHECK_RETURN( ppvhResolveInfo );

	SG_ERR_CHECK(  sg_wc_db__path__anything_to_repopath(pCtx, pWcTx->pDb, pszInput,
														SG_WC_DB__PATH__IMPORT_FLAGS__TREAT_NULL_AS_ERROR,
														&pStringRepoPath, &chDomain)  );
#if TRACE_WC_TX_STATUS
	SG_ERR_IGNORE(  SG_console(pCtx, SG_CS_STDERR,
							   "SG_wc_tx__get_item_resolve_info: '%s' normalized to [domain %c] '%s'\n",
							   pszInput, chDomain, SG_string__sz(pStringRepoPath))  );
#endif

	SG_ERR_CHECK(  sg_wc_tx__liveview__fetch_item__domain(pCtx, pWcTx, pStringRepoPath, &bKnown, &pLVI)  );
	if (!bKnown)
	{
		// Only hit when the path is completely bogus and took us off
		// into the weeds (as opposed to naming an uncontrolled item).
		SG_ERR_THROW2(  SG_ERR_NOT_FOUND,
						(pCtx, "Unknown item '%s'.", SG_string__sz(pStringRepoPath))  );
	}

	// only items with an open issue AND saved resolutions have info.
	if (pLVI->pvhIssue && pLVI->pvhSavedResolutions)
		SG_ERR_CHECK(  SG_VHASH__ALLOC__COPY(pCtx, &pvhInfo, pLVI->pvhSavedResolutions)  );

	SG_RETURN_AND_NULL(pvhInfo, ppvhResolveInfo);

fail:
	SG_STRING_NULLFREE(pCtx, pStringRepoPath);
	SG_VHASH_NULLFREE(pCtx, pvhInfo);
}
/**
 * Evaluates every valid .js file in the given module directory in the
 * supplied JS context/global.  Throws SG_ERR_JS (naming the module and
 * file) if evaluation of any script fails.
 */
static void _loadModuleDir(SG_context *pCtx, const SG_pathname *path, const char *modname,
						   JSContext *cx, JSObject *glob)
{
	SG_rbtree * prbFiles = NULL;
	SG_rbtree_iterator * pIter = NULL;
	const char *szFile = NULL;
	SG_pathname *pFilePath = NULL;
	SG_bool bHasNext = SG_FALSE;
	SG_bool bIsJsFile = SG_FALSE;
	char *psz_js = NULL;		// free
	SG_uint32 len_js = 0;
	jsval rval = JSVAL_VOID;

	SG_ERR_CHECK(  SG_dir__list(pCtx, path, NULL, NULL, ".js", &prbFiles)  );
	SG_ERR_CHECK(  SG_rbtree__iterator__first(pCtx, &pIter, prbFiles, &bHasNext, &szFile, NULL)  );

	// reusable path object: append filename, read, then pop it again.
	SG_ERR_CHECK(  SG_PATHNAME__ALLOC__COPY(pCtx, &pFilePath, path)  );

	while (bHasNext)
	{
		SG_ERR_CHECK(  _isValidJsFile(pCtx, szFile, &bIsJsFile)  );
		if (bIsJsFile)
		{
			SG_ERR_CHECK(  SG_pathname__append__from_sz(pCtx, pFilePath, szFile)  );
			SG_ERR_CHECK(  sg_read_entire_file(pCtx, pFilePath, &psz_js, &len_js)  );

			if (!JS_EvaluateScript(cx, glob, psz_js, len_js, szFile, 1, &rval))
			{
				// an SG error raised inside the script wins over the generic one
				SG_ERR_CHECK_CURRENT;
				SG_ERR_THROW2(SG_ERR_JS, (pCtx, "An error occurred loading %s javascript file '%s'", modname, szFile));
			}

			SG_NULLFREE(pCtx, psz_js);
			SG_ERR_CHECK(  SG_pathname__remove_last(pCtx, pFilePath)  );
		}

		SG_ERR_CHECK(  SG_rbtree__iterator__next(pCtx, pIter, &bHasNext, &szFile, NULL)  );
	}

fail:
	SG_PATHNAME_NULLFREE(pCtx, pFilePath);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pIter);
	SG_RBTREE_NULLFREE(pCtx, prbFiles);
	SG_NULLFREE(pCtx, psz_js);
}
/**
 * Stores a username/password pair for the given repo-spec in the
 * Windows credential manager as a generic credential.  The target
 * name is derived from the repo-spec + username via _get_key().
 */
void SG_password__set(
	SG_context* pCtx,
	const char *szRepoSpec,
	SG_string *pstrUserName,
	SG_string *pstrPassword)
{
	SG_string* pstrTarget = NULL;
	LPWSTR pwszTarget = NULL;
	LPWSTR pwszPassword = NULL;
	SG_uint32 lenPassword = 0;
	LPWSTR pwszUserName = NULL;
	CREDENTIAL cred;

	SG_NULLARGCHECK_RETURN(szRepoSpec);
	SG_NULLARGCHECK_RETURN(pstrUserName);
	SG_NULLARGCHECK_RETURN(pstrPassword);

	if (!SG_string__length_in_bytes(pstrUserName))
		SG_ERR_THROW2_RETURN(SG_ERR_INVALIDARG, (pCtx, "%s", "pstrUserName is empty"));

	// Build the credential-manager key, then convert everything to the
	// wide-char forms the Win32 credential API expects.
	SG_ERR_CHECK(  _get_key(pCtx, szRepoSpec, SG_string__sz(pstrUserName), &pstrTarget)  );
	SG_ERR_CHECK(  SG_utf8__extern_to_os_buffer__wchar(pCtx, SG_string__sz(pstrTarget), &pwszTarget, NULL)  );
	SG_ERR_CHECK(  SG_utf8__extern_to_os_buffer__wchar(pCtx, SG_string__sz(pstrUserName), &pwszUserName, NULL)  );
	SG_ERR_CHECK(  SG_utf8__extern_to_os_buffer__wchar(pCtx, SG_string__sz(pstrPassword), &pwszPassword, &lenPassword)  );

	SG_zero(cred);
	cred.Type = CRED_TYPE_GENERIC;
	cred.TargetName = pwszTarget;
	cred.CredentialBlob = (LPBYTE)pwszPassword;
	cred.CredentialBlobSize = lenPassword*sizeof(wchar_t);
	// CRED_PERSIST_LOCAL_MACHINE is unsupported on Windows Vista Home Basic,
	// Windows Vista Home Premium, Windows Vista Starter, and Windows XP Home Edition.
	cred.Persist = CRED_PERSIST_LOCAL_MACHINE;
	cred.UserName = pwszUserName;

	if ( !CredWriteW(&cred, 0) )
		SG_ERR_THROW2( SG_ERR_GETLASTERROR(GetLastError()), (pCtx, "%s", "unable to save credentials") );

	/* fall through */
fail:
	SG_STRING_NULLFREE(pCtx, pstrTarget);
	SG_NULLFREE(pCtx, pwszTarget);
	SG_NULLFREE(pCtx, pwszPassword);
	SG_NULLFREE(pCtx, pwszUserName);
}
/**
 * One-time initialization of the shared jscore global state and the
 * SpiderMonkey runtime.  Throws SG_ERR_ALREADY_INITIALIZED if called
 * twice.  Optionally returns the runtime through ppRt.
 *
 * BUGFIX: on failure the old code freed the pathname members but
 * leaked the gpJSCoreGlobalState struct itself AND left the global
 * pointer non-NULL, so every subsequent call failed with
 * SG_ERR_ALREADY_INITIALIZED even though initialization never
 * completed.  The fail path now tears down everything it created and
 * resets the global to NULL.  (Also removed the unused szSsjsMutable.)
 */
void SG_jscore__new_runtime(
	SG_context * pCtx,
	JSContextCallback cb,
	JSFunctionSpec *shell_functions,
	SG_bool bSkipModules,
	JSRuntime **ppRt
	)
{
	if(gpJSCoreGlobalState != NULL)
		SG_ERR_THROW_RETURN(SG_ERR_ALREADY_INITIALIZED);

	SG_ERR_CHECK( SG_alloc1(pCtx, gpJSCoreGlobalState) );

	// Store this for later.
	gpJSCoreGlobalState->cb = cb;
	gpJSCoreGlobalState->shell_functions = shell_functions;
	gpJSCoreGlobalState->bSkipModules = bSkipModules;

	if (! bSkipModules)
		SG_ERR_CHECK( _sg_jscore_getpaths(pCtx) );

	SG_ERR_CHECK( SG_mutex__init(pCtx, &gpJSCoreGlobalState->mutexJsNamed) );
	SG_ERR_CHECK( SG_RBTREE__ALLOC(pCtx, &gpJSCoreGlobalState->prbJSMutexes) );

	// Start up SpiderMonkey.
	JS_SetCStringsAreUTF8();
	gpJSCoreGlobalState->rt = JS_NewRuntime(64L * 1024L * 1024L); // TODO decide the right size here
	if(gpJSCoreGlobalState->rt==NULL)
		SG_ERR_THROW2(SG_ERR_JS, (pCtx, "Failed to allocate JS Runtime"));

	if (ppRt)
		*ppRt = gpJSCoreGlobalState->rt;

	return;
fail:
	// undo the partial initialization and allow a retry.
	SG_PATHNAME_NULLFREE(pCtx, gpJSCoreGlobalState->pPathToDispatchDotJS);
	SG_PATHNAME_NULLFREE(pCtx, gpJSCoreGlobalState->pPathToModules);
	SG_RBTREE_NULLFREE(pCtx, gpJSCoreGlobalState->prbJSMutexes);
	SG_NULLFREE(pCtx, gpJSCoreGlobalState);	// also resets the global to NULL
}
/**
 * Convert absolute path to a wd-top-relative string.
 *
 * This is just pathname/string parsing; we DO NOT confirm that the
 * path exists.  We only confirm that the result is properly contained
 * within the working directory.
 */
void sg_wc_db__path__absolute_to_repopath(SG_context * pCtx,
										  const sg_wc_db * pDb,
										  const SG_pathname * pPathItem,
										  SG_string ** ppStringRepoPath)
{
	SG_string * pStringResult = NULL;
	const char * szResult;

	SG_workingdir__wdpath_to_repopath(pCtx, pDb->pPathWorkingDirectoryTop, pPathItem, SG_FALSE, &pStringResult);
	if (SG_CONTEXT__HAS_ERR(pCtx))
	{
		// any of these simply mean "outside the WD"; everything else is fatal.
		if (   SG_context__err_equals(pCtx, SG_ERR_CANNOT_MAKE_RELATIVE_PATH)
			|| SG_context__err_equals(pCtx, SG_ERR_ITEM_NOT_UNDER_VERSION_CONTROL)
			|| SG_context__err_equals(pCtx, SG_ERR_PATH_NOT_IN_WORKING_COPY))
			goto throw_not_in_working_copy;

		SG_ERR_RETHROW;
	}

	// a relative result beginning "../" (or exactly "..") escaped the WD.
	szResult = SG_string__sz(pStringResult);
	if ((szResult[0] == '.') && (szResult[1] == '.')
		&& ((szResult[2] == '/') || (szResult[2] == 0)))
		goto throw_not_in_working_copy;

	*ppStringRepoPath = pStringResult;
	return;

throw_not_in_working_copy:
	SG_context__err_reset(pCtx);
	SG_ERR_THROW2(  SG_ERR_PATH_NOT_IN_WORKING_COPY,
					(pCtx, "The path '%s' is not inside the working copy rooted at '%s'.",
					 SG_pathname__sz(pPathItem),
					 SG_pathname__sz(pDb->pPathWorkingDirectoryTop))  );

fail:
	SG_STRING_NULLFREE(pCtx, pStringResult);
}
/**
 * Generates a not-yet-existing backup pathname inside the WD top for
 * the given gid/entryname, probing up to 100 candidate names built
 * with SG_BACKUP_FORMAT.  Throws SG_ERR_TOO_MANY_BACKUP_FILES when
 * all 100 candidates already exist.
 */
void sg_wc_db__path__generate_backup_path(SG_context * pCtx,
										  const sg_wc_db * pDb,
										  const char * pszGid,
										  const char * pszEntryname,
										  SG_pathname ** ppPathBackup)
{
	SG_pathname * pPathCandidate = NULL;
	SG_string * pStringName = NULL;
	SG_uint32 uAttempt;
	SG_bool bExists = SG_FALSE;

	// TODO 2011/10/25 assert no slashes in entryname.

	SG_ERR_CHECK(  SG_STRING__ALLOC(pCtx, &pStringName)  );

	// probe numbered candidates until we find one that doesn't exist.
	for (uAttempt = 0; uAttempt < 100; uAttempt++)
	{
		SG_ERR_CHECK(  SG_string__clear(pCtx, pStringName)  );
		SG_ERR_CHECK(  SG_string__sprintf(pCtx, pStringName, SG_BACKUP_FORMAT,
										  pszGid, pszEntryname, uAttempt)  );
		SG_ERR_CHECK(  SG_PATHNAME__ALLOC__PATHNAME_SZ(pCtx, &pPathCandidate,
													   pDb->pPathWorkingDirectoryTop,
													   SG_string__sz(pStringName))  );
		SG_ERR_CHECK(  SG_fsobj__exists__pathname(pCtx, pPathCandidate, &bExists, NULL, NULL)  );
		if (!bExists)
			break;
		SG_PATHNAME_NULLFREE(pCtx, pPathCandidate);
	}
	if (uAttempt == 100)
		SG_ERR_THROW2(  SG_ERR_TOO_MANY_BACKUP_FILES, (pCtx, "%s", pszEntryname)  );

	*ppPathBackup = pPathCandidate;
	pPathCandidate = NULL;

fail:
	SG_PATHNAME_NULLFREE(pCtx, pPathCandidate);
	SG_STRING_NULLFREE(pCtx, pStringName);
}
/**
 * Look up the GID string associated with a GID alias in tbl_gid.
 *
 * Uses a prepared statement cached on the pDb (prepared lazily on
 * first call and reset/re-bound on every call), so the statement
 * must be reset and its bindings cleared on BOTH the success and
 * the fail paths before anyone else can use it.
 *
 * Throws a SQLITE error if the alias has no row.  On success the
 * caller owns the returned string (*ppszGid).
 */
void sg_wc_db__gid__get_gid_from_alias(SG_context * pCtx,
									   sg_wc_db * pDb,
									   SG_uint64 uiAliasGid,
									   char ** ppszGid)
{
	char * pszGid = NULL;
	int rc;

	// TODO 2011/08/01 Should this check for "*null*" and offer to return NULL ?

	//Caching the prepared statement is a pretty huge performance win.
	if (pDb->pSqliteStmt__get_gid_from_alias == NULL)
	{
		SG_ERR_CHECK( sg_sqlite__prepare(pCtx, pDb->psql,
										 &pDb->pSqliteStmt__get_gid_from_alias,
										 ("SELECT gid FROM tbl_gid WHERE alias_gid = ?")) );
	}

	SG_ERR_CHECK( sg_sqlite__bind_int64(pCtx, pDb->pSqliteStmt__get_gid_from_alias,
										1, uiAliasGid) );

	// Exactly one row is expected; anything else (including
	// SQLITE_DONE, i.e. alias not found) is reported as an error.
	if ((rc=sqlite3_step(pDb->pSqliteStmt__get_gid_from_alias)) != SQLITE_ROW)
	{
		SG_int_to_string_buffer bufui64;
		SG_ERR_THROW2( SG_ERR_SQLITE(rc),
					   (pCtx, "sg_wc_db:tbl_gid can't find alias %s.",
						SG_uint64_to_sz(uiAliasGid, bufui64)) );
	}

	// Copy the result out of the statement before resetting it.
	// NOTE(review): assumes the gid column is never SQL NULL here
	// (sqlite3_column_text would return NULL) -- confirm against the
	// table's schema/insert paths.
	SG_ERR_CHECK( SG_STRDUP(pCtx,
							(const char *)sqlite3_column_text(pDb->pSqliteStmt__get_gid_from_alias, 0),
							&pszGid) );

	// Leave the cached statement ready for the next caller.
	SG_ERR_CHECK( sg_sqlite__reset(pCtx, pDb->pSqliteStmt__get_gid_from_alias) );
	SG_ERR_CHECK( sg_sqlite__clear_bindings(pCtx, pDb->pSqliteStmt__get_gid_from_alias) );

	*ppszGid = pszGid;
	return;

fail:
	// Best-effort cleanup of the cached statement so a failure here
	// does not poison subsequent lookups.
	SG_ERR_IGNORE( sg_sqlite__reset(pCtx, pDb->pSqliteStmt__get_gid_from_alias) );
	SG_ERR_IGNORE( sg_sqlite__clear_bindings(pCtx, pDb->pSqliteStmt__get_gid_from_alias) );
	SG_NULLFREE(pCtx, pszGid);
}
/**
 * Look up the GID string and its "tmp" flag for a GID alias.
 *
 * Like sg_wc_db__gid__get_gid_from_alias() but also reports whether
 * the row is marked temporary; this variant prepares/finalizes a
 * fresh statement rather than using the cached one.
 *
 * Throws a SQLITE error if the alias has no row.  On success the
 * caller owns the returned string (*ppszGid).
 */
void sg_wc_db__gid__get_gid_from_alias2(SG_context * pCtx,
										sg_wc_db * pDb,
										SG_uint64 uiAliasGid,
										char ** ppszGid,
										SG_bool * pbIsTmp)
{
	sqlite3_stmt * pStmt = NULL;
	char * pszGid = NULL;
	int rc;
	SG_bool bIsTmp;

	// TODO 2011/08/01 Should this check for "*null*" and offer to return NULL ?

	SG_ERR_CHECK( sg_sqlite__prepare(pCtx, pDb->psql, &pStmt,
									 ("SELECT gid,tmp FROM tbl_gid WHERE alias_gid = ?")) );
	SG_ERR_CHECK( sg_sqlite__bind_int64(pCtx, pStmt, 1, uiAliasGid) );

	// Exactly one row is expected; anything else (including
	// SQLITE_DONE, i.e. alias not found) is reported as an error.
	if ((rc=sqlite3_step(pStmt)) != SQLITE_ROW)
	{
		SG_int_to_string_buffer bufui64;
		SG_ERR_THROW2( SG_ERR_SQLITE(rc),
					   (pCtx, "sg_wc_db:tbl_gid can't find alias %s.",
						SG_uint64_to_sz(uiAliasGid, bufui64)) );
	}

	SG_ERR_CHECK( SG_STRDUP(pCtx, (const char *)sqlite3_column_text(pStmt, 0), &pszGid) );
	bIsTmp = (sqlite3_column_int(pStmt, 1) != 0);

	// BUGFIX: use the nullifying finalize here.  Previously this was
	// sg_sqlite__finalize(pCtx, pStmt); if that call failed we jumped
	// to "fail:" and finalized the same (already-freed) statement a
	// second time.  nullfinalize clears pStmt so the fail-path
	// finalize becomes a harmless no-op.
	SG_ERR_CHECK( sg_sqlite__nullfinalize(pCtx, &pStmt) );

	*ppszGid = pszGid;
	*pbIsTmp = bIsTmp;
	return;

fail:
	SG_ERR_IGNORE( sg_sqlite__nullfinalize(pCtx, &pStmt) );
	SG_NULLFREE(pCtx, pszGid);
}
/**
 * Get the TNE Row for the (uiAliasGirParent, pszEntryname) pair.
 * This should not have any of the entryname ambiguity problems
 * that such a request on the live view might have.
 *
 * pbFound is optional: when non-NULL, a missing row sets *pbFound
 * to SG_FALSE and *ppTneRow to NULL and returns cleanly; when NULL,
 * a missing row is reported as an error.
 *
 * On success the caller owns the returned row (*ppTneRow).
 */
void sg_wc_db__tne__get_row_by_parent_alias_and_entryname(SG_context * pCtx,
														  sg_wc_db * pDb,
														  const sg_wc_db__cset_row * pCSetRow,
														  SG_uint64 uiAliasGidParent,
														  const char * pszEntryname,
														  SG_bool * pbFound,
														  sg_wc_db__tne_row ** ppTneRow)
{
	sqlite3_stmt * pStmt = NULL;
	sg_wc_db__tne_row * pTneRow = NULL;
	int rc;

	// The tne table name is per-changeset, so it is substituted into
	// the SQL via the prepare's format string.
	SG_ERR_CHECK( sg_sqlite__prepare(pCtx, pDb->psql, &pStmt,
									 ("SELECT"
									  " alias_gid," // 0
									  " hid," // 1
									  " type," // 2
									  " attrbits" // 3
									  " FROM %s"
									  " WHERE alias_gid_parent = ?"
									  " AND entryname = ?"),
									 pCSetRow->psz_tne_table_name) );
	SG_ERR_CHECK( sg_sqlite__bind_int64(pCtx, pStmt, 1, uiAliasGidParent) );
	SG_ERR_CHECK( sg_sqlite__bind_text(pCtx, pStmt, 2, pszEntryname) );

	if ((rc=sqlite3_step(pStmt)) != SQLITE_ROW)
	{
		SG_int_to_string_buffer bufui64;

		// SQLITE_DONE means "no such row"; only soft-report it when
		// the caller asked for a found-flag.  Note the jump to "done"
		// (not "fail") so pTneRow (still NULL) isn't touched and the
		// statement is still finalized.
		if ((rc == SQLITE_DONE) && pbFound)
		{
			*pbFound = SG_FALSE;
			*ppTneRow = NULL;
			goto done;
		}
		SG_ERR_THROW2( SG_ERR_SQLITE(rc),
					   (pCtx, "sg_wc_db:%s can't find tne row for parent alias %s and entryname '%s'.",
						pCSetRow->psz_tne_table_name,
						SG_uint64_to_sz(uiAliasGidParent, bufui64),
						pszEntryname) );
	}

	// Populate the row from the result columns (order matches the
	// SELECT list above).
	SG_ERR_CHECK( sg_wc_db__tne_row__alloc(pCtx, &pTneRow) );
	pTneRow->p_s->uiAliasGid = (SG_uint64)sqlite3_column_int64(pStmt, 0);
	pTneRow->p_s->uiAliasGidParent = uiAliasGidParent;
	SG_ERR_CHECK( SG_STRDUP(pCtx, (const char *)sqlite3_column_text(pStmt, 1), &pTneRow->p_d->pszHid) );
	pTneRow->p_s->tneType = (SG_uint32)sqlite3_column_int(pStmt, 2);
	pTneRow->p_d->attrbits = (SG_uint64)sqlite3_column_int64(pStmt, 3);
	SG_ERR_CHECK( SG_STRDUP(pCtx, pszEntryname, &pTneRow->p_s->pszEntryname) );

	// nullfinalize clears pStmt so the finalize at "done:" below is a
	// no-op on the success path.
	SG_ERR_CHECK( sg_sqlite__nullfinalize(pCtx, &pStmt) );

#if TRACE_WC_DB
	SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDERR,
							  "sg_wc_db__tne__get_row_by_parent_alias_and_entryname: found:\n") );
	SG_ERR_IGNORE( sg_wc_db__debug__tne_row__print(pCtx, pTneRow) );
#endif

	if (pbFound)
		*pbFound = SG_TRUE;
	*ppTneRow = pTneRow;
	return;

fail:
	SG_WC_DB__TNE_ROW__NULLFREE(pCtx, pTneRow);
done:
	SG_ERR_IGNORE( sg_sqlite__nullfinalize(pCtx, &pStmt) );
}
/**
 * Build a fragball file in pFragballDirPathname satisfying a pull
 * request and return its (leaf) filename in *ppszFragballName.
 *
 * Request shapes handled:
 *  - pvhRequest == NULL: leaves of every dag in the repo.
 *  - request has SG_SYNC_STATUS_KEY__CLONE: a full-repo fragball.
 *  - otherwise: optional SG_SYNC_STATUS_KEY__DAGS (per-dag node
 *    lists, optionally widened by SG_SYNC_STATUS_KEY__GENERATIONS)
 *    and optional SG_SYNC_STATUS_KEY__BLOBS.
 *
 * On error, the partially-written fragball file is deleted.
 *
 * NOTE(review): ppvhStatus is NULL-checked but never written in the
 * code visible here -- presumably reserved for status reporting;
 * confirm against callers before relying on it.
 */
void SG_server__pull_request_fragball(SG_context* pCtx,
									  SG_repo* pRepo,
									  SG_vhash* pvhRequest,
									  const SG_pathname* pFragballDirPathname,
									  char** ppszFragballName,
									  SG_vhash** ppvhStatus)
{
	SG_pathname* pFragballPathname = NULL;
	SG_uint32* paDagNums = NULL;
	SG_rbtree* prbDagnodes = NULL;
	SG_string* pstrFragballName = NULL;
	char* pszRevFullHid = NULL;
	SG_rbtree_iterator* pit = NULL;
	SG_uint32* repoDagnums = NULL;

	SG_NULLARGCHECK_RETURN(pRepo);
	SG_NULLARGCHECK_RETURN(pFragballDirPathname);
	SG_NULLARGCHECK_RETURN(ppvhStatus);

#if TRACE_SERVER
	SG_ERR_CHECK( SG_vhash_debug__dump_to_console__named(pCtx, pvhRequest, "pull fragball request") );
#endif

	// Create the (empty) fragball file up front; on any later error
	// the fail path deletes it.
	SG_ERR_CHECK( SG_fragball__create(pCtx, pFragballDirPathname, &pFragballPathname) );

	if (!pvhRequest)
	{
		// Add leaves from every dag to the fragball.
		SG_uint32 count_dagnums;
		SG_uint32 i;
		SG_ERR_CHECK( SG_repo__list_dags(pCtx, pRepo, &count_dagnums, &paDagNums) );
		for (i=0; i<count_dagnums; i++)
		{
			SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo, paDagNums[i], &prbDagnodes) );
			SG_ERR_CHECK( SG_fragball__append__dagnodes(pCtx, pFragballPathname, pRepo, paDagNums[i], prbDagnodes) );
			SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
		}
		SG_ERR_CHECK( SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName) );
		SG_ERR_CHECK( SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName) );
	}
	else
	{
		// Build the requested fragball.
		SG_bool found;
		SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__CLONE, &found) );
		if (found)
		{
			// Full clone requested.
			SG_ERR_CHECK( SG_repo__fetch_repo__fragball(pCtx, pRepo, pFragballDirPathname, ppszFragballName) );
		}
		else
		{
			// Not a full clone.
			SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &found) );
			if (found)
			{
				// Dagnodes were requested.
				SG_uint32 generations = 0;
				SG_vhash* pvhDags;
				SG_uint32 count_requested_dagnums;
				SG_uint32 count_repo_dagnums = 0;
				SG_uint32 i;
				const char* pszDagNum = NULL;
				const SG_variant* pvRequestedNodes = NULL;
				SG_vhash* pvhRequestedNodes = NULL;
				const char* pszHidRequestedDagnode = NULL;

				// Were additional generations requested?
				SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__GENERATIONS, &found) );
				if (found)
					SG_ERR_CHECK( SG_vhash__get__uint32(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__GENERATIONS, &generations) );

				SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__DAGS, &pvhDags) );
				SG_ERR_CHECK( SG_vhash__count(pCtx, pvhDags, &count_requested_dagnums) );

				// Only enumerate the repo's dags if we actually have
				// dag requests to validate against.
				if (count_requested_dagnums)
					SG_ERR_CHECK( SG_repo__list_dags(pCtx, pRepo, &count_repo_dagnums, &repoDagnums) );

				// For each requested dag, get the requested nodes.
				for (i=0; i<count_requested_dagnums; i++)
				{
					SG_uint32 iMissingNodeCount;
					SG_uint32 iDagnum;
					SG_uint32 j;
					SG_bool isValidDagnum = SG_FALSE;
					SG_bool bSpecificNodesRequested = SG_FALSE;

					// Get the dag's missing node vhash.
					// (key = dagnum as decimal string, value = node list or null)
					SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvhDags, i, &pszDagNum, &pvRequestedNodes) );
					SG_ERR_CHECK( SG_dagnum__from_sz__decimal(pCtx, pszDagNum, &iDagnum) );

					// Verify that requested dagnum exists
					for (j = 0; j < count_repo_dagnums; j++)
					{
						if (repoDagnums[j] == iDagnum)
						{
							isValidDagnum = SG_TRUE;
							break;
						}
					}
					if (!isValidDagnum)
					{
						char buf[SG_DAGNUM__BUF_MAX__NAME];
						SG_ERR_CHECK( SG_dagnum__to_name(pCtx, iDagnum, buf, sizeof(buf)) );
						SG_ERR_THROW2(SG_ERR_NO_SUCH_DAG, (pCtx, "%s", buf));
					}

					if (pvRequestedNodes)
					{
						SG_ERR_CHECK( SG_variant__get__vhash(pCtx, pvRequestedNodes, &pvhRequestedNodes) );

						// Get each node listed for the dag
						SG_ERR_CHECK( SG_vhash__count(pCtx, pvhRequestedNodes, &iMissingNodeCount) );
						if (iMissingNodeCount > 0)
						{
							// NOTE(review): this j shadows the outer j above;
							// harmless here but worth renaming someday.
							SG_uint32 j;
							const SG_variant* pvVal;

							bSpecificNodesRequested = SG_TRUE;

							SG_ERR_CHECK( SG_RBTREE__ALLOC__PARAMS(pCtx, &prbDagnodes, iMissingNodeCount, NULL) );
							for (j=0; j<iMissingNodeCount; j++)
							{
								// Each key is a node identifier; the optional value
								// says how to resolve it (HID prefix or tag).
								SG_ERR_CHECK( SG_vhash__get_nth_pair(pCtx, pvhRequestedNodes, j, &pszHidRequestedDagnode, &pvVal) );
								if (pvVal)
								{
									const char* pszVal;
									SG_ERR_CHECK( SG_variant__get__sz(pCtx, pvVal, &pszVal) );
									if (pszVal)
									{
										if (0 == strcmp(pszVal, SG_SYNC_REQUEST_VALUE_HID_PREFIX))
										{
											// Resolve a HID prefix to the full HID.
											SG_ERR_CHECK( SG_repo__hidlookup__dagnode(pCtx, pRepo, iDagnum, pszHidRequestedDagnode, &pszRevFullHid) );
											pszHidRequestedDagnode = pszRevFullHid;
										}
										else if (0 == strcmp(pszVal, SG_SYNC_REQUEST_VALUE_TAG))
										{
											// Resolve a tag to the tagged node's HID.
											SG_ERR_CHECK( SG_vc_tags__lookup__tag(pCtx, pRepo, pszHidRequestedDagnode, &pszRevFullHid) );
											if (!pszRevFullHid)
												SG_ERR_THROW(SG_ERR_TAG_NOT_FOUND);
											pszHidRequestedDagnode = pszRevFullHid;
										}
										else
											SG_ERR_THROW(SG_ERR_PULL_INVALID_FRAGBALL_REQUEST);
									}
								}

								SG_ERR_CHECK( SG_rbtree__update(pCtx, prbDagnodes, pszHidRequestedDagnode) );

								// Get additional dagnode generations, if requested.
								SG_ERR_CHECK( SG_sync__add_n_generations(pCtx, pRepo, pszHidRequestedDagnode, prbDagnodes, generations) );
								SG_NULLFREE(pCtx, pszRevFullHid);
							}
						}
					}

					if (!bSpecificNodesRequested)
					{
						// When no specific nodes are in the request, add all leaves.
						SG_ERR_CHECK( SG_repo__fetch_dag_leaves(pCtx, pRepo, iDagnum, &prbDagnodes) );

						// Get additional dagnode generations, if requested.
						if (generations)
						{
							// NOTE(review): shadows the outer "found".
							SG_bool found;
							const char* hid;

							SG_ERR_CHECK( SG_rbtree__iterator__first(pCtx, &pit, prbDagnodes, &found, &hid, NULL) );
							while (found)
							{
								SG_ERR_CHECK( SG_sync__add_n_generations(pCtx, pRepo, hid, prbDagnodes, generations) );
								SG_ERR_CHECK( SG_rbtree__iterator__next(pCtx, pit, &found, &hid, NULL) );
							}
						}
					}

					if (prbDagnodes) // can be null when leaves of an empty dag are requested
					{
						SG_ERR_CHECK( SG_fragball__append__dagnodes(pCtx, pFragballPathname, pRepo, iDagnum, prbDagnodes) );
						SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
					}

				} // dagnum loop
			} // if "dags" exists

			/* Add requested blobs to the fragball */
			SG_ERR_CHECK( SG_vhash__has(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &found) );
			if (found)
			{
				// Blobs were requested.
				SG_vhash* pvhBlobs;
				SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhRequest, SG_SYNC_STATUS_KEY__BLOBS, &pvhBlobs) );
				SG_ERR_CHECK( SG_sync__add_blobs_to_fragball(pCtx, pRepo, pFragballPathname, pvhBlobs) );
			}

			SG_ERR_CHECK( SG_pathname__get_last(pCtx, pFragballPathname, &pstrFragballName) );
			SG_ERR_CHECK( SG_STRDUP(pCtx, SG_string__sz(pstrFragballName), ppszFragballName) );
		}
	}

	/* fallthru */

fail:
	// If we had an error, delete the half-baked fragball.
	if (pFragballPathname && SG_context__has_err(pCtx))
		SG_ERR_IGNORE( SG_fsobj__remove__pathname(pCtx, pFragballPathname) );

	SG_PATHNAME_NULLFREE(pCtx, pFragballPathname);
	SG_NULLFREE(pCtx, paDagNums);
	SG_RBTREE_NULLFREE(pCtx, prbDagnodes);
	SG_STRING_NULLFREE(pCtx, pstrFragballName);
	SG_NULLFREE(pCtx, pszRevFullHid);
	SG_RBTREE_ITERATOR_NULLFREE(pCtx, pit);
	SG_NULLFREE(pCtx, repoDagnums);
}
/**
 * Our caller is trying to create a new repo and create a WD mapped
 * to it.  When we are building upon a directory that already existed
 * on disk, scan it for nested working copies (items the status
 * layer reports as "reserved", e.g. another .sgdrawer) and refuse,
 * because submodules are not yet supported.
 *
 * If we created the WD (or its contents) ourselves, nothing
 * pre-existing can be nested inside it, so we return immediately.
 */
static void _check_for_nested_drawer(SG_context * pCtx,
									 SG_wc_tx * pWcTx)
{
	SG_varray * pvaStatus = NULL;
	SG_string * pStringDrawerRepoPath = NULL;
	SG_string * pStringParentRepoPath = NULL;
	const char * pszDrawerName = NULL;
	const char * pszDrawerRepoPath = NULL;
	SG_uint32 i, nrStatusItems;

	if (pWcTx->bWeCreated_WD || pWcTx->bWeCreated_WD_Contents)
		return;

	SG_ERR_CHECK( SG_wc_tx__status(pCtx, pWcTx, NULL, SG_UINT32_MAX,
								   SG_FALSE, // bListUnchanged
								   SG_TRUE,  // bNoIgnores
								   SG_TRUE,  // bNoTSC,
								   SG_FALSE, // bListSparse
								   SG_TRUE,  // bListReserved
								   SG_TRUE,  // bNoSort,
								   &pvaStatus, NULL) );
	if (!pvaStatus)
		return;

	// TODO 2012/11/13 For now we just look for a .sgdrawer somewhere
	// TODO within the directory tree and complain if we find one that
	// TODO isn't ours.  This is mainly to prevent accidents; a sub-WD
	// TODO could still be moved deep into this directory later.

	// Compute the repo-path of the drawer we just created at the root
	// (with a trailing slash) so we can skip it in the scan below.
	SG_ERR_CHECK( SG_workingdir__get_drawer_directory_name(pCtx, &pszDrawerName) );
	SG_ERR_CHECK( SG_STRING__ALLOC(pCtx, &pStringDrawerRepoPath) );
	SG_ERR_CHECK( SG_string__sprintf(pCtx, pStringDrawerRepoPath, "@/%s", pszDrawerName) );
	SG_ERR_CHECK( SG_repopath__ensure_final_slash(pCtx, pStringDrawerRepoPath) );
	pszDrawerRepoPath = SG_string__sz(pStringDrawerRepoPath);

	SG_ERR_CHECK( SG_varray__count(pCtx, pvaStatus, &nrStatusItems) );
	for (i=0; i<nrStatusItems; i++)
	{
		SG_vhash * pvhItem;
		SG_vhash * pvhItemStatus;
		SG_bool bIsReserved;
		const char * pszRepoPath;

		SG_ERR_CHECK( SG_varray__get__vhash(pCtx, pvaStatus, i, &pvhItem) );
		SG_ERR_CHECK( SG_vhash__get__vhash(pCtx, pvhItem, "status", &pvhItemStatus) );
		SG_ERR_CHECK( SG_vhash__has(pCtx, pvhItemStatus, "isReserved", &bIsReserved) );
		if (!bIsReserved)
			continue;

		// Don't freak out over the .sgdrawer that we just created in the root.
		SG_ERR_CHECK( SG_vhash__get__sz(pCtx, pvhItem, "path", &pszRepoPath) );
		if (strcmp(pszRepoPath, pszDrawerRepoPath) == 0)
			continue;

		// Found somebody else's drawer: report the directory that contains it.
		SG_ERR_CHECK( SG_STRING__ALLOC__SZ(pCtx, &pStringParentRepoPath, pszRepoPath) );
		SG_ERR_CHECK( SG_repopath__remove_last(pCtx, pStringParentRepoPath) );
		SG_ERR_THROW2( SG_ERR_ENTRY_ALREADY_UNDER_VERSION_CONTROL,
					   (pCtx, "The directory '%s' contains a working copy and submodules are not yet supported.",
						SG_string__sz(pStringParentRepoPath)) );
	}

fail:
	SG_STRING_NULLFREE(pCtx, pStringParentRepoPath);
	SG_STRING_NULLFREE(pCtx, pStringDrawerRepoPath);
	SG_VARRAY_NULLFREE(pCtx, pvaStatus);
}
/** * Deal with the status for 1 item and accumulate * the result in pvaStatus. * */ static void _sg_wc_tx__status(SG_context * pCtx, SG_wc_tx * pWcTx, SG_varray * pvaStatus, const char * pszInput, SG_uint32 depth, SG_bool bListUnchanged, SG_bool bNoIgnores, SG_bool bNoTSC, SG_bool bListSparse, SG_bool bListReserved, SG_vhash ** ppvhLegend) { SG_string * pStringRepoPath = NULL; sg_wc_liveview_item * pLVI; // we do not own this SG_bool bKnown; char chDomain; SG_vhash * pvhCSets = NULL; SG_vhash * pvhLegend = NULL; const char * pszWasLabel_l = "Baseline (B)"; const char * pszWasLabel_r = "Working"; SG_NULLARGCHECK_RETURN( pWcTx ); SG_NULLARGCHECK_RETURN( pvaStatus ); // pszInput is optional -- if omitted, we assume "@/". // ppvhLegend is optional SG_ERR_CHECK( sg_wc_db__path__anything_to_repopath(pCtx, pWcTx->pDb, pszInput, SG_WC_DB__PATH__IMPORT_FLAGS__TREAT_NULL_AS_ROOT, &pStringRepoPath, &chDomain) ); #if TRACE_WC_TX_STATUS SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDERR, "SG_wc_tx__status: '%s' normalized to [domain %c] '%s'\n", pszInput, chDomain, SG_string__sz(pStringRepoPath)) ); #endif SG_ERR_CHECK( sg_wc_tx__liveview__fetch_item__domain(pCtx, pWcTx, pStringRepoPath, &bKnown, &pLVI) ); if (!bKnown) { // We only get this if the path is completely bogus and // took us off into the weeds (as opposed to reporting // something just not-controlled). 
SG_ERR_THROW2( SG_ERR_NOT_FOUND, (pCtx, "Unknown item '%s'.", SG_string__sz(pStringRepoPath)) ); } SG_ERR_CHECK( sg_wc_tx__rp__status__lvi(pCtx, pWcTx, pLVI, depth, bListUnchanged, bNoIgnores, bNoTSC, bListSparse, bListReserved, pszWasLabel_l, pszWasLabel_r, pvaStatus) ); #if TRACE_WC_TX_STATUS SG_ERR_IGNORE( SG_console(pCtx, SG_CS_STDERR, "SG_wc_tx__status: computed status [depth %d][bListUnchanged %d][bNoIgnores %d][bNoTSC %d][bListSparse %d][bListReserved %d] on '%s':\n", depth, bListUnchanged, bNoIgnores, bNoTSC, bListSparse, bListReserved, SG_string__sz(pStringRepoPath)) ); SG_ERR_IGNORE( SG_varray_debug__dump_varray_of_vhashes_to_console(pCtx, pvaStatus, "") ); #endif if (ppvhLegend) { const char * pszHid_x; SG_ERR_CHECK( SG_wc_tx__get_wc_csets__vhash(pCtx, pWcTx, &pvhCSets) ); SG_ERR_CHECK( SG_VHASH__ALLOC(pCtx, &pvhLegend) ); SG_ERR_CHECK( SG_vhash__check__sz(pCtx, pvhCSets, "A", &pszHid_x) ); if (pszHid_x) SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhLegend, "A", pszHid_x) ); SG_ERR_CHECK( SG_vhash__check__sz(pCtx, pvhCSets, "L0", &pszHid_x) ); if (pszHid_x) SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhLegend, "B", pszHid_x) ); SG_ERR_CHECK( SG_vhash__check__sz(pCtx, pvhCSets, "L1", &pszHid_x) ); if (pszHid_x) SG_ERR_CHECK( SG_vhash__add__string__sz(pCtx, pvhLegend, "C", pszHid_x) ); #if TRACE_WC_TX_STATUS SG_ERR_IGNORE( SG_vhash_debug__dump_to_console__named(pCtx, pvhLegend, "Legend") ); #endif *ppvhLegend = pvhLegend; pvhLegend = NULL; } fail: SG_STRING_NULLFREE(pCtx, pStringRepoPath); SG_VHASH_NULLFREE(pCtx, pvhLegend); SG_VHASH_NULLFREE(pCtx, pvhCSets); }